def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = []
    header.append(u"extraction date")
    header.append(u"study unit")
    header.append(u"collection")
    header.append(u"ISSN SciELO")
    header.append(u"ISSN's")
    header.append(u"title at SciELO")
    header.append(u"title thematic areas")
    for area in choices.THEMATIC_AREAS:
        header.append(u"title is %s" % area.lower())
    header.append(u"title is multidisciplinary")
    header.append(u"title current status")
    header.append(u"status change date")
    header.append(u"status change year")
    header.append(u"status change month")
    header.append(u"status change day")
    header.append(u"status changed to")
    header.append(u"status change reason")

    # Quote every field (doubling embedded quotes) and emit the CSV header row.
    self.write(u','.join([u'"%s"' % i.replace(u'"', u'""') for i in header]))

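# The header row above is quoted by hand: embedded double quotes are doubled and
# each field is wrapped in quotes, i.e. RFC 4180-style CSV escaping. A minimal
# standalone sketch of that rule; the quote_field helper name is hypothetical.
def quote_field(value):
    # Double embedded quotes, then wrap the whole field, as the join above does.
    return u'"%s"' % value.replace(u'"', u'""')

# Example: a field containing quotes and a comma survives as a single CSV field.
assert quote_field(u'title "at" SciELO, v2') == u'"title ""at"" SciELO, v2"'
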
def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = []
    header.append(u"extraction date")
    header.append(u"study unit")
    header.append(u"collection")
    header.append(u"ISSN SciELO")
    header.append(u"ISSN's")
    header.append(u"title at SciELO")
    header.append(u"title thematic areas")
    for area in choices.THEMATIC_AREAS:
        header.append(u"title is %s" % area.lower())
    header.append(u"title is multidisciplinary")
    header.append(u"title current status")
    header.append(u"document publishing ID (PID SciELO)")
    header.append(u"document publishing year")
    header.append(u"document is citable")
    header.append(u"document type")
    header.append(u"document languages")
    header.append(u"document pt")
    header.append(u"document es")
    header.append(u"document en")
    header.append(u"document other languages")

    self.write(u','.join([u'"%s"' % i.replace(u'"', u'""') for i in header]))

def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = []
    header.append(u"extraction date")
    header.append(u"study unit")
    header.append(u"collection")
    header.append(u"ISSN SciELO")
    header.append(u"ISSN's")
    header.append(u"title at SciELO")
    header.append(u"title thematic areas")
    for area in choices.THEMATIC_AREAS:
        header.append(u"title is %s" % area.lower())
    header.append(u"title is multidisciplinary")
    header.append(u"title current status")
    header.append(u"document publishing ID (PID SciELO)")
    header.append(u"document publishing year")
    header.append(u"document type")
    header.append(u"document is citable")
    header.append(u"document author")
    header.append(u"document author institution")
    header.append(u"document author affiliation country")
    header.append(u"document author affiliation state")
    header.append(u"document author affiliation city")

    self.write(u','.join([u'"%s"' % i.replace(u'"', u'""') for i in header]))

def __init__(self, collection, issns=None, output_file=None, not_normalized=True):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = output_file
    self.not_normalized = not_normalized

def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    self.write(','.join([
        u"PID",
        u"ISSN",
        u"título",
        u"área temática",
        u"ano de publicação",
        u"tipo de documento",
        u"paises de afiliação",
        u"exclusivo nacional",
        u"exclusivo estrangeiro",
        u"nacional + estrangeiro"
    ]))

def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = [
        u"PID",
        u"ISSN",
        u"título",
        u"área temática",
        u"ano de publicação",
        u"tipo de documento",
        u"título do artigo",
        u"doi",
        u"url",
        u"altmetrics url",
        u"score"
    ]

    self.write(','.join(header))

def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = [
        u"PID",
        u"issn",
        u"título",
        u"área temática",
        u"ano de publicação",
        u"tipo de documento",
        u"total autores",
        u"0 autores",
        u"1 autor",
        u"2 autores",
        u"3 autores",
        u"4 autores",
        u"5 autores",
        u"+6 autores",
        u"total páginas",
        u"total referências"
    ]

    self.write(','.join(header))

def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = [
        u"issn scielo",
        u"issn impresso",
        u"issn eletrônico",
        u"nome do publicador",
        u"título",
        u"título abreviado",
        u"título nlm",
        u"área temática",
        u"bases WOS",
        u"áreas temáticas WOS",
        u"situação atual",
        u"ano de inclusão",
        u"licença de uso padrão",
        u"histórico data",
        u"histórico ano",
        u"histórico status"
    ]

    self.write(','.join(header))

def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = [
        u"PID",
        u"ISSN",
        u"título",
        u"área temática",
        u"ano de publicação",
        u"tipo de documento",
        u"recebido",
        u"revisado",
        u"aceito",
        u"publicado",
        u"entrada no SciELO",
        u"atualização no SciELO"
    ]

    self.write(','.join(header))

def __init__(self, collection, issns=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns

    # Each sub-dumper writes its own CSV file for the same collection.
    self.counts = counts.Dumper(collection, output_file='counts.csv')
    self.affiliations = affiliations.Dumper(collection, output_file='affiliations.csv')
    self.languages = languages.Dumper(collection, output_file='languages.csv')
    self.licenses = licenses.Dumper(collection, output_file='licenses.csv')
    self.authors = authors.Dumper(collection, output_file='authors.csv')
    self.dates = dates.Dumper(collection, output_file='dates.csv')

def __init__(self, collection, issns=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns

    self.documents_counts = documents_counts.Dumper(
        collection, output_file='documents_counts.csv')
    self.documents_affiliations = documents_affiliations.Dumper(
        collection, output_file='documents_affiliations.csv')
    self.documents_languages = documents_languages.Dumper(
        collection, output_file='documents_languages.csv')
    self.documents_licenses = documents_licenses.Dumper(
        collection, output_file='documents_licenses.csv')
    self.documents_authors = documents_authors.Dumper(
        collection, output_file='documents_authors.csv')
    self.documents_dates = documents_dates.Dumper(
        collection, output_file='documents_dates.csv')

def __init__(self, collection, issns=None, from_date=FROM, until_date=UNTIL,
             dayly_granularity=DAYLY_GRANULARITY, fmt=OUTPUT_FORMAT, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.from_date = from_date
    self.until_date = until_date
    self.dayly_granularity = dayly_granularity
    self.output_file = output_file
    self.issns = issns
    self.collection = collection

    self.fmt = self.fmt_csv
    if fmt == 'json':
        self.fmt = self.fmt_json

def __init__(self, collection, issns=None, from_date=FROM, until_date=UNTIL,
             dayly_granularity=DAYLY_GRANULARITY, fmt=OUTPUT_FORMAT, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.from_date = from_date
    self.until_date = until_date
    self.dayly_granularity = dayly_granularity
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file
    self.issns = issns
    self.collection = collection

    if fmt == 'json':
        self.fmt = self.fmt_json
    else:
        self.fmt = self.fmt_csv

    header = []
    header.append(u"extraction date")
    header.append(u"study unit")
    header.append(u"collection")
    header.append(u"ISSN SciELO")
    header.append(u"ISSN's")
    header.append(u"title at SciELO")
    header.append(u"title thematic areas")
    for area in choices.THEMATIC_AREAS:
        header.append(u"title is %s" % area.lower())
    header.append(u"title is multidisciplinary")
    header.append(u"title current status")
    header.append(u"document publishing ID (PID SciELO)")
    header.append(u"document publishing year")
    header.append(u"document type")
    header.append(u"document is citable")
    header.append(u"issue")
    header.append(u"issue title")
    header.append(u"document title")
    header.append(u"processing date")
    header.append(u"publication date at SciELO")
    header.append(u"publication date")
    header.append(u"access date")
    header.append(u"access year")
    header.append(u"access month")
    header.append(u"access to abstract")
    header.append(u"access to html")
    header.append(u"access to pdf")
    header.append(u"access to epdf")
    header.append(u"access total")

    self.write(u','.join([u'"%s"' % i.replace(u'"', u'""') for i in header]))

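# The csv/json switch above binds a formatter once, so later code can call
# self.fmt(...) without re-checking the requested output format. fmt_csv and
# fmt_json are not part of this excerpt; the sketch below uses hypothetical
# placeholder bodies only to illustrate the dispatch pattern.
import json


class FormatDispatchSketch(object):

    def __init__(self, fmt='csv'):
        # Default to CSV; switch to the JSON formatter only when asked for it.
        self.fmt = self.fmt_csv
        if fmt == 'json':
            self.fmt = self.fmt_json

    def fmt_csv(self, record):
        # Hypothetical stand-in: quote each value like the header rows above.
        return u','.join(u'"%s"' % (u'%s' % v).replace(u'"', u'""') for v in record)

    def fmt_json(self, record):
        # Hypothetical stand-in: serialize one record per line.
        return json.dumps(record)


# Usage (placeholder values): the caller never branches on the format again.
print(FormatDispatchSketch('json').fmt([u'2015-01-01', u'scl', 10]))
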
def __init__(self, collection, issns=None, output_file=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self._publicationstats = utils.publicationstats_server()
    self.collection = collection
    self.issns = issns
    self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file

    header = [
        u"Título do Periódico (publication_title)",
        u"ISSN impresso (print_identifier)",
        u"ISSN online (online_identifier)",
        u"Data do primeiro fascículo (date_first_issue_online)",
        u"volume do primeiro fascículo (num_first_vol_online)",
        u"número do primeiro fascículo (num_first_issue_online)",
        u"Data do último fascículo publicado (date_last_issue_online)",
        u"volume do último fascículo publicado (num_last_vol_online)",
        u"número do último fascículo publicado (num_last_issue_online)",
        u"url de fascículos (title_url)",
        u"primeiro autor (first_author)",
        u"ID do periódico no SciELO (title_id)",
        u"informação de embargo (embargo_info)",
        u"cobertura (coverage_depth)",
        u"informação sobre cobertura (coverage_notes)",
        u"nome do publicador (publisher_name)",
        u"tipo de publicação (publication_type)",
        u"data de publicação monográfica impressa (date_monograph_published_print)",
        u"data de publicação monográfica online (date_monograph_published_online)",
        u"volume de monografia (monograph_volume)",
        u"edição de monografia (monograph_edition)",
        u"primeiro editor (first_editor)",
        u"ID de publicação pai (parent_publication_title_id)",
        u"ID de publicação prévia (preceding_publication_title_id)",
        u"tipo de acesso (access_type)"
    ]

    self.write(u','.join([u'"%s"' % i.replace(u'"', u'""') for i in header]))

def __init__(self, collection, home_nationality=None, issns=None):

    self._ratchet = utils.ratchet_server()
    self._articlemeta = utils.articlemeta_server()
    self.collection = collection
    self.issns = issns
    self.home_nationality = home_nationality

    self.documents_counts = documents_counts.Dumper(
        collection, output_file='documents_counts.csv')
    self.documents_affiliations = documents_affiliations.Dumper(
        collection, output_file='documents_affiliations.csv')
    self.documents_languages = documents_languages.Dumper(
        collection, output_file='documents_languages.csv')
    self.documents_licenses = documents_licenses.Dumper(
        collection, output_file='documents_licenses.csv')
    self.documents_authors = documents_authors.Dumper(
        collection, output_file='documents_authors.csv')
    self.documents_dates = documents_dates.Dumper(
        collection, output_file='documents_dates.csv')

    # The affiliation-nationality breakdown is produced only when a home country is given.
    if self.home_nationality:
        self.documents_affiliations_nationality = documents_affiliations_nationality.Dumper(
            home_nationality, collection, output_file='documents_affiliation_nationality.csv')
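
# A hedged usage sketch for the composite dumper above, assuming this class is
# also exposed as Dumper like the per-table modules it wires together. Only the
# constructor shown in this excerpt is exercised; the collection acronym, home
# country code and ISSN below are placeholders for illustration.
if __name__ == '__main__':
    dumper = Dumper(
        'scl',                    # placeholder collection acronym
        home_nationality='BR',    # placeholder home country code
        issns=['0000-0000'],      # placeholder ISSN filter
    )
    # At this point each wrapped Dumper has opened its own CSV output file in its
    # constructor and written its header row (see the __init__ methods above).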