def isbn_handling():
    index = 0
    tempfile_name = "TempRangeMessage.xml"
    while os.path.isfile(tempfile_name):
        index += 1
        tempfile_name = "TempRangeMessage_" + str(index) + ".xml"
    print('\nUsing temporary RangeMessage file path "' + tempfile_name + '"...')
    yield oat.ISBNHandling(tempfile_name)
    print('\nRemoving temporary RangeMessage file "' + tempfile_name + '"...')
    os.remove(tempfile_name)
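# isbn_handling() has the generator setup/teardown shape of a pytest fixture
# (work before the yield, cleanup after it). A minimal sketch of how it would
# presumably be consumed -- the decorator and the test are illustrative
# assumptions, not part of this module:
#
#   import pytest
#
#   @pytest.fixture(name="isbn_handling")
#   def _isbn_handling_fixture():
#       yield from isbn_handling()
#
#   def test_range_message(isbn_handling):
#       # receives the yielded oat.ISBNHandling instance
#       assert isbn_handling is not None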
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("csv_file", help=ARG_HELP_STRINGS["csv_file"])
    parser.add_argument("-O", "--offsetting_mode", help=ARG_HELP_STRINGS["offsetting"])
    parser.add_argument("-b", "--bypass-cert-verification", action="store_true",
                        help=ARG_HELP_STRINGS["bypass"])
    parser.add_argument("-e", "--encoding", help=ARG_HELP_STRINGS["encoding"])
    parser.add_argument("-f", "--force", action="store_true", help=ARG_HELP_STRINGS["force"])
    parser.add_argument("-i", "--ignore-header", action="store_true",
                        help=ARG_HELP_STRINGS["ignore_header"])
    parser.add_argument("-j", "--force-header", action="store_true",
                        help=ARG_HELP_STRINGS["force_header"])
    parser.add_argument("-l", "--locale", help=ARG_HELP_STRINGS["locale"])
    parser.add_argument("-a", "--add-unknown-columns", action="store_true",
                        help=ARG_HELP_STRINGS["unknown_columns"])
    parser.add_argument("-d", "--dialect", choices=["excel", "excel-tab", "unix"],
                        help=ARG_HELP_STRINGS["dialect"])
    parser.add_argument("-v", "--verbose", action="store_true", help=ARG_HELP_STRINGS["verbose"])
    parser.add_argument("-o", "--overwrite", action="store_true",
                        help=ARG_HELP_STRINGS["overwrite"])
    parser.add_argument("-u", "--update", action="store_true", help=ARG_HELP_STRINGS["update"])
    parser.add_argument("-r", "--round_monetary", action="store_true",
                        help=ARG_HELP_STRINGS["round_monetary"])
    parser.add_argument("--no-crossref", action="store_true", help=ARG_HELP_STRINGS["no_crossref"])
    parser.add_argument("--no-pubmed", action="store_true", help=ARG_HELP_STRINGS["no_pubmed"])
    parser.add_argument("--no-doaj", action="store_true", help=ARG_HELP_STRINGS["no_doaj"])
    parser.add_argument("-institution", "--institution_column", type=int,
                        help=ARG_HELP_STRINGS["institution"])
    parser.add_argument("-period", "--period_column", type=int, help=ARG_HELP_STRINGS["period"])
    parser.add_argument("-doi", "--doi_column", type=int, help=ARG_HELP_STRINGS["doi"])
    parser.add_argument("-euro", "--euro_column", type=int, help=ARG_HELP_STRINGS["euro"])
    parser.add_argument("-is_hybrid", "--is_hybrid_column", type=int,
                        help=ARG_HELP_STRINGS["is_hybrid"])
    parser.add_argument("-publisher", "--publisher_column", type=int,
                        help=ARG_HELP_STRINGS["publisher"])
    parser.add_argument("-journal_full_title", "--journal_full_title_column", type=int,
                        help=ARG_HELP_STRINGS["journal_full_title"])
    parser.add_argument("-book_title", "--book_title_column", type=int,
                        help=ARG_HELP_STRINGS["book_title"])
    parser.add_argument("-issn", "--issn_column", type=int, help=ARG_HELP_STRINGS["issn"])
    parser.add_argument("-isbn", "--isbn_column", type=int, help=ARG_HELP_STRINGS["isbn"])
    parser.add_argument("-backlist_oa", "--backlist_oa_column", type=int,
                        help=ARG_HELP_STRINGS["backlist_oa"])
    parser.add_argument("-additional_isbns", "--additional_isbn_columns", type=int, nargs='+',
                        help=ARG_HELP_STRINGS["additional_isbns"])
    parser.add_argument("-url", "--url_column", type=int, help=ARG_HELP_STRINGS["url"])
    parser.add_argument("-start", type=int, help=ARG_HELP_STRINGS["start"])
    parser.add_argument("-end", type=int, help=ARG_HELP_STRINGS["end"])

    args = parser.parse_args()

    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(oat.ANSIColorFormatter())
    bufferedHandler = oat.BufferedErrorHandler(handler)
    bufferedHandler.setFormatter(oat.ANSIColorFormatter())
    logging.root.addHandler(handler)
    logging.root.addHandler(bufferedHandler)
    logging.root.setLevel(logging.INFO)
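    # Locale handling matters here because the heuristic column analysis
    # further down uses locale.atof() to recognise monetary amounts, so the
    # number format of the active locale decides what parses. An illustrative
    # example (assuming both locales are installed on the system):
    #
    #   locale.setlocale(locale.LC_ALL, "de_DE.UTF-8")
    #   locale.atof("1.234,56")   # -> 1234.56
    #   locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
    #   locale.atof("1,234.56")   # -> 1234.56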
    if args.locale:
        norm = locale.normalize(args.locale)
        if norm != args.locale:
            msg = "locale '{}' not found, normalised to '{}'".format(args.locale, norm)
            oat.print_y(msg)
        try:
            loc = locale.setlocale(locale.LC_ALL, norm)
            oat.print_g("Using locale " + loc)
        except locale.Error as loce:
            # locale.Error has no "message" attribute in Python 3, use str()
            msg = "Setting locale to {} failed: {}".format(norm, str(loce))
            oat.print_r(msg)
            sys.exit()

    enc = None  # CSV file encoding
    if args.encoding:
        try:
            codec = codecs.lookup(args.encoding)
            msg = ("Encoding '{}' found in Python's codec collection " +
                   "as '{}'").format(args.encoding, codec.name)
            oat.print_g(msg)
            enc = args.encoding
        except LookupError:
            msg = ("Error: '" + args.encoding + "' not found in Python's " +
                   "codec collection. Either look for a valid name here " +
                   "(https://docs.python.org/3/library/codecs.html#standard-" +
                   "encodings) or omit this argument to enable automated " +
                   "guessing.")
            oat.print_r(msg)
            sys.exit()

    result = oat.analyze_csv_file(args.csv_file, enc=enc)
    if result["success"]:
        csv_analysis = result["data"]
        print(csv_analysis)
    else:
        print(result["error_msg"])
        sys.exit()

    if args.dialect:
        dialect = args.dialect
        oat.print_g('Dialect sniffing results ignored, using built-in CSV dialect "' +
                    dialect + '"')
    else:
        dialect = csv_analysis.dialect

    if enc is None:
        enc = csv_analysis.enc
    has_header = csv_analysis.has_header or args.force_header

    if enc is None:
        print("Error: No encoding given for CSV file and automated " +
              "detection failed. Please set the encoding manually via the " +
              "--encoding argument")
        sys.exit()

    csv_file = open(args.csv_file, "r", encoding=enc)
    reader = csv.reader(csv_file, dialect=dialect)
    first_row = next(reader)
    num_columns = len(first_row)
    print("\nCSV file has {} columns.".format(num_columns))
    csv_file.seek(0)
    reader = csv.reader(csv_file, dialect=dialect)

    if args.update and args.overwrite:
        oat.print_r("Error: Either use the -u or the -o option, not both.")
        sys.exit()
    if args.overwrite:
        for column in OVERWRITE_STRATEGY.keys():
            OVERWRITE_STRATEGY[column] = CSVColumn.OW_ALWAYS
    elif not args.update:
        for column in OVERWRITE_STRATEGY.keys():
            OVERWRITE_STRATEGY[column] = CSVColumn.OW_ASK

    additional_isbn_columns = []
    if args.additional_isbn_columns:
        for index in args.additional_isbn_columns:
            if index > num_columns:
                msg = "Error: Additional ISBN column index {} exceeds number of columns ({})."
                oat.print_r(msg.format(index, num_columns))
                sys.exit()
            else:
                additional_isbn_columns.append(index)
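    # column_map assigns each known metadata field a CSVColumn that records its
    # requirement level per publication type ("articles"/"books"): MANDATORY
    # columns must be identified for enrichment to run at all, BACKUP columns
    # act as fallback lookup sources (without them, each record needs a valid
    # DOI, as the summary messages below spell out), and NONE columns are not
    # expected in the input. Fields without a command line argument are created
    # with index None and can only be picked up via header analysis.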
    column_map = {
        "institution": CSVColumn("institution",
                                 {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                                 args.institution_column, overwrite=OVERWRITE_STRATEGY["institution"]),
        "period": CSVColumn("period",
                            {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                            args.period_column, overwrite=OVERWRITE_STRATEGY["period"]),
        "euro": CSVColumn("euro",
                          {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                          args.euro_column, overwrite=OVERWRITE_STRATEGY["euro"]),
        "doi": CSVColumn("doi",
                         {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                         args.doi_column, overwrite=OVERWRITE_STRATEGY["doi"]),
        "is_hybrid": CSVColumn("is_hybrid",
                               {"articles": CSVColumn.MANDATORY, "books": CSVColumn.NONE},
                               args.is_hybrid_column, overwrite=OVERWRITE_STRATEGY["is_hybrid"]),
        "publisher": CSVColumn("publisher",
                               {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                               args.publisher_column, overwrite=OVERWRITE_STRATEGY["publisher"]),
        "journal_full_title": CSVColumn("journal_full_title",
                                        {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                                        args.journal_full_title_column,
                                        overwrite=OVERWRITE_STRATEGY["journal_full_title"]),
        "issn": CSVColumn("issn",
                          {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                          args.issn_column, overwrite=OVERWRITE_STRATEGY["issn"]),
        "issn_print": CSVColumn("issn_print",
                                {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                None, overwrite=OVERWRITE_STRATEGY["issn_print"]),
        "issn_electronic": CSVColumn("issn_electronic",
                                     {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                     None, overwrite=OVERWRITE_STRATEGY["issn_electronic"]),
        "issn_l": CSVColumn("issn_l",
                            {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                            None, overwrite=OVERWRITE_STRATEGY["issn_l"]),
        "license_ref": CSVColumn("license_ref",
                                 {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                 None, overwrite=OVERWRITE_STRATEGY["license_ref"]),
        "indexed_in_crossref": CSVColumn("indexed_in_crossref",
                                         {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                         None, overwrite=OVERWRITE_STRATEGY["indexed_in_crossref"]),
        "pmid": CSVColumn("pmid",
                          {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                          None, overwrite=OVERWRITE_STRATEGY["pmid"]),
        "pmcid": CSVColumn("pmcid",
                           {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                           None, overwrite=OVERWRITE_STRATEGY["pmcid"]),
        "ut": CSVColumn("ut",
                        {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                        None, overwrite=OVERWRITE_STRATEGY["ut"]),
        "url": CSVColumn("url",
                         {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                         args.url_column, overwrite=OVERWRITE_STRATEGY["url"]),
        "doaj": CSVColumn("doaj",
                          {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                          None, overwrite=OVERWRITE_STRATEGY["doaj"]),
        "agreement": CSVColumn("agreement",
                               {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                               None, overwrite=OVERWRITE_STRATEGY["agreement"]),
        "book_title": CSVColumn("book_title",
                                {"articles": CSVColumn.NONE, "books": CSVColumn.RECOMMENDED},
                                args.book_title_column, overwrite=OVERWRITE_STRATEGY["book_title"]),
        "backlist_oa": CSVColumn("backlist_oa",
                                 {"articles": CSVColumn.NONE, "books": CSVColumn.MANDATORY},
                                 args.backlist_oa_column, overwrite=OVERWRITE_STRATEGY["backlist_oa"]),
        "isbn": CSVColumn("isbn",
                          {"articles": CSVColumn.NONE, "books": CSVColumn.BACKUP},
                          args.isbn_column, overwrite=OVERWRITE_STRATEGY["isbn"]),
        "isbn_print": CSVColumn("isbn_print",
                                {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                None, overwrite=OVERWRITE_STRATEGY["isbn_print"]),
"isbn_electronic": CSVColumn("isbn_electronic", {"articles": CSVColumn.NONE, "books": CSVColumn.NONE}, None, overwrite=OVERWRITE_STRATEGY["isbn_electronic"]) } header = None if has_header: for row in reader: if not row: # Skip empty lines continue header = row # First non-empty row should be the header if args.ignore_header: print("Skipping header analysis due to command line argument.") break else: print("\n *** Analyzing CSV header ***\n") for (index, item) in enumerate(header): if index in additional_isbn_columns: msg = "Column named '{}' at index {} is designated as additional ISBN column" print(msg.format(item, index)) continue column_type = oat.get_column_type_from_whitelist(item) if column_type is not None and column_map[column_type].index is None: column_map[column_type].index = index column_map[column_type].column_name = item found_msg = ("Found column named '{}' at index {}, " + "assuming this to be the '{}' column.") print(found_msg.format(item, index, column_type)) break print("\n *** Starting heuristical analysis ***\n") for row in reader: if not row: # Skip empty lines # We analyze the first non-empty line, a possible header should # have been processed by now. continue column_candidates = { "doi": [], "period": [], "euro": [] } found_msg = "The entry in column {} looks like a potential {}: {}" for (index, entry) in enumerate(row): if index in [csvcolumn.index for csvcolumn in column_map.values()] + additional_isbn_columns: # Skip columns already assigned continue entry = entry.strip() # Search for a DOI if column_map['doi'].index is None: if oat.DOI_RE.match(entry): column_id = str(index) # identify column either numerically or by column header if header: column_id += " ('" + header[index] + "')" print(found_msg.format(column_id, "DOI", entry)) column_candidates['doi'].append(index) continue # Search for a potential year string if column_map['period'].index is None: try: maybe_period = int(entry) now = datetime.date.today().year # Should be a wide enough margin if maybe_period >= 2000 and maybe_period <= now + 2: column_id = str(index) if header: column_id += " ('" + header[index] + "')" print(found_msg.format(column_id, "year", entry)) column_candidates['period'].append(index) continue except ValueError: pass # Search for a potential monetary amount if column_map['euro'].index is None: try: maybe_euro = locale.atof(entry) if maybe_euro >= 10 and maybe_euro <= 10000: column_id = str(index) if header: column_id += " ('" + header[index] + "')" print (found_msg.format(column_id, "euro amount", entry)) column_candidates['euro'].append(index) continue except ValueError: pass for column_type, candidates in column_candidates.items(): if column_map[column_type].index is not None: continue if len(candidates) > 1: print("Could not reliably identify the '" + column_type + "' column - more than one possible candiate!") elif len(candidates) < 1: print("No candidate found for column '" + column_type + "'!") else: index = candidates.pop() column_map[column_type].index = index if header: column_id = header[index] column_map[column_type].column_name = column_id else: column_id = index msg = "Assuming column '{}' to be the '{}' column." 
    print("\n *** CSV file analysis summary ***\n")
    index_dict = {csvc.index: csvc for csvc in column_map.values()}
    for index in range(num_columns):
        column_name = ""
        if header:
            column_name = header[index]
        if index in index_dict:
            column = index_dict[index]
            msg = u"column number {} ({}) is the '{}' column ({})".format(
                index, column_name, column.column_type, column.get_req_description())
            print(msg)
        elif index in additional_isbn_columns:
            msg = u"column number {} ({}) is an additional ISBN column".format(index, column_name)
            oat.print_c(msg)
        else:
            if args.add_unknown_columns:
                msg = (u"column number {} ({}) is an unknown column, it will be " +
                       "appended to the generated CSV file")
                print(msg.format(index, column_name))
                if not column_name:
                    # Use a generic name
                    column_name = "unknown"
                while column_name in column_map.keys():
                    # TODO: Replace by a numerical, increasing suffix
                    column_name += "_"
                column_map[column_name] = CSVColumn(column_name, CSVColumn.NONE, index)
            else:
                msg = (u"column number {} ({}) is an unknown column, it will be " +
                       "ignored")
                print(msg.format(index, column_name))
    print()
    for column in column_map.values():
        if column.index is None:
            msg = "The '{}' column could not be identified ({})"
            print(msg.format(column.column_type, column.get_req_description()))
    print()

    article_mand_missing = [x.column_type for x in column_map.values()
                            if x.requirement["articles"] == CSVColumn.MANDATORY and x.index is None]
    article_back_missing = [x.column_type for x in column_map.values()
                            if x.requirement["articles"] == CSVColumn.BACKUP and x.index is None]
    book_mand_missing = [x.column_type for x in column_map.values()
                         if x.requirement["books"] == CSVColumn.MANDATORY and x.index is None]
    book_back_missing = [x.column_type for x in column_map.values()
                         if x.requirement["books"] == CSVColumn.BACKUP and x.index is None]

    if article_mand_missing:
        msg = "Article enrichment is not possible - mandatory columns are missing ({})"
        oat.print_y(msg.format(", ".join(article_mand_missing)))
    elif article_back_missing:
        msg = ("Article enrichment is possible, but backup columns are missing ({}) - " +
               "each record will need a valid DOI")
        oat.print_b(msg.format(", ".join(article_back_missing)))
    else:
        oat.print_g("Article enrichment is possible with all backup columns in place")
    if book_mand_missing:
        msg = "Book enrichment is not possible - mandatory columns are missing ({})"
        oat.print_y(msg.format(", ".join(book_mand_missing)))
    elif book_back_missing:
        msg = ("Book enrichment is possible, but backup columns are missing ({}) - " +
               "each record will need a valid DOI")
        oat.print_b(msg.format(", ".join(book_back_missing)))
    else:
        oat.print_g("Book enrichment is possible with all backup columns in place")
    print()
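    # Enrichment is only aborted when *neither* publication type has its
    # mandatory columns in place (unless -f/--force is given). Missing columns
    # can also be mapped by index on the command line; a hypothetical
    # invocation (script name, file name and indices are made up):
    #
    #   python import_csv.py apcs.csv -period 1 -euro 3 -doi 0 -is_hybrid 4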
    if article_mand_missing and book_mand_missing:
        if not args.force:
            oat.print_r("ERROR: Could not detect the minimum mandatory data set for any " +
                        "publication type. There are 2 ways to fix this:")
            if not header:
                print("1) Add a header row to your file and identify the " +
                      "column(s) by assigning them an appropriate column name.")
            else:
                print("1) Identify the missing column(s) by assigning them " +
                      "a different column name in the CSV header (You can " +
                      "use the column name(s) mentioned in the message above)")
            print("2) Use command line parameters when calling this script " +
                  "to identify the missing columns (use -h for help)")
            sys.exit()
        else:
            oat.print_y("WARNING: Could not detect the minimum mandatory data set for any " +
                        "publication type - forced to continue.")

    start = input("\nStart metadata aggregation? (y/n):")
    while start not in ["y", "n"]:
        start = input("Please type 'y' or 'n':")
    if start == "n":
        sys.exit()

    print("\n *** Starting metadata aggregation ***\n")

    enriched_content = {}
    for record_type, fields in oat.COLUMN_SCHEMAS.items():
        # add headers
        enriched_content[record_type] = {
            "count": 0,
            "content": [list(fields)]
        }

    if not os.path.isdir("tempfiles"):
        os.mkdir("tempfiles")
    isbn_handling = oat.ISBNHandling("tempfiles/ISBNRangeFile.xml")
    doab_analysis = oat.DOABAnalysis(isbn_handling, "tempfiles/DOAB.csv", verbose=False)
    doaj_analysis = oat.DOAJAnalysis("tempfiles/DOAJ.csv")

    csv_file.seek(0)
    reader = csv.reader(csv_file, dialect=dialect)
    header_processed = False
    row_num = 0

    for row in reader:
        row_num += 1
        if not row:
            continue  # skip empty lines
        if not header_processed:
            header_processed = True
            if has_header:
                # If the CSV file has a header, we are currently there - skip it
                # to get to the first data row
                continue
        if args.start and args.start > row_num:
            continue
        if args.end and args.end < row_num:
            continue
        print("---Processing line number " + str(row_num) + "---")
        result_type, enriched_row = oat.process_row(row, row_num, column_map, num_columns,
                                                    additional_isbn_columns, doab_analysis,
                                                    doaj_analysis, args.no_crossref,
                                                    args.no_pubmed, args.no_doaj,
                                                    args.round_monetary, args.offsetting_mode)
        # Append the enriched row to the matching record type and an empty line
        # to all others, so the output files stay row-aligned with the input.
        for record_type, value in enriched_content.items():
            if record_type == result_type:
                value["content"].append(enriched_row)
                value["count"] += 1
            else:
                empty_line = ["" for x in value["content"][0]]
                value["content"].append(empty_line)

    csv_file.close()

    for record_type, value in enriched_content.items():
        if value["count"] > 0:
            with open('out_' + record_type + '.csv', 'w') as out:
                writer = oat.OpenAPCUnicodeWriter(out, oat.OPENAPC_STANDARD_QUOTEMASK,
                                                  True, True, True)
                writer.write_rows(value["content"])

    if not bufferedHandler.buffer:
        oat.print_g("Metadata enrichment successful, no errors occurred")
    else:
        oat.print_r("There were errors during the enrichment process:\n")
    # closing will implicitly flush the handler and print any buffered
    # messages to stderr
    bufferedHandler.close()
"file_path": "data/bpc.csv", "unused_fields": ["institution", "period", "license_ref"], "target_file": BPC_DATA, "row_length": 13, "has_issn": False, "has_isbn": True } } ISBNHANDLING = None if __name__ == '__main__': path.append(dirname(path[0])) import openapc_toolkit as oat import whitelists as wl ISBNHANDLING = oat.ISBNHandling("ISBNRangeFile.xml") for data_file, metadata in DATA_FILES.items(): metadata["file_path"] = join("..", "..", metadata["file_path"]) def fail(msg): oat.print_r(msg) else: path.append(join(path[0], "python")) import openapc_toolkit as oat from . import whitelists as wl ISBNHANDLING = oat.ISBNHandling("python/test/ISBNRangeFile.xml") class RowObject(object): """ A minimal container class to store contextual information along with csv rows.