def transform(name=None, input_file=None): """ function is responsible for transforming raw datasets into Collections """ if not name: # user has not provided a scraper name to get collections with logger.error('Scraper/Office name not provided. Cannot generate collections') sys.exit(1) try: # load the Graph representing the deduplicated scraped datasets GraphWrapper.load_graph(file_dir_path=Path(OUTPUT_DIR, 'graphs', name), file_stem_name=f'{name}.deduplicate') except: # load the Graph representing the scraped datasets GraphWrapper.load_graph(file_dir_path=Path(OUTPUT_DIR, 'graphs', name), file_stem_name=name) # get the loaded graph graph = GraphWrapper.get_graph() # identify collections within the graph identify_collections_within_graph(graph) # link dataset vertices to their appropriate collection(s) within the graph link_datasets_to_collections_in_graph(graph) # write the identified collections to the raw dataset files add_collections_to_raw_datasets(graph=graph, output_dir=OUTPUT_DIR) # write the graph to files # this method is explicitly thread/proccess safe, so no need for lock GraphWrapper.write_graph(file_dir_path=Path(os.getenv('ED_OUTPUT_PATH'), "graphs", f"{name}"), file_stem_name=f'{name}.collections') # create the page legend file for this graph GraphWrapper.create_graph_page_legend(file_dir_path=Path(os.getenv('ED_OUTPUT_PATH'), "graphs", f"{name}"), file_stem_name=f'{name}.collections') # create the collections.json file collections_list = [] # holds the list of collections acquired from graph with graph.graph_lock: for collection in graph.vs.select(is_collection_eq=True, name_ne='base_vertex'): collections_list.append({'collection_id': collection['collection_id'], 'collection_title': collection['title'], 'collection_url': collection['name']}) # get a list of non-duplicate collections collections_list = get_distinct_collections_from(collections_list, min_occurence_counter=1) # get the path were the gotten Collections will be saved to on local disk file_output_path = f'{CURRENT_TRANSFORMER_OUTPUT_DIR}/{(name or "all")}.collections.json' # write to file the collections gotten from 'name' scraped output h.write_file(file_output_path, collections_list) # write file the collections gotten from 'name' scraped out to S3 bucket h.upload_to_s3_if_configured(file_output_path, f'{(name or "all")}.collections.json')
def transform(name=None, input_file=None): """ function is responsible for transofrming raw datasets into Sources """ if input_file is None: # no input file specified file_list = h.traverse_output( name) # run through all the files in 'name' directory else: try: with open(input_file, 'r') as fp: file_list = [line.rstrip() for line in fp] except: logger.warning( f'Cannot read from list of output files at {input_file}, falling back to all collected data!' ) file_list = h.traverse_output(name) sources_list = [ ] # holds the list of sources acquired from 'name' scraper directory # loop through filepath in file list for file_path in file_list: # read the json data in each filepath data = h.read_file(file_path) if not data: # if data is None continue # retrieve source from dataset source = extract_source_from(dataset=data, use_key='collection') if not source: # source could not be retrieved continue # add source to list sources_list.append(source) # get a list of non-duplicate Sources sources_list = get_distinct_sources_from(sources_list, min_occurence_counter=2) # get the path were the gotten Sources will be saved to on local disk file_output_path = f'{CURRENT_TRANSFORMER_OUTPUT_DIR}/{(name or "all")}.sources.json' # write to file the Sources gotten from 'name' scraped output h.write_file(file_output_path, sources_list) # write file the Sources gotten from 'name' scraped out to S3 bucket h.upload_to_s3_if_configured(file_output_path, f'{(name or "all")}.sources.json')
def transform(name, input_file=None):
    if input_file is None:
        file_list = traverse_output(name)
    else:
        try:
            with open(input_file, 'r') as fp:
                file_list = [line.rstrip() for line in fp]
        except Exception:
            logger.warning(
                f'Cannot read from list of output files at {input_file}, falling back to all collected data!'
            )
            file_list = traverse_output(name)

    logger.debug(f'{len(file_list)} files to transform.')

    catalog = Catalog()
    catalog.catalog_id = "datopian_data_json_" + name

    datasets_number = 0
    resources_number = 0

    for file_path in file_list:
        data = read_file(file_path)
        if not data:
            continue

        dataset = _transform_scraped_dataset(data, name)
        catalog.datasets.append(dataset)

        datasets_number += 1
        resources_number += len(dataset.distribution)

    logger.debug('{} datasets transformed.'.format(datasets_number))
    logger.debug('{} resources transformed.'.format(resources_number))

    output_path = h.get_output_path('datajson')
    file_path = os.path.join(output_path, f'{name}.data.json')
    with open(file_path, 'w') as output:
        output.write(catalog.dump())
    logger.debug(f'Output file: {file_path}')

    h.upload_to_s3_if_configured(file_path, f'{name}.data.json')
def transform(name=None, input_file=None, use_raw_datasets=False) -> pd.DataFrame:
    """
    Transforms the datajson/datasets into a dataframe/csv containing data to be
    used for RAG analyses on the efficacy of the scraping toolkit in getting
    viable/usable structured data from the unstructured data source.

    By default the function operates on the datajson, i.e. the json that is
    ready to be ingested by the ckan harvester; however, setting
    'use_raw_datasets' to True means the function will operate on the raw,
    parsed data which was scraped from the data source.

    PARAMETERS
    - name: if provided, must correspond to the name of a scraper.
      If 'use_raw_datasets' is False, a file with the format '<name>.data.json'
      will be located in the datajson subdirectory of 'ED_OUTPUT_PATH/transformers'
      and read.
      If 'use_raw_datasets' is True, the dataset files contained in the 'name'
      scraper's subdirectory of 'ED_OUTPUT_PATH/scrapers' will be read.
    - input_file: if provided, must be a file containing a list of datajson or
      dataset files to read.

    If no parameters are provided (the default behaviour), all datajson files
    contained in the datajson subdirectory of 'ED_OUTPUT_PATH/transformers'
    will be read.

    Returns the DataFrame containing the transformed datajson/dataset files.
    """

    file_list = []  # holds the list of files which contain datajson/datasets
    datasets_list = []  # holds the data jsons gotten from files

    if use_raw_datasets:  # work on raw datasets
        if not input_file:  # no input file provided
            # loop over the directory structure
            if name:  # loop over the <name> scraper output e.g. nces
                file_list = h.traverse_output(name)
            else:  # loop over everything
                file_list = h.traverse_output(None)
        else:  # input file provided
            # read input_file, which is a list of files
            with open(input_file, 'r') as fp:
                try:
                    file_list = [line.rstrip() for line in fp]
                except Exception:
                    logger.warning(
                        f'Cannot read from list of output files at {input_file}, falling back to all collected data!'
                    )
                    file_list = h.traverse_output(None)
    else:  # work with processed/transformed datajson
        if not input_file:  # no input file provided
            if name:  # name of processed datajson is provided, so get the file path
                file_list.append(
                    Path(h.get_output_path('datajson'), f'{name}.data.json'))
            else:  # name of processed datajson not provided
                file_list.extend(
                    Path(h.get_output_path('datajson')).glob('*.json'))
        else:  # input file provided
            # read input_file, which is a list of files
            with open(input_file, 'r') as fp:
                try:
                    file_list = [line.rstrip() for line in fp]
                except Exception:
                    logger.warning(
                        f'Cannot read from list of output files at {input_file}, falling back to all collected data!'
                    )
                    file_list.extend(
                        Path(h.get_output_path('datajson')).glob('*.json'))

    if use_raw_datasets:  # work on raw datasets
        # read the contents in file_list
        for file_path in file_list:
            # read json from the file using the helper
            data = h.read_file(file_path)
            # compute the weighted score of the dataset
            compute_score(data, append_score=True, use_raw_datasets=True)
            datasets_list.append(data)
    else:  # work with processed json data
        # read the contents in file_list
        for file_path in file_list:
            # read json from the file using the helper
            data = h.read_file(file_path)
            # loop through the datasets contained in data
            for dataset_dict in data.get('dataset', []):
                # compute the weighted score of the dataset
                compute_score(dataset_dict, append_score=True, use_raw_datasets=False)
                datasets_list.append(dataset_dict)

    if use_raw_datasets:  # work on raw datasets
        # map the datasets to pandas rows
        dataset_rows_list = map(lambda dataset: [dataset.get('publisher'),
                                                 dataset.get('source_url'),
                                                 dataset.get('_weighted_score'),
                                                 dataset.get('_weighted_score_ratio')],
                                datasets_list)
    else:  # work on processed datajson
        # map the datasets to pandas rows
        dataset_rows_list = map(lambda dataset: [dataset.get('publisher')['name'],
                                                 dataset.get('scraped_from'),
                                                 dataset.get('_weighted_score'),
                                                 dataset.get('_weighted_score_ratio')],
                                datasets_list)

    # create the pandas df
    weighted_datasets_scores_df = pd.DataFrame(dataset_rows_list,
                                               columns=['publisher',
                                                        'source url',
                                                        'weighted score',
                                                        'weighted score ratio'])

    # create a df that incorporates domain info
    weighted_datasets_scores_df2 = pd.DataFrame(columns=['domain'])
    weighted_datasets_scores_df2['domain'] = weighted_datasets_scores_df.apply(
        lambda row: urllib.parse.urlparse(row['source url']).hostname
        .replace('www2.', 'www.').replace('www.', ''),
        axis=1)
    weighted_datasets_scores_df2['publisher'] = weighted_datasets_scores_df['publisher']
    weighted_datasets_scores_df2['source url'] = weighted_datasets_scores_df['source url']
    weighted_datasets_scores_df2['weighted score'] = weighted_datasets_scores_df['weighted score']
    weighted_datasets_scores_df2['weighted score ratio'] = weighted_datasets_scores_df['weighted score ratio']

    # create the dated output directory
    output_dated_dir = os.path.join(
        OUTPUT_DIR, f'{dt.now().year}-{dt.now().month}-{dt.now().day}')
    Path(output_dated_dir).mkdir(parents=True, exist_ok=True)

    # create the output csv file name
    if use_raw_datasets:  # use raw datasets
        output_filename = "datasets_weighted_scores_{}_raw.csv".format(name or "all")
    else:  # use processed datajson
        output_filename = "datasets_weighted_scores_{}.csv".format(name or "all")

    # create the full path where the file will be written
    fullpath = os.path.join(OUTPUT_DIR, output_filename)
    # write the dataframe to csv
    weighted_datasets_scores_df2.to_csv(fullpath, index=False)
    weighted_datasets_scores_df2.to_csv(os.path.join(output_dated_dir, output_filename),
                                        index=False)
    # upload the csv to the S3 bucket
    h.upload_to_s3_if_configured(fullpath, f'{output_filename}')

    return weighted_datasets_scores_df2
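# Usage sketch (illustrative, not part of the original module): build the
# weighted-score dataframe from the processed datajson of one scraper and
# summarise the scores per domain. 'nces' is a hypothetical scraper name, and
# the groupby shown is just one way of inspecting the returned DataFrame.
#
#     scores_df = transform(name='nces', use_raw_datasets=False)
#     print(scores_df.groupby('domain')['weighted score'].mean())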
def transform(name, input_file=None):
    if input_file is None:
        file_list = traverse_output(name)
    else:
        try:
            with open(input_file, 'r') as fp:
                file_list = [line.rstrip() for line in fp]
        except Exception:
            logger.warning(
                f'Cannot read from list of output files at {input_file}, falling back to all collected data!'
            )
            file_list = traverse_output(name)

    logger.debug(f'{len(file_list)} files to transform.')

    catalog = Catalog()
    catalog.catalog_id = "datopian_data_json_" + (name or 'all')

    # keep track of / stats for items transformed
    datasets_number = 0
    resources_number = 0
    sources_number = 0
    collections_number = 0

    # loop through the list of filepaths to be transformed
    for file_path in file_list:
        data = read_file(file_path)
        if not data:
            continue

        dataset = _transform_scraped_dataset(data, name)
        if not dataset:  # no dataset was returned (i.e. dataset probably marked for removal)
            continue
        catalog.datasets.append(dataset)

        datasets_number += 1
        resources_number += len(dataset.distribution)

    # get the list of Sources for this catalog
    catalog_sources = list()
    try:
        # read the list of preprocessed (but still 'raw') Sources from file
        catalog_sources = read_file(
            f"{h.get_output_path('sources')}/{(name or 'all')}.sources.json")
        # transform the list of preprocessed Sources into a list of Source objects acceptable for the catalog object
        catalog_sources = _transform_preprocessed_sources(catalog_sources)
    except Exception:
        logger.warning(
            f'"sources transformer" output file ({(name or "all")}.sources.json) not found. This datajson output will have no "source" field'
        )

    # add the list of Source objects to the catalog
    catalog.sources = catalog_sources or []
    # update the number of transformed Sources
    sources_number = len(catalog_sources or [])

    # get the list of Collections for this catalog
    catalog_collections = list()
    try:
        # read the list of preprocessed (but still 'raw') Collections from file
        catalog_collections = read_file(
            f"{h.get_output_path('collections')}/{(name or 'all')}.collections.json")
        # transform the list of preprocessed Collections into a list of Collection objects acceptable for the catalog object
        catalog_collections = _transform_preprocessed_collections(catalog_collections)
    except Exception:
        logger.warning(
            f'"collections transformer" output file ({(name or "all")}.collections.json) not found. This datajson output will have no "collection" field'
        )

    # add the list of Collection objects to the catalog
    catalog.collections = catalog_collections or []
    # update the number of transformed Collections
    collections_number = len(catalog_collections or [])

    # validate the catalog object
    if not catalog.validate_catalog(pls_fix=True):
        logger.error("Catalog validation failed! Ending transform process")
        return

    logger.debug('{} Sources transformed.'.format(sources_number))
    logger.debug('{} Collections transformed.'.format(collections_number))
    logger.debug('{} datasets transformed.'.format(datasets_number))
    logger.debug('{} resources transformed.'.format(resources_number))

    output_path = h.get_output_path('datajson')
    file_path = os.path.join(output_path, f'{(name or "all")}.data.json')
    with open(file_path, 'w') as output:
        output.write(catalog.dump())
    logger.debug(f'Output file: {file_path}')

    h.upload_to_s3_if_configured(file_path, f'{(name or "all")}.data.json')
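# Usage sketch (illustrative, not part of the original module): the datajson
# transformer folds in the output of the sources and collections transformers,
# so those should normally be run first. 'nces' is a hypothetical scraper name;
# the resulting catalog is written to the datajson subdirectory of
# 'ED_OUTPUT_PATH/transformers' as 'nces.data.json'.
#
#     transform(name='nces')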
def compare():
    air_csv = './tools/data/AIR.csv'

    summary = Summary()

    if path.exists('./output/deduplicated_all.lst'):
        output_list_file = './output/deduplicated_all.lst'
    else:
        output_list_file = None

    try:
        print("Generating AIR data frame... ", end='', flush=True)
        summary.air_df = pd.read_csv(air_csv)
        print('done.')

        print("Generating Datopian data frame... ", end='', flush=True)
        try:
            summary.out_df = summary.generate_output_df(use_dump=True,
                                                        output_list_file=output_list_file)
        except Exception:
            summary.out_df = summary.generate_output_df(use_dump=False,
                                                        output_list_file=output_list_file)
        print('done.')
    except Exception as e:
        print(e)

    summary.calculate_totals()

    # if name == 'nt':
    #     _ = system('cls')
    # else:
    #     _ = system('clear')

    statistics = {
        'total': {
            'datopian': {
                'datasets': int(summary.total['out_datasets']),
                'resources': int(summary.total['out_resources']),
                'pages': summary.get_resources_dict('out', 'source_url'),
                'datasets_by_office': summary.get_datasets_dict(),
                'resources_by_office': summary.get_resources_dict('out', 'url')
            },
            'air': {
                'datasets': 0,
                'resources': int(summary.total['air_resources']),
                'pages': summary.get_resources_dict('air', 'source_url'),
                'resources_by_office': summary.get_resources_dict('air', 'url')
            }
        },
        'intersections': {
            'datopian_only': {
                'pages': int(summary.get_values_only_in('out', 'source_url')),
                'resources': int(summary.get_values_only_in('out', 'url'))
            },
            'air_only': {
                'pages': int(summary.get_values_only_in('air', 'source_url')),
                'resources': int(summary.get_values_only_in('air', 'url'))
            }
        }
    }

    print(
        f"Total number of raw datasets: {summary.total['out_datasets']}\n"
        f"\n---\n\n"
        f"Total number of raw datasets per scraper: \n\n{summary.get_datasets_table()}\n"
        f"\n---\n\n"
        f"Total number of resources:\n"
        f"     AIR: {summary.total['air_resources']}\n"
        f"Datopian: {summary.total['out_resources']}\n"
        f"\n---\n\n"
        f"Total number of resources by office: \n{summary.get_resources_table(column='url')}\n"
        f"\n---\n\n"
        f"Total number of pages by office: \n{summary.get_resources_table(column='source_url')}\n"
        f"\n---\n\n"
        f"Pages scraped by AIR only: {summary.get_values_only_in('air', 'source_url')}\n"
        f"Pages scraped by Datopian only: {summary.get_values_only_in('out', 'source_url')}\n"
        f"\n---\n"
        f"Resources collected by AIR only: {summary.get_values_only_in('air', 'url')}\n"
        f"Resources collected by Datopian only: {summary.get_values_only_in('out', 'url')}\n"
        f"\n---\n\n"
        f"CSV file with all the resources was dumped in {summary.dump('./output/datopian.csv')}"
    )

    with open('./output/statistics.json', 'w') as stats_file:
        json.dump(statistics, stats_file)

    upload_to_s3_if_configured('./output/statistics.json', 'compare-statistics.json')
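# Usage sketch (illustrative, not part of the original module): compare() takes
# no arguments but expects './tools/data/AIR.csv' and, optionally,
# './output/deduplicated_all.lst' to exist relative to the working directory.
# It prints a comparison summary and writes './output/statistics.json',
# uploading it to S3 as 'compare-statistics.json' if S3 is configured.
#
#     compare()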