def run():
    test = literal_eval(os.environ["BATCHPAR_test"])
    bucket = os.environ['BATCHPAR_bucket']
    batch_file = os.environ['BATCHPAR_batch_file']
    db_name = os.environ["BATCHPAR_db_name"]
    es_host = os.environ['BATCHPAR_outinfo']
    es_port = int(os.environ['BATCHPAR_out_port'])
    es_index = os.environ['BATCHPAR_out_index']
    es_type = os.environ['BATCHPAR_out_type']
    entity_type = os.environ["BATCHPAR_entity_type"]
    aws_auth_region = os.environ["BATCHPAR_aws_auth_region"]

    # Database setup
    logging.info('Retrieving engine connection')
    engine = get_mysql_engine("BATCHPAR_config", "mysqldb", db_name)
    logging.info('Building FOS lookup')
    fos_lookup = build_fos_lookup(engine, max_lvl=6)
    nf = NutsFinder()

    # Elasticsearch setup
    logging.info('Connecting to ES')
    strans_kwargs = {'filename': 'eurito/arxiv-eu.json',
                     'from_key': 'tier_0',
                     'to_key': 'tier_1',
                     'ignore': ['id']}
    es = ElasticsearchPlus(hosts=es_host,
                           port=es_port,
                           aws_auth_region=aws_auth_region,
                           no_commit=("AWSBATCHTEST" in os.environ),
                           entity_type=entity_type,
                           strans_kwargs=strans_kwargs,
                           null_empty_str=True,
                           coordinates_as_floats=True,
                           listify_terms=True,
                           do_sort=False,
                           ngram_fields=['textBody_abstract_article'])

    # Collect the batch file of article IDs from S3
    logging.info('Retrieving article ids')
    nrows = 20 if test else None
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, batch_file)
    art_ids = json.loads(obj.get()['Body'].read())
    logging.info(f"{len(art_ids)} article IDs retrieved from s3")

    # Get all GRID institutes, with country code, region,
    # name and coordinate lookups
    logging.info('Doing country lookup')
    country_lookup = get_country_region_lookup()
    eu_countries = get_eu_countries()
    with db_session(engine) as session:
        insts = session.query(Inst).all()
        grid_regions = {i.id: country_lookup[i.country_code]
                        for i in insts if i.country_code is not None}
        grid_countries = {i.id: i.country_code
                          for i in insts if i.country_code is not None}
        grid_institutes = {i.id: i.name for i in insts}
        grid_latlon = {i.id: (i.latitude, i.longitude) for i in insts}

    logging.info('Processing rows')
    with db_session(engine) as session:
        query = session.query(Art).filter(Art.id.in_(art_ids))
        for count, obj in enumerate(query.all()):
            row = object_to_dict(obj)

            # Extract year from date
            if row['created'] is not None:
                row['year'] = row['created'].year

            # Normalise citation count for searchkit
            if row['citation_count'] is None:
                row['citation_count'] = 0

            # Extract fields of study
            row['fields_of_study'] = make_fos_tree(row['fields_of_study'],
                                                   fos_lookup)
            row['_fields_of_study'] = [f for fields
                                       in row['fields_of_study']['nodes']
                                       for f in fields if f != []]

            # Format hierarchical fields as expected by searchkit
            row['categories'] = [cat['description']
                                 for cat in row.pop('categories')]
            institutes = row.pop('institutes')
            good_institutes = [i['institute_id'] for i in institutes
                               if i['matching_score'] > 0.9]

            # Add NUTS regions (levels 0 to 3), reverse-geocoded
            # from each institute's coordinates
            for inst_id in good_institutes:
                if inst_id not in grid_latlon:
                    continue
                lat, lon = grid_latlon[inst_id]
                if lat is None or lon is None:
                    continue
                nuts = nf.find(lat=lat, lon=lon)
                for i in range(0, 4):
                    name = f'nuts_{i}'
                    if name not in row:
                        row[name] = set()
                    for nut in nuts:
                        if nut['LEVL_CODE'] != i:
                            continue
                        row[name].add(nut['NUTS_ID'])
            # Sets are not JSON-serialisable, so convert to lists
            for i in range(0, 4):
                name = f'nuts_{i}'
                if name in row:
                    row[name] = list(row[name])

            # Add other geographies
            countries = set(grid_countries[inst_id]
                            for inst_id in good_institutes
                            if inst_id in grid_countries)
            regions = set(grid_regions[inst_id]
                          for inst_id in good_institutes
                          if inst_id in grid_regions)
            row['countries'] = list(countries)
            row['regions'] = [r for c, r in regions]
            row['is_eu'] = any(c in eu_countries for c in countries)

            # Pull out international institute info
            has_mn = any(is_multinational(inst, grid_countries.values())
                         for inst in good_institutes)
            row['has_multinational'] = has_mn

            # Generate author & institute properties
            mag_authors = row.pop('mag_authors')
            if mag_authors is None:
                row['authors'] = None
                row['institutes'] = None
            else:
                if all('author_order' in a for a in mag_authors):
                    mag_authors = sorted(mag_authors,
                                         key=lambda a: a['author_order'])
                row['authors'] = [author['author_name'].title()
                                  for author in mag_authors]
                gids = [author['affiliation_grid_id']
                        for author in mag_authors
                        if 'affiliation_grid_id' in author]
                row['institutes'] = [grid_institutes[g].title()
                                     for g in gids
                                     if g in grid_institutes
                                     and g in good_institutes]
            # Fall back on all well-matched institutes if none were
            # recovered from the author affiliations
            if row['institutes'] in (None, []):
                row['institutes'] = [grid_institutes[g].title()
                                     for g in good_institutes]

            # Index the row
            uid = row.pop('id')
            _row = es.index(index=es_index, doc_type=es_type,
                            id=uid, body=row)
            if not count % 1000:
                logging.info(f"{count} rows loaded to elasticsearch")

    logging.warning("Batch job complete.")
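The NUTS enrichment above uses the `nuts_finder` package to reverse-geocode each institute's coordinates into NUTS regions at every level. A minimal sketch of that step in isolation (the coordinates are illustrative only):

from nuts_finder import NutsFinder

nf = NutsFinder()
# Reverse-geocode a point near central London
for nut in nf.find(lat=51.5, lon=-0.1):
    # Each hit is a dict containing, among other fields, the NUTS
    # level ('LEVL_CODE') and the region code ('NUTS_ID')
    print(nut['LEVL_CODE'], nut['NUTS_ID'])

Note that `NutsFinder()` fetches the NUTS shapefiles on construction, which is why the loader builds it once, outside the row loop.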
def run():
    test = literal_eval(os.environ["BATCHPAR_test"])
    bucket = os.environ['BATCHPAR_bucket']
    batch_file = os.environ['BATCHPAR_batch_file']
    db_name = os.environ["BATCHPAR_db_name"]
    es_host = os.environ['BATCHPAR_outinfo']
    es_port = int(os.environ['BATCHPAR_out_port'])
    es_index = os.environ['BATCHPAR_out_index']
    es_type = os.environ['BATCHPAR_out_type']
    entity_type = os.environ["BATCHPAR_entity_type"]
    aws_auth_region = os.environ["BATCHPAR_aws_auth_region"]

    # Database setup
    engine = get_mysql_engine("BATCHPAR_config", "mysqldb", db_name)
    static_engine = get_mysql_engine("BATCHPAR_config", "mysqldb",
                                     "static_data")
    states_lookup = {row['state_code']: row['state_name']
                     for _, row in pd.read_sql_table('us_states_lookup',
                                                     static_engine).iterrows()}
    states_lookup["AE"] = "Armed Forces (Canada, Europe, Middle East)"
    states_lookup["AA"] = "Armed Forces (Americas)"
    states_lookup["AP"] = "Armed Forces (Pacific)"
    states_lookup[None] = None  # default lookup for non-US countries

    # Get continent lookup
    url = ("https://nesta-open-data.s3.eu-west-2.amazonaws.com"
           "/rwjf-viz/continent_codes_names.json")
    continent_lookup = {row["Code"]: row["Name"]
                        for row in requests.get(url).json()}
    continent_lookup[None] = None
    eu_countries = get_eu_countries()

    # Elasticsearch setup
    strans_kwargs = {'filename': 'eurito/crunchbase-eu.json',
                     'from_key': 'tier_0',
                     'to_key': 'tier_1',
                     'ignore': ['id']}
    es = ElasticsearchPlus(hosts=es_host,
                           port=es_port,
                           aws_auth_region=aws_auth_region,
                           no_commit=("AWSBATCHTEST" in os.environ),
                           entity_type=entity_type,
                           strans_kwargs=strans_kwargs,
                           null_empty_str=True,
                           coordinates_as_floats=True,
                           country_detection=True,
                           listify_terms=True,
                           terms_delimiters=("|",),
                           null_pairs={"currency_of_funding":
                                       "cost_of_funding"},
                           ngram_fields=['textBody_summary_organisation',
                                         'textBody_descriptive_organisation'])

    # Collect the batch file of organisation IDs from S3
    nrows = 20 if test else None
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, batch_file)
    org_ids = json.loads(obj.get()['Body'].read())
    logging.info(f"{len(org_ids)} organisations retrieved from s3")

    org_fields = set(c.name for c in Organization.__table__.columns)
    geo_fields = ['country_alpha_2', 'country_alpha_3', 'country_numeric',
                  'continent', 'latitude', 'longitude']

    # First get all funders
    investor_names = defaultdict(list)
    with db_session(engine) as session:
        rows = (session.query(Organization, FundingRound)
                .join(FundingRound,
                      Organization.id == FundingRound.company_id)
                .filter(Organization.id.in_(org_ids))
                .all())
        for row in rows:
            _id = row.Organization.id
            _investor_names = row.FundingRound.investor_names
            investor_names[_id] += parse_investor_names(_investor_names)

    # Pipe orgs to ES
    with db_session(engine) as session:
        rows = (session.query(Organization, Geographic)
                .join(Geographic,
                      Organization.location_id == Geographic.id)
                .filter(Organization.id.in_(org_ids))
                .limit(nrows)
                .all())
        for count, row in enumerate(rows, 1):
            # Convert the SQLAlchemy objects to a single dictionary
            row_combined = {k: v
                            for k, v in row.Organization.__dict__.items()
                            if k in org_fields}
            # All values are from 'funding_total_usd'
            row_combined['currency_of_funding'] = 'USD'
            row_combined.update({k: v
                                 for k, v in row.Geographic.__dict__.items()
                                 if k in geo_fields})
            row_combined['investor_names'] = \
                list(set(investor_names[row_combined['id']]))
            row_combined['is_eu'] = (row_combined['country_alpha_2']
                                     in eu_countries)

            # Reformat coordinates as expected by ES
            row_combined['coordinates'] = {
                'lat': row_combined.pop('latitude'),
                'lon': row_combined.pop('longitude')
            }

            # Iterate through categories and groups
            row_combined['category_list'] = []
            row_combined['category_group_list'] = []
            categories = (session.query(CategoryGroup)
                          .select_from(OrganizationCategory)
                          .join(CategoryGroup)
                          .filter(OrganizationCategory.organization_id
                                  == row.Organization.id)
                          .all())
            for category in categories:
                row_combined['category_list'].append(category.category_name)
                row_combined['category_group_list'] += [
                    group for group
                    in str(category.category_group_list).split('|')
                    if group != 'None']

            # Add fields for the US state and continent names
            state_code = row_combined['state_code']
            row_combined['placeName_state_organisation'] = \
                states_lookup[state_code]
            continent_code = row_combined['continent']
            row_combined['placeName_continent_organisation'] = \
                continent_lookup[continent_code]
            row_combined['updated_at'] = \
                row_combined['updated_at'].strftime('%Y-%m-%d')

            # Index the row
            uid = row_combined.pop('id')
            _row = es.index(index=es_index, doc_type=es_type,
                            id=uid, body=row_combined)
            if not count % 1000:
                logging.info(f"{count} rows loaded to elasticsearch")

    logging.warning("Batch job complete.")
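The category handling above splits Crunchbase's pipe-delimited `category_group_list` strings and drops missing values, which surface as the literal string 'None' once coerced by `str()`. A standalone sketch of that parsing step (the helper name `split_category_groups` is hypothetical, not a repo function):

def split_category_groups(raw):
    # Missing values arrive as None, which str() renders as 'None'
    return [group for group in str(raw).split('|') if group != 'None']

assert split_category_groups('Software|Data and Analytics') == \
    ['Software', 'Data and Analytics']
assert split_category_groups(None) == []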
def run():
    test = literal_eval(os.environ["BATCHPAR_test"])
    bucket = os.environ['BATCHPAR_bucket']
    batch_file = os.environ['BATCHPAR_batch_file']
    db_name = os.environ["BATCHPAR_db_name"]
    es_host = os.environ['BATCHPAR_outinfo']
    es_port = int(os.environ['BATCHPAR_out_port'])
    es_index = os.environ['BATCHPAR_out_index']
    es_type = os.environ['BATCHPAR_out_type']
    entity_type = os.environ["BATCHPAR_entity_type"]
    aws_auth_region = os.environ["BATCHPAR_aws_auth_region"]

    # Database setup
    logging.info('Retrieving engine connection')
    engine = get_mysql_engine("BATCHPAR_config", "mysqldb", db_name)
    _engine = get_mysql_engine("BATCHPAR_config", "readonly",
                               "patstat_2019_05_13")

    # Elasticsearch setup
    logging.info('Connecting to ES')
    strans_kwargs = {'filename': 'eurito/patstat-eu.json',
                     'from_key': 'tier_0',
                     'to_key': 'tier_1',
                     'ignore': ['id']}
    es = ElasticsearchPlus(hosts=es_host,
                           port=es_port,
                           aws_auth_region=aws_auth_region,
                           no_commit=("AWSBATCHTEST" in os.environ),
                           entity_type=entity_type,
                           strans_kwargs=strans_kwargs,
                           auto_translate=True,
                           auto_translate_kwargs={'min_len': 20},
                           null_empty_str=True,
                           coordinates_as_floats=True,
                           do_sort=True,
                           ngram_fields=['textBody_abstract_patent'])

    # Collect the batch file of patent family IDs from S3
    logging.info('Retrieving patent family ids')
    nrows = 20 if test else None
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, batch_file)
    docdb_fam_ids = json.loads(obj.get()['Body'].read())
    logging.info(f"{len(docdb_fam_ids)} patent family IDs "
                 "retrieved from s3")

    eu_countries = get_eu_countries()

    logging.info('Processing rows')
    _filter = ApplnFamily.docdb_family_id.in_(docdb_fam_ids)
    with db_session(engine) as session:
        for obj in session.query(ApplnFamily).filter(_filter).all():
            row = object_to_dict(obj)
            appln_ids = row.pop('appln_id')
            # Pull the raw PATSTAT metadata for each application
            # in this family from the read-only PATSTAT database
            with db_session(_engine) as _session:
                _titles = metadata(Tls202ApplnTitle, _session, appln_ids)
                _abstrs = metadata(Tls203ApplnAbstr, _session, appln_ids)
                ipcs = metadata(Tls209ApplnIpc, _session, appln_ids)
                nace2s = metadata(Tls229ApplnNace2, _session, appln_ids)
                techs = metadata(Tls230ApplnTechnField, _session,
                                 appln_ids)
                # Get persons
                _pers_applns = metadata(Tls207PersAppln, _session,
                                        appln_ids)
                pers_ids = set(pa['person_id'] for pa in _pers_applns)
                persons = metadata(Tls906Person, _session, pers_ids,
                                   field_selector=Tls906Person.person_id)

            title = select_text(_titles, 'appln_title_lg', 'appln_title')
            abstr = select_text(_abstrs, 'appln_abstract_lg',
                                'appln_abstract')

            # Flatten the lookups to unique values
            ipcs = list(set(i['ipc_class_symbol'].split()[0]
                            for i in ipcs))
            nace2s = list(set(n['nace2_code'] for n in nace2s))
            techs = list(set(t['techn_field_nr'] for t in techs))
            ctrys = list(set(p['person_ctry_code'] for p in persons))
            nuts = list(set(p['nuts'] for p in persons))
            is_eu = any(c in eu_countries for c in ctrys)

            # Index the data
            row = dict(title=title, abstract=abstr, ipc=ipcs,
                       nace2=nace2s, tech=techs, ctry=ctrys,
                       nuts=nuts, is_eu=is_eu, **row)
            uid = row.pop('docdb_family_id')
            _row = es.index(index=es_index, doc_type=es_type,
                            id=uid, body=row)

    logging.warning("Batch job complete.")
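`metadata` and `select_text` are helpers from the host repo whose implementations are not shown here. As a rough guide to what the `metadata` calls above do, here is a hypothetical reconstruction, assuming it filters a PATSTAT ORM table by an ID column and returns detached dicts via the repo's `object_to_dict`:

def metadata(orm, session, ids, field_selector=None):
    # Assumption: appln_id is the default filter column unless a
    # field_selector is given (as for Tls906Person above)
    if field_selector is None:
        field_selector = orm.appln_id
    rows = session.query(orm).filter(field_selector.in_(ids)).all()
    # Convert to plain dicts so the rows remain usable after
    # the session closes
    return [object_to_dict(row) for row in rows]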