def build_shorttexts_dataset(crosssite_usernames, site):
    siteNameStr = str(site.siteName)

    # Load or create/initialize the spreadsheet of users' short texts
    shorttexts_csv_path = __get_shorttexts_csv_path__(site)
    csv_string = "cross-site (Wikipedia and "+siteNameStr+") users' short texts"
    headers = [COLUMN_SHORTTEXT_ID, COLUMN_SHORTTEXT_STRING, COLUMN_USERNAME]
    shorttexts_in_csv = csv_util.load_or_initialize_csv(shorttexts_csv_path, csv_string, headers, COLUMN_SHORTTEXT_ID)
    usernames_in_csv = list(set(csv_util.get_all_column_values(shorttexts_csv_path, COLUMN_USERNAME)))

    # Only need to fetch the short texts for usernames that we haven't already done
    users_todo = [u for u in crosssite_usernames if u not in usernames_in_csv]
    if len(users_todo)==0:
        print "Short texts fetched and stored for all "+\
            str(len(usernames_in_csv))+" confirmed cross-site editors. Exiting."
        return
    print str(len(crosssite_usernames))+" cross-site usernames total, and "+\
        str(len(users_todo))+" users not yet in spreadsheet of short texts"

    # Prompt how many users to fetch short texts for
    desired_num_users = prompt_and_print.prompt_num_entries_to_build(csv_string, usernames_in_csv)
    num_to_append = desired_num_users - len(usernames_in_csv)
    if len(users_todo) < num_to_append:
        print "Only "+str(len(users_todo))+" cross-site usernames available. If you "+\
            "want "+str(desired_num_users)+" total users' short texts in the short text csv, you'll "+\
            "have to re-run the script and choose to first fetch more cross-site usernames."

    shorttexts_rows = []
    progress_count = 1
    for username in users_todo:
        try:
            if len(usernames_in_csv) >= desired_num_users:
                # have enough, so exit
                break
            if progress_count%10==0:
                print "Querying for short texts posted on "+siteNameStr+" by cross-site usernames..."+\
                    " Number of usernames whose short texts have been fetched so far: "+str(progress_count)
            progress_count = progress_count+1

            # For the usernames that have been confirmed to belong to the same
            # individual person on Wikipedia and the given site, get the short
            # texts those users have written on that site
            shorttexts_response = site.get_user_short_texts(username) # fetch from site
            user_shorttexts = shorttexts_response[site.get_shorttext_response_key()]
            for shorttext_id in user_shorttexts:
                try:
                    # create a row for this user's short text
                    shorttext_text = user_shorttexts[shorttext_id].decode('utf-8')
                    shorttext_row = [shorttext_id, shorttext_text, username]
                    shorttexts_rows.append(shorttext_row)
                except:
                    continue # ignore problematic short texts

            # keep track that we'll be adding this username to the csv
            usernames_in_csv.append(username)

            # also check whether the rate limit has been reached
            rate_limited = shorttexts_response[site.get_rate_limit_key()]
            if rate_limited:
                break # reached rate limit, so stop
        except:
            continue # ignore problematic users

    # Update the spreadsheet with any new users' short texts that have been fetched
    csv_util.append_to_spreadsheet(csv_string, shorttexts_csv_path, shorttexts_in_csv, shorttexts_rows)
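# A minimal sketch of the site interface that build_shorttexts_dataset relies
# on, inferred from the calls above. The class name SiteStub and the literal
# response keys ('shorttexts', 'rate_limited') are illustrative assumptions,
# not names defined elsewhere in this module.
class SiteStub(object):
    siteName = "examplesite" # hypothetical site name

    def get_shorttext_response_key(self):
        # key under which get_user_short_texts returns a dict
        # mapping shorttext_id -> encoded short text string
        return 'shorttexts'

    def get_rate_limit_key(self):
        # key under which get_user_short_texts returns a boolean
        # indicating whether the site's rate limit was reached
        return 'rate_limited'

    def get_user_short_texts(self, username):
        # a real implementation would query the site's API; this stub
        # returns a single byte string (decoded with utf-8 above) and
        # reports that no rate limit was reached
        return {'shorttexts': {'st1': 'example short text'},
                'rate_limited': False}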
def build_entities_dataset(shorttext_rows, site):
    siteNameStr = str(site.siteName)

    # Load or create/initialize the spreadsheet of detected entities
    entity_csv_path = __get_entities_csv_path__(site)
    output_str = __get_detected_entities_output_str__(site)
    headers = [COLUMN_ENTITY_ID, __COLUMN_ENTITY_STRING__, COLUMN_SHORTTEXT_ID, COLUMN_SHORTTEXT_STRING, COLUMN_USERNAME]
    entities_in_csv = csv_util.load_or_initialize_csv(entity_csv_path, output_str, headers, COLUMN_ENTITY_ID)
    shorttexts_in_csv = csv_util.get_all_column_values(entity_csv_path, COLUMN_SHORTTEXT_ID)
    print "A total of "+str(len(shorttext_rows))+" short texts available to detect and resolve entities in..."

    # Load the cache of ambiguous entity objects
    ne_objs = pkl_util.load_pickle(output_str, __get_ne_cache_path__(site))
    if ne_objs is None:
        ne_objs = []

    # Load the cache of short texts that contain no entities,
    # so we don't need to keep querying services with them
    entityless_shorttexts = get_entityless_shorttexts(site)

    # Load the cache of problematic short texts that we can
    # go back and look at later
    problematic_shorttexts = get_problematic_shorttexts(site)

    # Prompt how many entities to detect
    desired_num_entities = prompt_and_print.prompt_num_entries_to_build(output_str, shorttexts_in_csv)

    entities_rows = []
    progress_count = 1
    all_shorttexts_done = True
    for shorttext_row in shorttext_rows:
        shorttext_id = shorttext_row[0]
        if shorttext_id in shorttexts_in_csv or shorttext_id in entityless_shorttexts or shorttext_id in problematic_shorttexts:
            # already did entities for this short text (and either successfully
            # detected some, successfully detected none, or encountered an error)
            continue
        all_shorttexts_done = False
        try:
            if len(entities_in_csv) >= desired_num_entities:
                # have enough, so exit
                break
            if progress_count%10==0:
                print "Detecting named entities in short texts posted on "+siteNameStr+\
                    " by cross-site usernames... Number of short texts whose entities "+\
                    "have been fetched so far: "+str(len(entities_in_csv))
            progress_count = progress_count+1

            original_shorttext = shorttext_row[1]
            username = shorttext_row[2]

            # Clean the short text before attempting to detect entities in it
            clean_shorttext = text_util.format_text_for_NER(original_shorttext, site)
            if clean_shorttext=='':
                # whole string was invalid, perhaps a URL or
                # some other content that gets totally filtered
                problematic_shorttexts.append(shorttext_id)
                continue

            # Get the entities contained in this short text
            detected_entities = named_entity_finder.find_and_construct_named_entities(shorttext_id, original_shorttext, username, site)
            if len(detected_entities)==0:
                entityless_shorttexts.append(shorttext_id)
            for ne_obj in detected_entities:
                # cache this entity object
                ne_objs.append(ne_obj)

                # make a row in the spreadsheet for this entity
                ne_id = ne_obj.get_entity_id()
                entity_row = [ne_id, ne_obj.surface_form, shorttext_id, original_shorttext, username]
                entities_rows.append(entity_row)

                # keep track that we'll be adding this entity to the csv
                entities_in_csv.append(ne_id)
        except Exception as st_e:
            print "Problematic short text "+str(shorttext_row[1]), st_e
            if 'referenced before assignment' in str(st_e):
                raise # it's a server error, so we need to stop
            problematic_shorttexts.append(shorttext_id)
            continue

    # Update the spreadsheet with any new entities that have been detected
    csv_util.append_to_spreadsheet(output_str, entity_csv_path, entities_in_csv, entities_rows, False)

    # Update the caches of ambiguous surface form objects, entity-less
    # short texts, and problematic short texts
    pkl_util.write_pickle(output_str, ne_objs, __get_ne_cache_path__(site))
    pkl_util.write_pickle(__entityless_output_str__, entityless_shorttexts, __get_entityless_cache_path__(site))
    pkl_util.write_pickle(__problematic_output_str__, problematic_shorttexts, __get_problematic_cache_path__(site))
    print "Cached a total of "+str(len(ne_objs))+" ambiguous named entities"
    if all_shorttexts_done:
        print "Completed detecting and resolving entities in all short texts available."
    else:
        print "More short texts available to detect and resolve entities for."
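# A hedged sketch of how the two builders above might be chained into one
# pipeline. csv_util.get_all_rows is a hypothetical helper assumed here for
# re-reading the short text csv; only load_or_initialize_csv,
# get_all_column_values, and append_to_spreadsheet appear in the code above.
def __demo_build_pipeline__(site, crosssite_usernames):
    # Step 1: fetch and store the short texts written by the confirmed
    # cross-site users (resumes where a previous run left off, since
    # usernames already in the csv are skipped)
    build_shorttexts_dataset(crosssite_usernames, site)

    # Step 2: re-read the [shorttext_id, shorttext_string, username] rows
    # just written and detect/resolve the named entities they contain
    shorttext_rows = csv_util.get_all_rows(__get_shorttexts_csv_path__(site)) # hypothetical helper
    build_entities_dataset(shorttext_rows, site)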