# Standard-library imports used in this section; the project-specific modules
# (csv_util, short_text_websites, crosssite_username_dataset_mgr, entity_dataset_mgr)
# and the csv path/column constants are assumed to be defined elsewhere in this module.
import random
from collections import defaultdict


def make_usernames_csv_for_turk():
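    """ Writes the unconfirmed cross-site usernames for the Twitter dataset to a
    csv spreadsheet, one username per row, so they can be uploaded to Mechanical
    Turk for human evaluation. """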
    print "Creating csv file of usernames for evaluation on Mechanical Turk..."
    twitter_site = short_text_websites.get_twitter_site()
    usernames_to_evaluate = crosssite_username_dataset_mgr.get_usernames_to_evaluate_mturk(twitter_site)
    print str(len(usernames_to_evaluate))+" unconfirmed usernames available:"
    print usernames_to_evaluate
    
    # dump to csv
    username_rows = [[username] for username in usernames_to_evaluate]
    csv_util.write_to_spreadsheet(__usernames_to_judge_csv_path__, username_rows, None)


def __update_confirmed_usernames__(site, confirmed_usernames, confirmed_value):
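    """ Sets the "same individual" column to confirmed_value for every row of the
    given site's usernames spreadsheet whose username appears in confirmed_usernames,
    then writes the updated rows back to the same csv file. """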
    usernames_csv_path = __get_usernames_csv_path__(site)
    headers = csv_util.query_csv_for_headers(usernames_csv_path)
    
    username_col_index = headers.index(COLUMN_USERNAME)
    confirmed_col_index = headers.index(__COLUMN_SAME_INDIVIDUAL__)
    
    updated_rows = []
    rows = csv_util.query_csv_for_rows(usernames_csv_path, False)
    for row in rows:
        if row[username_col_index] in confirmed_usernames:
            row[confirmed_col_index] = confirmed_value
        updated_rows.append(row)
    csv_util.write_to_spreadsheet(usernames_csv_path, updated_rows)


def make_tweet_entities_csv_for_turk():
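    """ Builds the csv spreadsheet of entity disambiguation tasks to upload to
    Mechanical Turk. Skips (entity, candidate) pairs that already have annotator
    judgments, keeps at most one named entity per short text and at most 50
    entities per user, and writes one row per candidate link for each entity. """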
    twitter_site = short_text_websites.get_twitter_site()
    entities_to_evaluate = entity_dataset_mgr.get_valid_ne_candidates(twitter_site)
    if entities_to_evaluate is None:
        print "No ambiguous entities + candidates in cache. Run run_all_dataset_generators "+\
        "script and choose to first fetch and store more entities from short texts."
        return
    
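    # Collect the (entity id, candidate link) pairs that have already been judged,
    # according to the results csv, so we don't create duplicate tasks for them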
    judged_rows_plus_headers = csv_util.query_csv_for_rows(__entities_results_csv_path__, False)
    already_judged = set() # set of (entity id, candidate link) tuples
    entity_id_col = candidate_link_col = None
    for judged_row_num, judged_row in enumerate(judged_rows_plus_headers):
        try:
            if judged_row_num==0: # row 0 is the header
                entity_id_col = judged_row.index('Input.entity_id')
                candidate_link_col = judged_row.index('Input.candidate_link')
            else:
                already_judged.add((judged_row[entity_id_col], judged_row[candidate_link_col]))
        except Exception:
            continue # just ignore a problematic row
        
    # Determine what entity+candidate tasks we actually want to write to a spreadsheet 
    # and send to mturk since we don't have resources for unlimited mturk tasks
    tasks = {} # entity id -> candidate judgment tasks we actually want performed
    user_entities = defaultdict(list) # username -> [NamedEntity obj]
    done_shorttexts = set() # ids of short texts we've already taken an entity from
    random.shuffle(entities_to_evaluate) # so we get a random subset of a user's entities
    for ne_obj in entities_to_evaluate:
        
        # "40 nouns usually enough to establish statistically significant 
        # differences between WSD algorithms" (Santamaria et al., 2010)
        username = ne_obj.username
        if len(user_entities[username]) > 50:
            continue # have enough entities for this user
        
        # limiting our dataset to one named entity per short text
        shorttext_id = ne_obj.shorttext_id
        if shorttext_id in done_shorttexts:
            continue
        
        # no need to create tasks for candidates we already have annotator judgments for
        entity_id = ne_obj.get_entity_id()
        candidate_URLs = ne_obj.get_candidate_wikiURLs()
        valid_candidate_tasks = [candidate_URL for candidate_URL in candidate_URLs
                                 if (entity_id, candidate_URL) not in already_judged]
        if len(valid_candidate_tasks)==0:
            continue # already have annotator judgments for all of this entity's candidates
        if len(candidate_URLs) < 2:
            # this would be a non-ambiguous entity, and we should never reach this
            # point because such entities should have been filtered out by now
            raise ValueError("Encountered a non-ambiguous entity with fewer than two candidates")
        tasks[entity_id] = valid_candidate_tasks
        user_entities[username].append(ne_obj)
        done_shorttexts.add(shorttext_id)
            
    # put valid entities + candidates in the spreadsheet until reach our limit of tasks
    task_max = 1400    
    
    rows = []
    headers = ['entity_id', 'short_text', 'ambiguous_entity', 'candidate_link']
    rows.append(headers)
    
    for username in user_entities:
        
        # add users until reach our limit on the number of tasks we can afford, 
        # but break at this point in the loop rather than in the inner loop to
        # ensure that we do have at least 50 entities per user (even if this
        # means we go over our task limit a little in order to reach that amount)
        if len(rows) > task_max:
            break
        
        # bypass users who haven't written the minimum number of valid entities
        # required to establish statistical significance between the algorithms
        if len(user_entities[username]) < 50:
            continue
        
        # should be 50 NamedEntity objects per user, and we'll make tasks for their candidates
        for ne_obj in user_entities[username]:
            entity_id = ne_obj.get_entity_id()
        
            # make sure the entity presented to a Turker looks the same as
            # it appears in the short text (ie with the same capitalization)
            original_shorttext = ne_obj.shorttext_str.decode('latin-1')
            surface_form = ne_obj.surface_form
            if surface_form not in original_shorttext:
                surface_form = __match_appearance__(surface_form, original_shorttext)
            
            # shuffle candidates so that they don't appear
            # in wikiminer's/dbpedia's ranking order and bias the turker
            choices = tasks[entity_id][:] # copy so the cached task list keeps its original order
            random.shuffle(choices)
            for choice in choices:
                # make a separate row for each candidate link 
                # rather than putting all links in a single cell
                row = [entity_id, original_shorttext, surface_form, choice]
                rows.append(row)
            
            if len(rows)%50==0:
                # write the rows every once in a while in case we reach an error
                print "Updating spreadsheet..."+str(len(rows))
                csv_util.write_to_spreadsheet(__entities_to_judge_csv_path__, rows)
        
    # dump to csv
    csv_util.write_to_spreadsheet(__entities_to_judge_csv_path__, rows)
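

# A minimal usage sketch (an assumption, not part of the original module): how the two
# make_*_csv_for_turk functions might be invoked if this file were run as a script.
# It assumes the module's csv path constants point at writable locations.
if __name__ == '__main__':
    make_usernames_csv_for_turk()
    make_tweet_entities_csv_for_turk()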