Example #1
def save_matches(df_matched, track=track):
    if track == "python":
        # select and rename the columns expected by the matches table
        matches_db = df_matched[[
            'id_user_1', 'id_user_2', 'score_u1', 'score_u2'
        ]].copy()
        matches_db.columns = [
            'fk_user_1_id', 'fk_user_2_id', 'algo_score_u1', 'algo_score_u2'
        ]
        matches_db["fk_round_id"] = conf_data["round_num_id"]
        save = "db"
        # connect to the database and insert the matches
        conn = dbq.connect()
        dbq.m_single_insert(conn, matches_db)
    else:
        save = ".csv"
        to_path = conf_data["filepath"][track] + str(
            conf_data["dates"]["round_num"]
        ) + "/" + dt_today + conf_data["file_name"]["buddy_results"]
        df_matched.to_csv(to_path,
                          sep=',',
                          decimal=".",
                          encoding='utf-8',
                          index=False,
                          header=True)
    print("saved {} to {}".format(track, save))
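For reference, a minimal call sketch for the .csv branch. The conf_data keys, the dt_today value and the DataFrame columns below are stand-ins inferred from the snippet above, not the project's real configuration.

import pandas as pd

# Hypothetical module-level globals mirroring what save_matches() reads;
# names and structure are inferred from the snippet, not from the real config.
conf_data = {
    "filepath": {"r": "datafiles_sens/r/"},
    "dates": {"round_num": 7},
    "file_name": {"buddy_results": "_buddy_results.csv"},
}
dt_today = "2024-01-15_"

df_matched = pd.DataFrame({
    "id_user_1": [11, 12],
    "id_user_2": [21, 22],
    "score_u1": [0.91, 0.73],
    "score_u2": [0.88, 0.69],
})

# any track other than "python" takes the .csv branch, writing
# datafiles_sens/r/7/2024-01-15__buddy_results.csv
save_matches(df_matched, track="r")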
Example #2
def prep_db_data(track="python"):
    """
    Reads the sign-up data stored in the local db, falling back to the
    .csv export when the db is empty.
    :return: pd.DataFrame, ready to be transformed (transform_data)
    """
    conn = dbq.connect()
    df_raw = pd.read_sql_query(dbq.signup_info, conn)
    conn.close()
    if df_raw.shape[0] == 0:
        print("no data in local db, referring to .csv instead")
        file_path = conf_data["filepath"][track] + str(conf_data["round_num"]) + \
                    conf_data["file_name"]["signups"]
        df_raw = pd.read_csv(file_path, usecols=dbq.var)

    return make_df(df_raw)
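The fallback only reads the columns listed in dbq.var. Below is a small, self-contained illustration of that usecols pattern; the column names and CSV content are placeholders, not the real sign-up schema.

import pandas as pd
from io import StringIO

# stand-in for the sign-ups .csv export; in the pipeline the column list
# comes from dbq.var and the path from conf_data
csv_export = StringIO(
    "id,first_name,email,track,comment\n"
    "1,Ada,ada@example.org,python,hi\n"
    "2,Grace,grace@example.org,r,hello\n"
)
df_raw = pd.read_csv(csv_export, usecols=["id", "first_name", "email", "track"])
print(df_raw.shape)  # (2, 4): the 'comment' column is dropped by usecols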
Example #3
def prior_buddy_check(idx_dict, track):
    """
    THIS FUNCTION NEEDS TESTING!
    Retrieves prior buddy combinations so that scoring_alg() can set the
    scores of those prior combinations to zero.
    :return: dictionary with all prior combinations by indices
    """

    if track == "python":
        connection = dbq.connect()
        df_m = pd.read_sql_query(dbq.prior_part, connection)
        connection.close()
        # df_m = pd.read_csv("datafiles_sens/" + track + "/"+ conf_data["file_name"]["prior_matches"],  usecols=["fk_round_id", "fk_user_1_id", "fk_user_2_id"]) # aka db "matches" table
        # df_m.dropna(how="all", inplace=True)
        # print("retrieving matches for {} from .csv file. Fix db connection (or make sure .csv files are up to date)!".format(track))

    else:
        # read prior matches
        df_m = pd.read_csv("datafiles_sens/" + track + "/" +
                           conf_data["file_name"]["prior_matches"],
                           usecols=[
                               "fk_round_id", "fk_user_1_id", "fk_user_2_id"
                           ])  # aka db "matches" table
        df_m.dropna(how="all", inplace=True)

    # get all participants from previous rounds, and the current-round participants who have also taken part before
    df_prev = df_m[df_m["fk_round_id"] != conf_data["round_num_id"]]
    userid_current = idx_dict.values()
    userid_past = {
        *list(df_prev["fk_user_1_id"]), *list(df_prev["fk_user_2_id"])
    }
    userid_repeating = set(userid_current) & userid_past

    # get a dictionary with all indices of prior combinations
    prior_combinations = {}
    for p_userid in userid_repeating:  # for each repeating sign-up, collect the previous buddy/buddies who have also signed up for this round
        comb1 = set(df_m.loc[df_m['fk_user_1_id'] == p_userid,
                             "fk_user_2_id"]) & userid_repeating
        comb2 = set(df_m.loc[df_m['fk_user_2_id'] == p_userid,
                             "fk_user_1_id"]) & userid_repeating
        past_buddies = comb1 | comb2
        for pb in past_buddies:
            prior_combinations[pb] = p_userid

    return prior_combinations
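To show how the returned dictionary is typically consumed, here is a hypothetical sketch of the step in scoring_alg() that zeroes out prior combinations. The score matrix, the idx_dict layout (matrix index -> user id) and the variable names are assumptions, since scoring_alg() is not shown here.

import numpy as np

# hypothetical wiring inside scoring_alg(): forbid re-matching pairs that
# were buddies in an earlier round by zeroing their pairwise score
idx_dict = {0: 101, 1: 102, 2: 103}   # assumed layout: matrix index -> user id
scores = np.array([[0.0, 0.8, 0.4],
                   [0.8, 0.0, 0.6],
                   [0.4, 0.6, 0.0]])  # placeholder pairwise scores
prior_combinations = {102: 101}       # e.g. users 101 and 102 were buddies before

user_to_idx = {uid: i for i, uid in idx_dict.items()}
for uid_a, uid_b in prior_combinations.items():
    i, j = user_to_idx[uid_a], user_to_idx[uid_b]
    scores[i, j] = scores[j, i] = 0   # prior buddies can no longer be matched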