Example #1
# Assumed imports: Params lived at splink.params in splink v1
import pytest
from splink.params import Params


@pytest.fixture
def params_4(gamma_settings_4):

    # Build the Params object; "supress_warnings" (sic) is the sentinel
    # the tests pass in place of a Spark session
    params = Params(gamma_settings_4, spark="supress_warnings")

    params._generate_param_dict()
    yield params
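
This example is a pytest fixture: it takes another fixture (gamma_settings_4) as an argument and yields the initialised object. A minimal sketch of a test that consumes it; the test name and assertion are illustrative, not from the original suite:

def test_params_4_is_initialised(params_4):
    # The fixture has already called _generate_param_dict, so the object is ready to use
    assert params_4 is not None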
Example #2
# Assumed imports; the splink module paths follow splink v1's internal
# layout and may differ in other versions
import copy
import sqlite3

import pandas as pd
import pytest

from splink.blocking import _sql_gen_block_using_rules
from splink.expectation_step import (
    _sql_gen_expected_match_prob,
    _sql_gen_gamma_prob_columns,
)
from splink.gammas import _sql_gen_add_gammas
from splink.maximisation_step import (
    _sql_gen_intermediate_pi_aggregate,
    _sql_gen_pi_df,
)
from splink.params import Params


@pytest.fixture
def sqlite_con_1(gamma_settings_1, params_1):

    # Create the database and the database table
    con = sqlite3.connect(":memory:")
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    cur.execute("create table test1 (unique_id, mob, surname)")
    cur.execute("insert into test1 values (?, ?, ?)", (1, 10, "Linacre"))
    cur.execute("insert into test1 values (?, ?, ?)", (2, 10, "Linacre"))
    cur.execute("insert into test1 values (?, ?, ?)", (3, 10, "Linacer"))
    cur.execute("insert into test1 values (?, ?, ?)", (4, 7, "Smith"))
    cur.execute("insert into test1 values (?, ?, ?)", (5, 8, "Smith"))
    cur.execute("insert into test1 values (?, ?, ?)", (6, 8, "Smith"))
    cur.execute("insert into test1 values (?, ?, ?)", (7, 8, "Jones"))

    # Blocking rules: each rule contributes candidate record pairs to the comparison table
    rules = [
        "l.mob = r.mob",
        "l.surname = r.surname",
    ]

    sql = "select * from test1 limit 1"
    cur.execute(sql)
    one = cur.fetchone()
    columns = one.keys()

    # Generate candidate pairs by joining test1 to itself on the blocking
    # rules ("dedupe_only" link type)
    sql = _sql_gen_block_using_rules("dedupe_only",
                                     columns,
                                     rules,
                                     table_name_dedupe="test1")
    df = pd.read_sql(sql, con)
    # Keep each candidate pair once, in a deterministic order
    df = df.drop_duplicates(["unique_id_l", "unique_id_r"])
    df = df.sort_values(["unique_id_l", "unique_id_r"])
    df.to_sql("df_comparison1", con, index=False)

    # Add a gamma column per comparison column: the agreement level for each pair
    sql = _sql_gen_add_gammas(gamma_settings_1, table_name="df_comparison1")

    df = pd.read_sql(sql, con)
    df.to_sql("df_gammas1", con, index=False)

    # Attach the m and u probabilities implied by each gamma value
    sql = _sql_gen_gamma_prob_columns(params_1, gamma_settings_1, "df_gammas1")
    df = pd.read_sql(sql, con)
    df.to_sql("df_with_gamma_probs1", con, index=False)

    # Expectation step: compute the expected match probability for each pair
    sql = _sql_gen_expected_match_prob(params_1, gamma_settings_1,
                                       "df_with_gamma_probs1")
    df = pd.read_sql(sql, con)
    df.to_sql("df_with_match_probability1", con, index=False)

    # Maximisation step: aggregate match probabilities by gamma value
    sql = _sql_gen_intermediate_pi_aggregate(
        params_1, table_name="df_with_match_probability1")
    df = pd.read_sql(sql, con)
    df.to_sql("df_intermediate1", con, index=False)

    # Derive the pi table (updated parameter estimates) from the aggregate
    sql = _sql_gen_pi_df(params_1, "df_intermediate1")

    df = pd.read_sql(sql, con)
    df.to_sql("df_pi1", con, index=False)

    # Second iteration: build a new parameters object and run the
    # expectation and maximisation steps again
    gamma_settings_it_2 = copy.deepcopy(gamma_settings_1)
    gamma_settings_it_2["proportion_of_matches"] = 0.540922141
    gamma_settings_it_2["comparison_columns"][0]["m_probabilities"] = [
        0.087438272, 0.912561728
    ]
    gamma_settings_it_2["comparison_columns"][0]["u_probabilities"] = [
        0.441543191, 0.558456809
    ]
    gamma_settings_it_2["comparison_columns"][1]["m_probabilities"] = [
        0.173315146,
        0.326240275,
        0.500444578,
    ]
    gamma_settings_it_2["comparison_columns"][1]["u_probabilities"] = [
        0.340356209,
        0.160167628,
        0.499476163,
    ]
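    # The hard-coded proportion_of_matches and m/u probabilities above stand in
    # for the estimates produced by the first iteration, so the second pass is
    # deterministic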

    params2 = Params(gamma_settings_it_2, spark="supress_warnings")

    params2._generate_param_dict()

    sql = _sql_gen_gamma_prob_columns(params2, gamma_settings_it_2,
                                      "df_gammas1")
    df = pd.read_sql(sql, con)
    df.to_sql("df_with_gamma_probs1_it2", con, index=False)

    sql = _sql_gen_expected_match_prob(params2, gamma_settings_it_2,
                                       "df_with_gamma_probs1_it2")
    df = pd.read_sql(sql, con)
    df.to_sql("df_with_match_probability1_it2", con, index=False)

    sql = _sql_gen_intermediate_pi_aggregate(
        params2, table_name="df_with_match_probability1_it2")
    df = pd.read_sql(sql, con)
    df.to_sql("df_intermediate1_it2", con, index=False)

    sql = _sql_gen_pi_df(params2, "df_intermediate1_it2")

    df = pd.read_sql(sql, con)
    df.to_sql("df_pi1_it2", con, index=False)

    yield con
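
A minimal sketch of a test built on this fixture; the table name comes from the snippet above, while the match_probability column name is an assumption based on splink's conventions:

def test_match_probability_is_valid(sqlite_con_1):
    # Hypothetical check: every expected match probability is a valid probability
    df = pd.read_sql("select * from df_with_match_probability1", sqlite_con_1)
    assert df["match_probability"].between(0, 1).all()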