# 示例#1 (Example #1)
# 0
def size_test():
    """Smoke-test plain ERM against welfare-regularized ERM on random data.

    Generates a random uniform instance (n samples, m features, d outcomes),
    trains plain ERM and ERM with a welfare term (lamb=10), and prints the
    optimal loss, the achieved loss, and the welfare of each solution.
    """
    n = 50   # number of samples
    m = 10   # number of features
    d = 5    # number of outcomes / rows of L and U
    K = 4    # NOTE(review): unused here; kept for parity with sibling tests
    print("\n\nRunning Size Test")

    train_X = generate_data(n, m, 'uniform')
    L = generate_loss_matrix(d, m, 'uniform')
    U = generate_utility_matrix(d, m, 'uniform')
    U_X = np.matmul(train_X, U.T)  # per-sample utility of each outcome

    # Feeds the commented-out "best possible welfare" probe below.
    max_val = np.amax(U_X)

    learned_betas, learned_predictions, _, alphas = train_erm(train_X, L)
    final_loss = compute_final_loss(alphas, L, train_X, learned_predictions)
    welfare = compute_welfare(alphas, U, train_X, learned_predictions)
    opt_loss = get_optimal_loss(L, train_X)

    print("Optimal loss is: ", opt_loss)
    print("ERM loss is: ", final_loss)
    print("Welfare is: ", welfare)

    #conjugate_UX = max_val*np.ones((n, d)) - U_X
    #learned_betas, learned_predictions, _, alphas = train_erm(train_X, conjugate_UX)
    #best_welfare = compute_welfare(alphas, U, train_X, learned_predictions)
    #print("Best possible welfare is: ", best_welfare)

    learned_betas, learned_predictions, _, opt_alphas = train_erm_welfare(train_X, L, U, lamb=10)
    final_loss = compute_final_loss(opt_alphas, L, train_X, learned_predictions)
    welfare = compute_welfare(opt_alphas, U, train_X, learned_predictions)
    # Fixed typo in message: "welfre" -> "welfare".
    print("ERM with welfare loss is: ", final_loss)
    print("Welfare is: ", welfare)
# 示例#2 (Example #2)
# 0
def size_test():
    """Size test for envy-free ERM.

    Trains plain ERM and envy-free ERM (lamb=100) on random L1-normalized
    uniform data, prints loss / average envy for both, then measures the
    envy of each model's predictions on a fresh test set.
    """
    n = 50   # training samples
    m = 14   # features
    d = 5    # outcomes
    K = 4    # predictions per sample requested from get_all_predictions
    print("\nRunning Size Test")

    train_X = generate_data(n, m, 'uniform')
    train_X = normalize(train_X, axis=1, norm='l1')  # each row sums to 1
    group_dist = [0.25, 0.25, 0.25, 0.25]
    samples_by_group = define_groups(train_X, group_dist)
    L = generate_loss_matrix(d, m, 'uniform')
    U = generate_utility_matrix_var(d, m, 'uniform', 0)

    erm_betas, learned_predictions, learned_pred_group, alphas = train_erm(train_X, L, U, \
            samples_by_group)
    final_loss = compute_final_loss(alphas, L, train_X, learned_predictions)
    avg_envy, violations = total_average_envy(alphas, U, train_X,
                                              learned_predictions)
    opt_loss = get_optimal_loss(L, train_X)

    print("Optimal loss is: ", opt_loss)
    print("ERM loss is: ", final_loss)
    print("ERM average envy: ", avg_envy, "ERM total violations: ", violations)

    start_time = time.time()
    cons_betas, learned_predictions, learned_pred_group, opt_alphas = \
            train_erm_envy_free(train_X, L, U, samples_by_group, lamb=100)
    final_loss = compute_final_loss(opt_alphas, L, train_X,
                                    learned_predictions)
    avg_envy, violations = total_average_envy(opt_alphas, U, train_X,
                                              learned_predictions)
    print("ERM-Envy Free loss is: ", final_loss)
    print("ERM-Envy Free average envy: ", avg_envy, "ERM total violations: ",
          violations)
    end_time = time.time()
    print("Time is: ", end_time - start_time)
    print(learned_predictions)

    test_X = generate_data(n, m, 'uniform')
    group_dist = [0.25, 0.25, 0.25, 0.25]
    test_s_by_group = define_groups(test_X, group_dist)
    st_learned_predictions, st_learned_pred_group = \
            get_all_predictions(erm_betas, test_X, test_s_by_group, K)
    # BUG FIX: envy of the test-set predictions was measured against train_X;
    # the features must be the test set that produced those predictions
    # (the constrained-model evaluation below already used test_X).
    st_avg_envy, st_envy_violations = total_average_envy(opt_alphas, U, test_X, \
            st_learned_predictions)
    #print("ERM get this much envy on test: ", st_avg_envy, "Violations: ", st_envy_violations)

    st_learned_predictions, st_learned_pred_group = \
            get_all_predictions(cons_betas, test_X, test_s_by_group, K)
    st_avg_envy, st_envy_violations = total_average_envy(opt_alphas, U, test_X, \
            st_learned_predictions)
    # NOTE(review): this final test-set envy is computed but never printed.
def size_test():
    """Size test for equitability-constrained ERM.

    Trains plain ERM and ERM with an equitability constraint (lamb=3) on a
    random uniform instance, prints loss / group-equitability differences,
    then re-evaluates both models on a fresh 100-sample test set.
    """
    n = 60   # training samples
    m = 16   # features
    d = 5    # outcomes
    K = 4    # predictions per sample requested from get_all_predictions
    print("\nRunning Size Test")

    train_X = generate_data(n, m, 'uniform')
    group_dist = [0.25, 0.25, 0.25, 0.25]
    samples_by_group = define_groups(train_X, group_dist)
    L = generate_loss_matrix(d, m, 'uniform')
    U = generate_utility_matrix(d, m, 'uniform')

    erm_betas, learned_predictions, learned_pred_group, alphas = train_erm(train_X, L, U, \
            samples_by_group)
    final_loss = compute_final_loss(alphas, L, train_X, learned_predictions)
    total_equi, violations = total_group_equi(alphas, U, samples_by_group,
                                              learned_pred_group)
    opt_loss = get_optimal_loss(L, train_X)

    print("Optimal loss is: ", opt_loss)
    print("ERM loss is: ", final_loss)
    print("ERM total equi: ", total_equi, "ERM total violations: ", violations)

    start_time = time.time()
    cons_betas, learned_predictions, learned_pred_group, opt_alphas = \
            train_erm_equi(train_X, L, U, samples_by_group, lamb=3)
    final_loss = compute_final_loss(opt_alphas, L, train_X,
                                    learned_predictions)
    # BUG FIX: the constrained model must be scored with its own opt_alphas;
    # this previously reused the unconstrained `alphas` (compare the
    # envy-free size test, which uses opt_alphas here).
    total_equi, violations = total_group_equi(opt_alphas, U, samples_by_group,
                                              learned_pred_group)
    print("ERM-equi loss is: ", final_loss)
    print("ERM-equi total equi: ", total_equi, "ERM total violations: ",
          violations)
    end_time = time.time()
    print("Time is: ", end_time - start_time)

    test_X = generate_data(100, m, 'uniform')
    group_dist = [0.25, 0.25, 0.25, 0.25]
    test_s_by_group = define_groups(test_X, group_dist)
    st_learned_predictions, st_learned_pred_group = \
            get_all_predictions(erm_betas, test_X, test_s_by_group, K)
    st_total_equi, st_envy_violations = total_group_equi(opt_alphas, U, test_s_by_group, \
            st_learned_pred_group)
    print("ERM get this much equi diff on test: ", st_total_equi)

    st_learned_predictions, st_learned_pred_group = \
            get_all_predictions(cons_betas, test_X, test_s_by_group, K)
    st_total_equi, st_envy_violations = total_group_equi(opt_alphas, U, test_s_by_group, \
            st_learned_pred_group)
    print("ERM-Equi get this much equi diff on test: ", st_total_equi)
def size_test():
    """Size test comparing plain ERM against min-welfare-constrained ERM.

    Trains both models on a random uniform instance, prints training loss
    and minimum group welfare, then evaluates min welfare of each model's
    predictions on a fresh 100-sample test set.
    """
    num_samples = 60
    num_features = 16
    num_outcomes = 5
    K = 4
    Lambda = 1000
    print("\nRunning Size Test")

    # Random training instance with four equally-sized groups.
    train_X = generate_data(num_samples, num_features, 'uniform')
    group_dist = [0.25, 0.25, 0.25, 0.25]
    samples_by_group = define_groups(train_X, group_dist)
    L = generate_loss_matrix(num_outcomes, num_features, 'uniform')
    U = generate_utility_matrix(num_outcomes, num_features, 'uniform')

    # Unconstrained ERM baseline.
    erm_betas, erm_preds, erm_preds_group, alphas = train_erm(
            train_X, L, U, samples_by_group)
    erm_loss = compute_final_loss(alphas, L, train_X, erm_preds)
    erm_min_welfare = min_group_welfare(alphas, U, samples_by_group,
                                        erm_preds_group)
    opt_loss = get_optimal_loss(L, train_X)

    print("Optimal loss is: ", opt_loss)
    print("ERM loss is: ", erm_loss)
    print("ERM min_welfare: ", erm_min_welfare)

    # ERM with the min-welfare constraint, timed.
    start_time = time.time()
    cons_betas, cons_preds, cons_preds_group, opt_alphas = \
            train_erm_min_welfare(train_X, L, U, samples_by_group, lamb=Lambda)
    cons_loss = compute_final_loss(opt_alphas, L, train_X, cons_preds)
    cons_min_welfare = min_group_welfare(opt_alphas, U, samples_by_group,
                                         cons_preds_group)
    print("ERM-min_welfare loss is: ", cons_loss)
    print("ERM-min_welfare min welfare is: ", cons_min_welfare)
    end_time = time.time()
    print("Time is: ", end_time - start_time)

    # Evaluate both learned models on fresh data.
    test_X = generate_data(100, num_features, 'uniform')
    test_s_by_group = define_groups(test_X, [0.25, 0.25, 0.25, 0.25])

    _, test_pred_group = get_all_predictions(erm_betas, test_X,
                                             test_s_by_group, K)
    test_min_welfare = min_group_welfare(opt_alphas, U, test_s_by_group,
                                         test_pred_group)
    print("ERM get this much min welfare on test: ", test_min_welfare)

    _, test_pred_group = get_all_predictions(cons_betas, test_X,
                                             test_s_by_group, K)
    test_min_welfare = min_group_welfare(opt_alphas, U, test_s_by_group,
                                         test_pred_group)
    print("ERM-min welfare get this much min welfare on test: ",
          test_min_welfare)
def test_erm_equi():
    """Equitability test on a fixed 11x4 instance.

    Computes the plain ERM solution, then the ERM solution with an
    equitability constraint (lamb=Lambda), and prints loss and total
    equitability difference / violations for both.
    """
    Lambda = 10
    # NOTE(review): header says "Envy Free" but this exercises the
    # equitability constraint; kept verbatim to preserve output.
    print("Group Envy Free Test: ")
    print("First compute ERM solution: ")
    train_X = np.array([[0.8, 0.3, 1.5, 0.1], \
                        [0.3, 1.1, 1.7, 0.9], \
                        [1.0, 1.4, 0.5, 1.2],
                        [0.3, 0.5, 1.2, 1.3],
                        [1.0, 0.2, 0.7, 0.9],
                        [0.7, 1.5, 1.9, 0.3],
                        [0.2, 0.9, 1.7, 0.3],
                        [0.1, 0.2, 1.9, 1.3],
                        [0.7, 0.277, 0.9, 1.1],
                        [1.0, 1.2, 0.7, 0.9],
                        [0.1, 0.8, 0.3, 0.5], \
                        ]) # 11 x 4 (comment previously said 11 x 5)
    group_dist = [0.25, 0.25, 0.25, 0.25]
    samples_by_group = define_groups(train_X, group_dist)
    L = np.array([[0.3, 0.1, 0.4, 0.2],\
                  [0.7, 0.4, 0.1, 0.7],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.4, 0.1, 0.4, 0.2]])
    U = np.array([[.1, 0.3, 0.3, 0.9],\
                  [0.5, 0.9, 0.1, 0.5],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.1, 0.9, 0.9, 0.1]])

    learned_betas, learned_pred_all, learned_pred_group, opt_alphas = \
            train_erm(train_X, L, U, samples_by_group)
    total_equi, violations = total_group_equi(opt_alphas, U, samples_by_group,
                                              learned_pred_group)
    loss = compute_final_loss(opt_alphas, L, train_X, learned_pred_all)
    optimal_loss = get_optimal_loss(L, train_X)

    print("Optimal Loss is:", optimal_loss)
    print("ERM loss is: ", loss)
    print("ERM total equi diff: ", total_equi, "ERM total violations: ",
          violations)
    print("")

    # Fixed typo in message: "wth" -> "with".
    print("Now do with Equitability constraint with lambda: ", Lambda)
    learned_betas, learned_pred_all, learned_pred_group, opt_alphas = \
            train_erm_equi(train_X, L, U, samples_by_group, lamb=Lambda)
    total_equi, violations = total_group_equi(opt_alphas, U, samples_by_group, \
            learned_pred_group)
    loss = compute_final_loss(opt_alphas, L, train_X, learned_pred_all)
    print("ERM-equi Loss is : ", loss)
    print("ERM-equi total equi diff: ", total_equi,
          "ERM-equi total violations: ", violations)
# 示例#6 (Example #6)
# 0
def test_erm_envy_free():
    """Envy-freeness test on a fixed 11x4 instance.

    Computes the plain ERM solution, then the ERM solution with an
    envy-free constraint (lamb=Lambda), and prints loss and average
    envy / violations for both.
    """
    Lambda = 10
    print("Envy Free Test: ")
    print("First compute ERM solution: ")
    train_X = np.array([[0.8, 0.3, 1.5, 0.1], \
                        [0.3, 1.1, 1.7, 0.9], \
                        [1.1, 1.4, 0.5, 1.2],
                        [0.3, 0.5, 1.2, 1.3],
                        [1.0, 0.2, 0.7, 0.9],
                        [0.7, 1.5, 1.9, 0.3],
                        [0.2, 0.9, 1.7, 0.3],
                        [0.1, 0.2, 1.9, 1.3],
                        [0.7, 0.277, 0.9, 1.1],
                        [1.0, 1.2, 0.7, 0.9],
                        [0.1, 0.8, 0.3, 0.5], \
                        ]) # 11 x 4 (comment previously said 11 x 5)
    group_dist = [0.25, 0.25, 0.25, 0.25]
    samples_by_group = define_groups(train_X, group_dist)
    L = np.array([[0.3, 0.1, 0.4, 0.2],\
                  [0.7, 0.4, 0.1, 0.7],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.4, 0.1, 0.4, 0.2]])
    U = np.array([[.1, 0.3, 0.3, 0.9],\
                  [0.5, 0.9, 0.1, 0.5],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.1, 0.9, 0.9, 0.1]])

    learned_betas, learned_pred_all, learned_pred_group, opt_alphas = \
            train_erm(train_X, L, U, samples_by_group)
    avg_envy, violations = total_average_envy(opt_alphas, U, train_X,
                                              learned_pred_all)
    loss = compute_final_loss(opt_alphas, L, train_X, learned_pred_all)
    optimal_loss = get_optimal_loss(L, train_X)

    print("Optimal Loss is:", optimal_loss)
    print("ERM loss is: ", loss)
    print("ERM total envy: ", avg_envy, "violations: ", violations)
    print("")

    # Fixed typo in message: "wth" -> "with".
    print("Now do with Envy Free constraint with lambda: ", Lambda)
    learned_betas, learned_pred_all, learned_pred_group, opt_alphas = \
            train_erm_envy_free(train_X, L, U, samples_by_group, lamb=Lambda)
    avg_envy, violations = total_average_envy(opt_alphas, U, train_X,
                                              learned_pred_all)
    loss = compute_final_loss(opt_alphas, L, train_X, learned_pred_all)
    print("ERM-Envy Free Loss is : ", loss)
    print("ERM-Envy Free avg envy: ", avg_envy, "violations: ", violations)
def test_erm_equi():
    """Min-group-welfare test on a fixed 11x4 instance.

    NOTE(review): despite the name (which also collides with the earlier
    test_erm_equi definition) and the "Group Envy Free Test" header, this
    exercises train_erm_min_welfare / min_group_welfare. Name and header
    are kept to preserve the external interface and output.
    """
    Lambda = 1000
    print("Group Envy Free Test: ")
    print("First compute ERM solution: ")
    train_X = np.array([[0.8, 0.3, 1.5, 0.1], \
                        [0.3, 1.1, 1.7, 0.9], \
                        [1.0, 1.4, 0.5, 1.2],
                        [0.3, 0.5, 1.2, 1.3],
                        [1.0, 0.2, 0.7, 0.9],
                        [0.7, 1.5, 1.9, 0.3],
                        [0.2, 0.9, 1.7, 0.3],
                        [0.1, 0.2, 1.9, 1.3],
                        [0.7, 0.277, 0.9, 1.1],
                        [1.0, 1.2, 0.7, 0.9],
                        [0.1, 0.8, 0.3, 0.5], \
                        ]) # 11 x 4 (comment previously said 11 x 5)
    group_dist = [0.25, 0.25, 0.25, 0.25]
    samples_by_group = define_groups(train_X, group_dist)
    L = np.array([[0.3, 0.1, 0.4, 0.2],\
                  [0.7, 0.4, 0.1, 0.7],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.4, 0.1, 0.4, 0.2]])
    U = np.array([[.1, 0.3, 0.3, 0.9],\
                  [0.5, 0.9, 0.1, 0.5],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.1, 0.9, 0.9, 0.1]])

    learned_betas, learned_pred_all, learned_pred_group, opt_alphas = \
            train_erm(train_X, L, U, samples_by_group)
    min_welfare = min_group_welfare(opt_alphas, U, samples_by_group,
                                    learned_pred_group)
    loss = compute_final_loss(opt_alphas, L, train_X, learned_pred_all)
    optimal_loss = get_optimal_loss(L, train_X)

    print("Optimal Loss is:", optimal_loss)
    print("ERM loss is: ", loss)
    print("ERM min welfare: ", min_welfare)
    print("")

    print("Now do with min welfare constraint with lambda: ", Lambda)
    learned_betas, learned_pred_all, learned_pred_group, opt_alphas = \
            train_erm_min_welfare(train_X, L, U, samples_by_group, lamb=Lambda)
    cons_min_welfare = min_group_welfare(opt_alphas, U, samples_by_group, \
            learned_pred_group)
    loss = compute_final_loss(opt_alphas, L, train_X, learned_pred_all)
    print("ERM-Min Welfare Loss is : ", loss)
    print("ERM-Min Welfare min welfare: ", cons_min_welfare)
# 示例#8 (Example #8)
# 0
def test_erm_welfare():
    """Welfare test on a fixed 8x4 instance.

    Computes the plain ERM solution, then the welfare-regularized ERM
    solution (lamb=10), and prints loss and welfare for both.
    """
    # NOTE(review): message claims "Utility is identity" but U below is a
    # dense non-identity matrix — confirm intent; message kept verbatim.
    print("Test 1: n=8, d=4, m=4, Loss mat, Utility is identity")
    train_X = np.array([[0.4, 0.3, 0.5, 0.1], \
                        [0.3, 0.1, 0.7, 0.9], \
                        [1.1, 0.4, 0.5, 1.2],
                        [0.3, 0.5, 1.2, 1.3],
                        [1.3, 0.2, 0.7, 0.9],
                        [1.7, 1.5, 1.9, 0.3],
                        [2.2, 0.9, 1.7, 0.3],
                        [0.1, 0.2, 1.9, 1.3],
                        ]) # 8 x 4
    L = np.array([[0.3, 0.1, 0.4, 0.2],\
                  [0.2, 0.4, 0.1, 0.7],\
                  [0.3, 0.55, 0.7, 0.3],\
                  [0.4, 0.1, 0.4, 0.2]])
    U = np.array([[0.4, 0.2, 0.3, 0.4],\
                  [0.5, 0.1, 0.9, 0.5],\
                  [0.3, 0.4, 0.6, 0.5],\
                  [0.2, 0.3, 0.6, 0.1]])
    U_X = np.matmul(train_X, U.T)  # per-sample utility of each outcome
    n, d = U_X.shape
    # Feeds the commented-out "best possible welfare" probe below.
    max_val = np.amax(U_X)

    learned_betas, learned_predictions, _, opt_alphas = train_erm(train_X, L, U, groups=None, lamb=0)
    final_loss = compute_final_loss(opt_alphas, L, train_X, learned_predictions)
    welfare = compute_welfare(opt_alphas, U, train_X, learned_predictions)
    opt_loss = get_optimal_loss(L, train_X)

    print("Optimal loss is: ", opt_loss)
    print("ERM loss is: ", final_loss)
    print("Welfare is: ", welfare)

    #conjugate_UX = max_val*np.ones((n, d)) - U_X
    #learned_betas, learned_predictions, _, opt_alphas = train_erm(train_X, conjugate_UX)
    #best_welfare = compute_welfare(opt_alphas, U, train_X, learned_predictions)
    #print("Best possible welfare is: ", best_welfare)

    learned_betas, learned_predictions, _, opt_alphas = train_erm_welfare(train_X, L, U, lamb=10)
    final_loss = compute_final_loss(opt_alphas, L, train_X, learned_predictions)
    welfare = compute_welfare(opt_alphas, U, train_X, learned_predictions)
    # Fixed typo in message: "welfre" -> "welfare".
    print("ERM with welfare loss is: ", final_loss)
    print("Welfare is: ", welfare)