Example #1
import random

import helper  # project-local pickle I/O wrapper (load_pickle / dump_pickle)


def ablate_pickle(pickle_in, pickle_out, num_remove):
    """Remove num_remove randomly chosen entries from a pickled dict and re-save it."""
    laws = helper.load_pickle(pickle_in)

    for _ in range(num_remove):
        laws.pop(random.choice(list(laws.keys())))

    helper.dump_pickle(pickle_out, laws)
    return
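A minimal usage sketch. The output file name, seed, and removal count below are hypothetical; `helper` is assumed to be the project-local module providing `load_pickle`/`dump_pickle` used throughout these examples:

import random

random.seed(0)  # hypothetical seed so the ablation is reproducible
ablate_pickle('laws.pickle', 'laws_ablated.pickle', num_remove=100)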
Example #2
    fake_train_labels = np.array([[1.0],[0.0],[0.0],[1.0]])  
    #Validation data (At initialization, it is a copy of training data)
    fake_valid_images = np.copy(fake_train_images)
    fake_valid_labels = np.copy(fake_train_labels)
   
    mse_tolerance = 0.01
    num_petridish_iter = 1 
    num_petridish_points = 20
    num_top_points = 2 #Number of points to be checked in petridish for ground-truth evaluation
    perf_thresh = 0.97 #If performance reaches this mark then stop petridish
    slope_thresh = 0.5 #Do not include points with slope less than this threshold

    #Load the ground truth 
    dir_path = os.path.dirname(os.path.realpath(__file__))+'/'
    ground_truth_fname = dir_path+'mar8_search_slope_full.pkl'
    full_sorted_c_r_list = helper.load_pickle(ground_truth_fname)
    #full_sorted_c_r_list = [(c, r) for c, r in full_sorted_c_r_list if c > 0.35 and c < 0.39]
    #train_sorted_c_r_list = helper.randomly_select_K(full_sorted_c_r_list, perf_thresh, num_petridish_points, slope_thresh)
    train_sorted_c_r_list = [(0.3736363636363636, 0.9676633298397064), (0.3938383838383838, 0.9666700025399526), (0.414040404040404, 0.9661333322525024),  (0.45444444444444443, 0.9645633300145467), (0.4948484848484848, 0.9625533382097881), (0.515050505050505, 0.9617333332697551), (0.5756565656565656, 0.9596799969673157), (0.616060606060606, 0.9568799952665965), (0.6766666666666666, 0.9540533363819123), (0.717070707070707, 0.9526066660881043), (0.818080808080808, 0.9479466597239177), (0.8988888888888888, 0.9435500025749206), (0.9796969696969696, 0.9413000007470449), (1.0605050505050504, 0.9373233377933502), (1.2625252525252524, 0.9289600014686584), (1.3029292929292928, 0.9281666696071624), (1.424141414141414, 0.9239499926567077), (1.525151515151515, 0.921233328183492)]
    #train_sorted_c_r_list = [(0.6362626262626262, 0.9567066649595897), (1.2019191919191918, 0.9321533342202505), (1.2827272727272727, 0.928876664241155), (1.3433333333333333, 0.9264966626962026), (1.3837373737373737, 0.9261333306630453), (1.7271717171717171, 0.9155666649341583)]
    #train_sorted_c_r_list = [(0.45444444444444443, 0.9613), (0.12, 0.957), (1.1211111111111112, 0.9369), (1.5655555555555554, 0.9167), (2.01, 0.9076), (0.01, 0.8999)]#[(0.45444444444444443, 0.9645633300145467), (0.11101010101010099, 0.9554066697756449), (1.121111111111111, 0.9354000012079875), (1.5655555555555554, 0.9201899985472362), (2.01, 0.908623335758845), (0.01, 0.901146666208903)]#, (0.2322, 0.9727)]
    (lines, best_scores, train_mse, test_mse) = main(
        num_petridish_iter=num_petridish_iter, num_top_points=num_top_points, perf_thresh=perf_thresh,
        layer_sizes=layer_sizes, L2_reg=L2_reg, param_scale=param_scale, batch_size=batch_size,
        num_epochs=num_epochs, step_size=step_size, hyper_iter=hyper_iter, hyper_step_size=hyper_step_size,
        hyper_decay=hyper_decay, hyper_decay_after=hyper_decay_after, hyper_decay_every=hyper_decay_every,
        hyper_L2_reg=hyper_L2_reg, rank_loss_scaling_factor=rank_loss_scaling_factor,
        mse_tolerance=mse_tolerance, outputFname=outputFname, train_sorted_c_r_list=train_sorted_c_r_list,
        full_sorted_c_r_list=full_sorted_c_r_list, fake_train_images=fake_train_images,
        fake_train_labels=fake_train_labels, fake_valid_images=fake_valid_images,
        fake_valid_labels=fake_valid_labels)
 
    import datetime
    now = datetime.datetime.now()
    expt_id = '{:02d}'.format(now.month) + '{:02d}'.format(now.day) + '{:02d}'.format(now.hour) + '{:02d}'.format(now.minute)
    print("Results file is results/results_" + expt_id)
    with open("results/results_" + expt_id, 'w') as fwr:
        fwr.writelines(lines)
        fwr.write(str(best_scores) + "\n")
    #for line in lines:
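The experiment id above concatenates four zero-padded fields by hand; an equivalent, more compact sketch using datetime's strftime (same MMDDHHMM format, assuming the same results/ directory layout):

import datetime

now = datetime.datetime.now()
expt_id = now.strftime('%m%d%H%M')  # month, day, hour, minute, each zero-padded
print("Results file is results/results_" + expt_id)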
Example #3
import helper
from collections import Counter
import scipy.stats
import numpy as np
import pandas as pd

x1 = helper.load_pickle('x.pickle')
# x2 = helper.load_pickle('x2_cvx.pickle')
# x3 = helper.load_pickle('x3_cvx.pickle')


def count_partic(laws, members):
    for k, v in members.items():
        v['took_part'] = 0

    for vote_id, kmmbrs2votes in laws.items():
        for kmmbr_id, vote_result in kmmbrs2votes["kmmbrs2votes"].items():
            if vote_result > 0:
                members[kmmbr_id]['took_part'] += 1

    # normalise once, after all laws have been counted, so 'took_part'
    # becomes the fraction of votes each member attended
    for k, v in members.items():
        v['took_part'] = v['took_part'] / len(laws)
    #print('members','\n', members)
    return members
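A small self-contained check of count_partic on hypothetical toy data. The vote ids, member ids, and values below are made up, following the {<vote_id>: {"kmmbrs2votes": {<kmmbr_id>: <vote_result>}}} structure documented in the other examples:

toy_laws = {
    'v1': {'kmmbrs2votes': {'m1': 1, 'm2': 0}},
    'v2': {'kmmbrs2votes': {'m1': 2, 'm2': 1}},
}
toy_members = {'m1': {}, 'm2': {}}
print(count_partic(toy_laws, toy_members))
# expected: m1 took part in 2/2 votes (1.0), m2 in 1/2 (0.5)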


def count_won(laws, members):
    for k, v in members.items():
        v['won'] = 0

    for vote_id, kmmbrs2votes in laws.items():
Example #4
import helper
import pandas as pd
''' Creates a human-readable csv -- votes.csv:
7057 law ids (rows) x 143 kmmbr ids (cols);
1 = voted for the law, 2 = voted against the law, 0 = did not attend.
In addition, we dump the same data structure to a pickle -- 'law2vector.pickle'.
'''

#todo fix laws
laws = helper.load_pickle(
    'laws.pickle'
)  # laws = {<vote_id>: {"kmmbrs2votes": {<kmmbr_id>: <vote_result>}}}
kmmbrs = helper.load_pickle(
    'kmmrs.pickle'
)  # kmmbrs = {<kmmbr_id>: {kmmbr_name: <kmmbr_name>, faction_id: <faction_id>, faction_name: <faction_name>}}

kmmbrs_num = len(kmmbrs)
kmmbrs_lst = list(kmmbrs.keys())


# convert kmmbrs2votes dict to a fixed list:
# creates: new_laws = {<vote_id>: [1,2,0,...,1]}
def fix_members_lst(laws, kmmbrs):
    new_laws = {}
    for vote_id, kmmbrs2votes in laws.items():
        # one fixed-length slot per Knesset member
        l = [0] * len(kmmbrs)
        for kmmbr_id, vote_result in kmmbrs2votes["kmmbrs2votes"].items():
            inx = kmmbrs[kmmbr_id]['idx']
            l[inx] = vote_result
        new_laws[vote_id] = l
    return new_laws
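A hypothetical continuation matching the docstring above: run the conversion, write the human-readable votes.csv (columns ordered by each member's 'idx' field), and dump the same structure to 'law2vector.pickle'. The column ordering and the dump call are assumptions based on the other snippets, not the repository's actual driver code:

new_laws = fix_members_lst(laws, kmmbrs)

# order columns by the 'idx' used to build each fixed-length vector
ordered_kmmbrs = sorted(kmmbrs, key=lambda k: kmmbrs[k]['idx'])
df = pd.DataFrame.from_dict(new_laws, orient='index', columns=ordered_kmmbrs)
df.to_csv('votes.csv')

helper.dump_pickle('law2vector.pickle', new_laws)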
Example #5
from numpy import array, eye, hstack, ones, vstack, zeros
import numpy
import cvxopt
import helper
import datetime
import numpy as np

A = helper.load_pickle('A.pickle')
b = helper.load_pickle('b.pickle')

#toy example
# A= [[-1,-1,0,0,0], [0,0,-1,0,0],[0,0,0,-1,-1], [-1,0,0,0,0],[0,-1,-1,-1,0],[0,0,0,0,-1]]
# b=[-1,0,0,0,-1,0]

c = [1]*len(A[0])

def cvxopt_solve_min(b, A, solver=None):
    """Minimise sum(x) subject to A x <= b and x >= 0 using cvxopt's LP solver."""
    n = len(A[0])
    c = ones(n)

    # cvxopt constraint format: G * x <= h
    h1 = b          # original constraints: A x <= b
    G1 = A
    G2 = -eye(n)    # non-negativity: -x <= 0
    h2 = zeros(n)

    c = cvxopt.matrix(c)
    G = cvxopt.matrix(vstack([G1, G2]))
    h = cvxopt.matrix(hstack([h1, h2]))
    sol = cvxopt.solvers.lp(c, G, h, solver=solver)
    return sol
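A quick sanity check of cvxopt_solve_min using the commented-out toy example above (the toy A and b are the ones already shown in the comments; this is only a sketch, the real pickled matrices are much larger):

A_toy = [[-1, -1, 0, 0, 0], [0, 0, -1, 0, 0], [0, 0, 0, -1, -1],
         [-1, 0, 0, 0, 0], [0, -1, -1, -1, 0], [0, 0, 0, 0, -1]]
b_toy = [-1, 0, 0, 0, -1, 0]

sol = cvxopt_solve_min(b_toy, A_toy)
print(list(sol['x']))  # optimal x minimising sum(x) s.t. A x <= b, x >= 0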
Example #6
import helper
import datetime

A = helper.load_pickle('A.pickle')  #[:3000]
b = helper.load_pickle('b.pickle')  #[:3000]
c = [1] * len(A[0])

from scipy.optimize import linprog
res = linprog(c,
              A_ub=A,
              b_ub=b,
              bounds=(0, None),
              options={"disp": True},
              method='interior-point')

print(res)

# save in a txt:
import os
i = 0
while os.path.exists("scipylp(%s).txt" % i):
    i += 1

with open("scipylp(%s).txt" % i, "w") as text_file:
    print(f"Date and Time': {datetime.datetime.now()}", file=text_file)
    print(f"Parameters: method='interior-point'. \n Output\n: {res}",
          file=text_file)
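Note that recent SciPy releases deprecate the legacy 'interior-point' method in favour of the HiGHS solvers; a minimal equivalent call, assuming SciPy >= 1.6 and the same A, b, c as above:

from scipy.optimize import linprog

# same LP, solved with the HiGHS backend; bounds=(0, None) keeps every x_i >= 0
res = linprog(c, A_ub=A, b_ub=b, bounds=(0, None), method='highs')
print(res.status, res.fun)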
Example #7
import helper  # project-local pickle I/O wrapper (load_pickle / dump_pickle)


def minus(A, b):
    # negate A and b: entries equal to 1 become -1, everything else becomes 0
    newA = []
    for a in A:
        new_a = [-1 if x == 1 else 0 for x in a]
        newA.append(new_a)
    b = [-1 if x == 1 else 0 for x in b]
    return newA, b


# Includes additional constraint that sum-total of payoffs must EXCEED a certain amount
# This should be DONE AFTER the A,b = minus(A,b) step
def minpay(A, b, payout):
    dim = len(A[0])
    A.append([-1] * dim)
    b.append(-1 * payout)
    return A, b


laws = helper.load_pickle(
    LAW_PICKLE
)  # 'law2vector.pickle' has the same structure as votes.csv: a vector of all the vote results for each law
A, b = iterate(laws)
A, b = minus(A, b)
A, b = minpay(A, b, 18.81)
helper.init_pickle(A_PICKLE)
helper.init_pickle(B_PICKLE)
helper.dump_pickle(A_PICKLE, A)
helper.dump_pickle(B_PICKLE, b)
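A hypothetical toy run of the two helpers above (two laws, two members), just to show the extra row and bound that minpay appends after the sign flip:

A_toy = [[1, 0], [0, 1]]
b_toy = [1, 0]
A_toy, b_toy = minus(A_toy, b_toy)      # -> [[-1, 0], [0, -1]], [-1, 0]
A_toy, b_toy = minpay(A_toy, b_toy, 2)  # appends the row [-1, -1] and the bound -2
print(A_toy, b_toy)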
Example #8
from cvxopt import matrix, solvers


import helper
A = helper.load_pickle('../A.pickle')
b = helper.load_pickle('../b.pickle')
#print(A)
import numpy as np
import scipy.io

scipy.io.savemat('Ab.mat', dict(A=A, b=b))

with open("A.txt", "w") as text_file:
    print(f"{A}", file=text_file)
with open("b.txt", "w") as text_file:
    print(f"{b}", file=text_file)