Example #1
# Below are the pre-calculated log-likelihood (not negative!) values for l_INI, with alpha_INI = 10, 1, 0.01.
# The more positive these values are, the more likely the sample follows max-min sampling.

from cal_L_INI import cal_L_INI

l_INI_10 = -cal_L_INI(10.)
l_INI_1 = -cal_L_INI(1.)
l_INI_001 = -cal_L_INI(.01)



sample_size = 100
num_ini_guess = 2
alpha = 10.0
soln = CovarianceEstimate(X, y, bounds=bounds, xbounds=xbounds, alpha=alpha, sample_size=sample_size,
                          num_ini_guess=num_ini_guess, initial_guess=initial_guess, l_INI=l_INI_10[:(n_trajectory-2)])
# x_temp =np.random.normal(initial_guess, scale=0.1, size=(1,31))
# # x_temp = np.ones((31,))*10.0
f0 = soln.model.obj(initial_guess, alpha=alpha, l_INI=l_INI_10[:(n_trajectory-2)])
print(f0)
# sig_test = np.zeros(31)
# sig_test[-1] = 2.6
# soln.model.f_path(sig_test)
[obj_set, sigma_set] = soln.solve(plot=False)

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)
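# (Interpretation, an assumption: solve() appears to run one optimization per initial guess,
# so each row of obj_set / sigma_set is one restart and argmin keeps the best restart.)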

# # load bounds
Example #2
        {
            'mix': pre.pca.mixing_.tolist(),
            'unmix': pre.pca.components_.tolist(),
            'mean': pre.pca.mean_.tolist()
        },
        outfile,
        sort_keys=True,
        indent=4,
        ensure_ascii=False)

np.savetxt('mix_scaled_pWorse_init.txt',
           X[:2])  # first two plays for later init.
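# A minimal sketch of the later init step implied by the comment above (assumption: these two
# saved rows are reloaded as warm-start points; the name `init_points` is hypothetical):
# init_points = np.loadtxt('mix_scaled_pWorse_init.txt')   # shape (2, n_features)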

bounds = np.array(31 * [[-1., 1.]])

soln = CovarianceEstimate(X, y, bounds=bounds, alpha=10.)
# sig_test = np.zeros(31)
# sig_test[-1] = 2.6
# soln.model.f_path(sig_test)
[obj_set, sigma_set] = soln.solve(plot=True)

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)

# # load bounds
# from numpy import loadtxt
# bounds = loadtxt("ego_bounds.txt", comments="#", delimiter=",", unpack=False)
#
# # store sigma for simulation
Example #3
  0.15516951,  1.22674106,  0.65270824,  1.16261199,  1.38351062,  0.98991564,
  1.62832714,  1.33700206,  1.32064881,  1.43865368,  0.84637371,  0.96145962,
  1.86322818,  0.76696031,  1.39329787,  0.75810048,  1.59063005,  1.49758157,
  0.86818537,  1.01713967,  1.21994997,  0.50326903,  0.26969321])
l_INI_001 = -np.array([ 0.0077654,   0.01148891,  0.0085491,   0.00846845,  0.00377641,  0.00445976,
  0.00094088,  0.01169417,  0.00588484,  0.01099119,  0.01322844,  0.00925459,
  0.0157014,   0.01277546,  0.01259828,  0.0137948,   0.00783676,  0.00910085,
  0.01805538,  0.00703961,  0.01333193,  0.0069961,   0.01534839,  0.01438826,
  0.00808909,  0.00955123,  0.01161055,  0.00448607,  0.00210488])



sample_size = 100
num_ini_guess = 2
alpha = 10.0
soln = CovarianceEstimate(X, y, bounds=bounds, xbounds=xbounds, alpha=alpha, sample_size=sample_size,
                          num_ini_guess=num_ini_guess, initial_guess=initial_guess, l_INI=l_INI_10[:(n_trajectory-2)])
# x_temp =np.random.normal(initial_guess, scale=0.1, size=(1,31))
# # x_temp = np.ones((31,))*10.0
f0 = soln.model.obj(initial_guess, alpha=alpha, l_INI=l_INI_10[:(n_trajectory-2)])
print(f0)
# sig_test = np.zeros(31)
# sig_test[-1] = 2.6
# soln.model.f_path(sig_test)
[obj_set, sigma_set] = soln.solve(plot=False)

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)

# # load bounds
Example #4
# # Parameters are there to speed up after saving a pkl.
pre = Preprocess(pca_model='../eco_full_pca.pkl', all_dat='../all_games.pkl')
# pre = Preprocess()
# pre.get_json('alluser_control.json')  # uncomment this to create the pkl file needed!!
# pre.train_pca()
X, y = pre.ready_player_one(3)

from sklearn.preprocessing import StandardScaler, MinMaxScaler

# scale = StandardScaler()
scale = MinMaxScaler((-1., 1.))
X = scale.fit_transform(X)
#
bounds = np.array(30 * [[-1., 1.]])
# # get sigma estimate that maximizes the sum of expected improvements
soln = CovarianceEstimate(X, y, bounds=bounds)
[obj_set, sigma_set] = soln.solve()
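# For reference, a minimal sketch of the expected-improvement (EI) term the objective is assumed
# to sum over; `mu`, `sd`, and `f_best` would come from the Gaussian-process model inside
# CovarianceEstimate, so this helper is illustrative only and not the package's actual API.
from scipy.stats import norm

def expected_improvement(mu, sd, f_best):
    z = (f_best - mu) / sd  # standardized improvement over the incumbent best
    return (f_best - mu) * norm.cdf(z) + sd * norm.pdf(z)  # EI for a minimization problem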

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)

# # load bounds
# from numpy import loadtxt
# bounds = loadtxt("ego_bounds.txt", comments="#", delimiter=",", unpack=False)
#
# # store sigma for simulation
# # TODO: need to specify file name based on settings, e.g., optimization algorithm and input data source (best player?)

file_address = 'p3_slsqp_sigma_oldICA.json'
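# A minimal sketch of the "store sigma for simulation" step implied above
# (assumption: `sigma` is the 1-D array selected from sigma_set a few lines up):
import json
with open(file_address, 'w') as f:
    json.dump(sigma.tolist(), f, sort_keys=True, indent=4, ensure_ascii=False)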
Example #5
    json.dump(temp.components_.tolist(), f, sort_keys=True, indent=4, ensure_ascii=False)
f.close()


with open('pWorse_range_transform.json', 'w') as outfile:
    json.dump({'range':scale.scale_.tolist(), 'min':scale.min_.tolist()},
              outfile, sort_keys=True, indent=4, ensure_ascii=False)
with open('pWorse_ICA_transform.json', 'w') as outfile:
    json.dump({'mix':pre.pca.mixing_.tolist(), 'unmix':pre.pca.components_.tolist(), 'mean':pre.pca.mean_.tolist()},
              outfile, sort_keys=True, indent=4, ensure_ascii=False)
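# Sketch of how the saved range transform is reapplied later (assumption: `rng` is the dict
# read back from 'pWorse_range_transform.json'); this mirrors what MinMaxScaler.transform does:
# x_scaled = np.asarray(x) * np.asarray(rng['range']) + np.asarray(rng['min'])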

np.savetxt('mix_scaled_pWorse_init.txt', X[:2])  # first two plays for later init.

bounds = np.array(31*[[-1., 1.]])

soln = CovarianceEstimate(X, y, bounds=bounds, alpha=10.)
# sig_test = np.zeros(31)
# sig_test[-1] = 2.6
# soln.model.f_path(sig_test)
[obj_set, sigma_set] = soln.solve(plot=True)

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)

# # load bounds
# from numpy import loadtxt
# bounds = loadtxt("ego_bounds.txt", comments="#", delimiter=",", unpack=False)
#
# # store sigma for simulation
Example #6
# # Parameters are there to speed up after saving a pkl.
pre = Preprocess(pca_model='../eco_full_pca.pkl', all_dat='../all_games.pkl')
# pre = Preprocess()
# pre.get_json('alluser_control.json')  # uncomment this to create the pkl file needed!!
# pre.train_pca()
X, y = pre.ready_player_one(2)

from sklearn.preprocessing import StandardScaler, MinMaxScaler

# scale = StandardScaler()
scale = MinMaxScaler((-1., 1.))
X = scale.fit_transform(X)
#
bounds = np.array(30*[[-1., 1.]])
# # get sigma estimate that maximizes the sum of expected improvements
soln = CovarianceEstimate(X, y, bounds=bounds)
[obj_set, sigma_set] = soln.solve()

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)

# # load bounds
# from numpy import loadtxt
# bounds = loadtxt("ego_bounds.txt", comments="#", delimiter=",", unpack=False)
#
# # store sigma for simulation
# # TODO: need to specify file name based on settings, e.g., optimization algorithm and input data source (best player?)

file_address = 'p3_slsqp_sigma_oldICA.json'
Example #7
# # Delete the parameters when running for the first time or for a new player.
# # Parameters are there to speed up after saving a pkl.
pre = Preprocess(pca_model='eco_full_pca.pkl', all_dat='all_games.pkl')
# pre = Preprocess()
# pre.get_json('alluser_control.json')  # uncomment this to create the pkl file needed!!
# pre.train_pca()
X, y = pre.ready_player_one(2)

from sklearn.preprocessing import StandardScaler, MinMaxScaler

# scale = StandardScaler()
scale = MinMaxScaler((-1., 1.))
X = scale.fit_transform(X)
#
# # get sigma estimate that maximizes the sum of expected improvements
soln = CovarianceEstimate(X, y)
[obj_set, sigma_set] = soln.solve()

# # pick the best solution
obj = obj_set.min(axis=0)
sigma = sigma_set[obj_set.argmin(axis=0), :]
print(obj, sigma)

# # load bounds
# from numpy import loadtxt
# bounds = loadtxt("ego_bounds.txt", comments="#", delimiter=",", unpack=False)
#
# # store sigma for simulation
# # TODO: need to specify file name based on settings, e.g., optimization algorithm and input data source (best player?)

file_address = 'p3_slsqp_sigma_oldICA.json'