Ejemplo n.º 1
0
def regret_vs_m(algorithms, m_vals, N, T, epsilon, simulations=10):
    """Record the regret of each algorithm on Parallel models of varying m.

    For every m in m_vals a fresh Parallel model is built and every
    algorithm is run `simulations` times with horizon T.

    Args:
        algorithms: bandit algorithm instances, each exposing run(T, model).
        m_vals: iterable of m parameters passed to Parallel.create.
        N: number of variables in each Parallel model.
        T: horizon (number of rounds) for each run.
        epsilon: epsilon parameter passed to Parallel.create.
        simulations: independent repetitions per (algorithm, m) pair.

    Returns:
        (regret, models): regret is a float ndarray of shape
        (len(algorithms), len(m_vals), simulations); models is the list of
        constructed Parallel models, one per value in m_vals.
    """
    models = []
    regret = np.zeros((len(algorithms), len(m_vals), simulations))
    for m_indx, m in enumerate(m_vals):
        model = Parallel.create(N, m, epsilon)
        models.append(model)
        # print() with one argument is valid in both Python 2 and Python 3
        # (the parentheses merely group in Python 2), unlike the bare
        # print statement used originally.
        print("built model {0}".format(m))
        # range() works on both Python versions; xrange is Python-2-only.
        for s in range(simulations):
            for a_indx, algorithm in enumerate(algorithms):
                regret[a_indx, m_indx, s] = algorithm.run(T, model)

    return regret, models


# Experiment 1: compare regret across algorithms as m varies.
# Log the code that produced this run for reproducibility.
experiment = Experiment(1)
experiment.log_code()

# Experiment 1
N = 50                 # number of variables in each Parallel model
epsilon = .3           # epsilon parameter for Parallel.create
simulations = 10000    # independent runs per (algorithm, m) pair
T = 400                # horizon (rounds) per run
# Bandit algorithms to compare; each exposes run(T, model).
algorithms = [
    GeneralCausal(truncate='None'),
    ParallelCausal(),
    SuccessiveRejects(),
    AlphaUCB(2),
    ThompsonSampling()
]
# Sweep m over even values 2, 4, ..., N-2.
m_vals = range(2, N, 2)
Ejemplo n.º 2
0
def regret_vs_T(model, algorithms, T_vals, simulations=10):
    """Measure regret and best-action pull counts as the horizon T grows.

    Args:
        model: bandit model with K arms (model.K) accepted by algorithm.run.
        algorithms: algorithm instances exposing run(T, model) and, after a
            run, a best_action attribute (an arm index or None).
        T_vals: iterable of horizons to evaluate.
        simulations: independent repetitions per (algorithm, T) pair.

    Returns:
        (regret, pulls): regret is a float ndarray of shape
        (len(algorithms), len(T_vals), simulations); pulls is an int ndarray
        of shape (len(algorithms), len(T_vals), model.K) counting how often
        each arm was selected as best_action across simulations.
    """
    regret = np.zeros((len(algorithms), len(T_vals), simulations))
    pulls = np.zeros((len(algorithms), len(T_vals), model.K), dtype=int)
    for T_indx, T in enumerate(T_vals):

        for a_indx, algorithm in enumerate(algorithms):
            # range() replaces Python-2-only xrange for forward compatibility.
            for s in range(simulations):
                regret[a_indx, T_indx, s] = algorithm.run(T, model)
                if algorithm.best_action is not None:
                    pulls[a_indx, T_indx, algorithm.best_action] += 1
        # Progress indicator; print() is valid in both Python 2 and 3.
        print(T)

    return regret, pulls


# Experiment 6: regret vs horizon on a confounded parallel model.
experiment = Experiment(6)
experiment.log_code()

N = 50        # total number of variables
N1 = 1        # size of the first variable group; the rest get N - N1
pz = .4       # P(Z = 1) for the confounder Z
# Reward-probability parameters; exact semantics are defined by
# ParallelConfounded -- verify against that class before reusing.
q = (0.00001, 0.00001, .4, .65)
epsilon = .3
# Reward distribution constructed so the best arm is epsilon-better.
pY = ParallelConfounded.pY_epsilon_best(q, pz, epsilon)

simulations = 10000

model = ScaleableParallelConfounded(q, pz, pY, N1, N - N1)

# Horizons to sweep: 25, 50, ..., 625.
T_vals = range(25, 626, 25)
Ejemplo n.º 3
0
# -*- coding: utf-8 -*-
"""
Created on Fri Oct  7 07:41:12 2016

@author: finn

Smoke test for Experiment state logging: create some globals, log them,
delete them, then read the saved state back from disk.
"""

from experiment_config import Experiment

e = Experiment(1)

def returnthing():
    """Return the constant 5 (used to exercise state logging)."""
    value = 5
    return value


# Sample state for the experiment logger to capture.
v1 = 4
v2 = [1, 2, 3]
x = returnthing()

e.log_state(globals())

# Delete the variables so reading the state back is a real round-trip test.
del v1
del v2
del x

d = e.read_state(e.state_filename)

# Earlier manual approach using shelve directly, kept for reference:
#import shelve
#d = {}
#db = shelve.open(e.state_filename)
Ejemplo n.º 4
0
        # NOTE(review): fragment -- the enclosing function (apparently looping
        # over N1 values with index m_indx) begins before this view; left
        # byte-identical. Builds the confounded model without precomputing m,
        # computes m for a hand-chosen eta, then records regret per algorithm.
        model = ScaleableParallelConfounded(q,pz,pY,N1,N-N1,compute_m = False)
        # eta components presumably weight the action distribution -- verify
        # against ScaleableParallelConfounded.compute_m.
        eta = [0,0,1.0/(N1+2.0),0,0,0,1-N1/(N1+2.0)]
        model.compute_m(eta_short = eta)

        print N1,model.m
        m_vals.append(model.m)
        models.append(model)
        for a_indx, algorithm in enumerate(algorithms):
            for s in xrange(simulations):
                regret[a_indx,m_indx,s] = algorithm.run(T,model)


    return m_vals,regret,models
    
   
# Experiment 4: regret as N1 (first-group size) varies.
experiment = Experiment(4)
experiment.log_code()

N = 50                         # total number of variables
N1_vals = range(1,N,3)         # sweep of first-group sizes
pz = .4                        # P(Z = 1) for the confounder
# Reward-probability parameters; semantics defined by ParallelConfounded.
q = (0.00001,0.00001,.4,.65)
epsilon = .3
simulations = 10000
T = 400                        # horizon per run
algorithms = [SuccessiveRejects(),GeneralCausal(),AlphaUCB(2),ThompsonSampling()]


# NOTE(review): duplicate assignment -- epsilon was already set to .3 above.
epsilon = .3
pY = ParallelConfounded.pY_epsilon_best(q,pz,epsilon)
Ejemplo n.º 5
0

def regret_vs_T(model, algorithms, T_vals, simulations=10):
    """Record the regret of each algorithm on `model` over growing horizons.

    Args:
        model: bandit model instance accepted by algorithm.run.
        algorithms: algorithm instances exposing run(T, model).
        T_vals: iterable of horizons to evaluate.
        simulations: independent repetitions per (algorithm, T) pair.

    Returns:
        A float ndarray of shape (len(algorithms), len(T_vals), simulations)
        holding the regret of every run.
    """
    regret = np.zeros((len(algorithms), len(T_vals), simulations))

    for T_indx, T in enumerate(T_vals):
        for a_indx, algorithm in enumerate(algorithms):
            # range() replaces Python-2-only xrange for forward compatibility.
            for s in range(simulations):
                regret[a_indx, T_indx, s] = algorithm.run(T, model)
        # Progress indicator; print() is valid in both Python 2 and 3.
        print(T)

    return regret


# Experiment 3: regret as a function of horizon T on a Parallel model.
experiment = Experiment(3)
experiment.log_code()

simulations = 10000
N = 50           # number of variables in the Parallel model
m = 2            # m parameter for Parallel.create
epsilon = .3
model = Parallel.create(N, m, epsilon)
# Horizons from 10 up to 6*K (K = number of arms) in steps of 25.
T_vals = range(10, 6 * model.K, 25)
# Algorithms under comparison; each exposes run(T, model).
algorithms = [
    GeneralCausal(truncate='None'),
    ParallelCausal(),
    SuccessiveRejects(),
    AlphaUCB(2),
    ThompsonSampling()
]
Ejemplo n.º 6
0
# -*- coding: utf-8 -*-
"""
Created on Fri Oct  7 08:04:42 2016

@author: finn

Restore previously logged experiment state: load the shelve file written by
an earlier run and inject its saved variables into this module's globals.
"""
from experiment_config import Experiment
e = Experiment(1)
globals().update(e.read_state("results/experiment_state1_20161007_0842.shelve"))


# Scratch notes: manual shelve round-trip kept for reference.
#db = shelve.open("a.shelve")
#db["a"] = 4
#db["b"] = [1,2,3]
#for key in db:
#    print key
#db.close()
#
#print "newly opened"
#db = shelve.open("a.shelve")
#for key in db:
#    print key
#db.close()
#