Example 1
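# Constructor of the SamplingPareto class (shown in full in Example 12):
# for each subject it loads reaction times and state/action/response
# sequences, tiles them over nb_repeat repetitions, and pre-computes the
# centered mean reaction time per representative step.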
def __init__(self, human, model, n=10000):
    self.human = human
    self.model = model
    self.subject = self.human.keys()
    self.n = n
    self.nb_repeat = 8
    self.nb_blocs = 4
    self.nb_trials = 39
    self.nb_param = len(self.model.bounds.keys())
    self.p_order = self.model.bounds.keys()
    self.cats = CATS(self.nb_trials)
    self.rt = dict()
    self.state = dict()
    self.action = dict()
    self.responses = dict()
    self.indice = dict()
    self.hrt = dict()
    for s in self.human.keys():
        self.rt[s] = np.array([self.human[s][i]['rt'][0:self.nb_trials,0] for i in range(1,self.nb_blocs+1)])
        self.rt[s] = np.tile(self.rt[s], (self.nb_repeat,1))
        self.state[s] = np.array([self.human[s][i]['sar'][0:self.nb_trials,0] for i in range(1,self.nb_blocs+1)])
        self.state[s] = np.tile(self.state[s], (self.nb_repeat,1))
        self.action[s] = np.array([self.human[s][i]['sar'][0:self.nb_trials,1] for i in range(1,self.nb_blocs+1)])
        self.action[s] = np.tile(self.action[s], (self.nb_repeat,1))
        self.responses[s] = np.array([self.human[s][i]['sar'][0:self.nb_trials,2] for i in range(1,self.nb_blocs+1)])
        self.responses[s] = np.tile(self.responses[s], (self.nb_repeat,1))
        step, indice = getRepresentativeSteps(self.rt[s], self.state[s], self.action[s], self.responses[s])
        self.hrt[s] = computeMeanRepresentativeSteps(step)[0]
        self.hrt[s] = self.center(self.hrt[s])
Example 2
# -----------------------------------

# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
beta = 1.7
length_memory = 100
noise_width = 0.01

correlation = "Z"

nb_trials = 42
nb_blocs = 100

cats = CATS(nb_trials)

bww = BayesianWorkingMemory('bmw', cats.states, cats.actions, length_memory,
                            noise_width, 1.0)
bww.setEntropyEvolution(nb_blocs, nb_trials)
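# setEntropyEvolution presumably pre-allocates per-bloc, per-trial storage
# for the model's entropy time course.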
# -----------------------------------

# -----------------------------------
# Training session
# -----------------------------------
modelTest(createStimulusList(0, 0))
# -----------------------------------

# -----------------------------------
# DATA extraction
# -----------------------------------
Example 3
# ARGUMENT MANAGER
# -----------------------------------

# -----------------------------------
# FUNCTIONS
# -----------------------------------

# -----------------------------------

# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
nb_blocs = 10
nb_trials = 80

cats = CATS(nb_trials)
models = dict({
    "fusion": FSelection(cats.states, cats.actions),
    "qlearning": QLearning(cats.states, cats.actions),
    "bayesian": BayesianWorkingMemory(cats.states, cats.actions),
    "selection": KSelection(cats.states, cats.actions),
    "mixture": CSelection(cats.states, cats.actions)
})
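# Five competing decision-making models, all built on the same state and
# action sets of the task.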
# ------------------------------------

# ------------------------------------
# Parameter testing
# ------------------------------------
with open("../Sferes/parameters.pickle", 'r') as f:
    p_test = pickle.load(f)
# tmp = dict({k:[] for k in p_test['distance']['S9']['fusion'].keys()})
Example 4
eta = 0.0001  # variance of evolution noise v (assumed here; value as in the other examples)
var_obs = 0.05  # variance of observation noise n (assumed here; value as in the other examples)
gamma = 0.9  # discount factor
init_cov = 10  # initialization of the covariance matrix
kappa = 0.1  # unscented transform parameter
beta = 1.7  # soft-max
sigma = 0.02  # reward rate update
tau = 0.08  # time step
noise_width = 0.01  # noise of the model-based system
w_0 = 0.5  # initial weight for the Collins model

correlation = "Z"
length_memory = 10

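# `human` is assumed to be the HLearning data container loaded as in the
# other snippets here.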
nb_trials = human.responses['meg'].shape[1]
nb_blocs = human.responses['meg'].shape[0]

cats = CATS()

selection = CSelection(
    KalmanQLearning('kalman', cats.states, cats.actions, gamma, beta, eta,
                    var_obs, init_cov, kappa),
    BayesianWorkingMemory('bmw', cats.states, cats.actions, length_memory,
                          noise_width, 1.0), w_0)

inter = 6
# -----------------------------------

# -----------------------------------

# -----------------------------------
# PARAMETERS Testing
# -----------------------------------
Example 5
parameters = {'alpha': 0.5,
              'beta': 5.0,
              'gain': 6.0,
              'gamma': 0.1,
              'length': 7,
              'noise': 0.0001,
              'sigma': 1.0,
              'threshold': 0.5,
              'reward': 0.5}


# -----------------------------------
nb_blocs = 1
nb_trials = 40

cats = CATS(nb_trials)
model = FSelection(cats.states, cats.actions, parameters)



Hbs = []
Hfs = []

model.startExp()
for i in xrange(nb_blocs):
    Hbs.append([])
    Hfs.append([])
    cats.reinitialize()
    model.startBloc()
    # cats.set_devaluation_interval(5)
    for j in xrange(nb_trials):
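        # Assumed continuation of the trial loop, following the pattern of
        # SamplingPareto.evaluate in Example 12:
        state = cats.getStimulus(j)
        action = model.chooseAction(state)
        reward = cats.getOutcome(state, action)
        model.updateValue(reward)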
Example 6
# PARAMETERS + INITIALIZATION
# -----------------------------------
eta = 0.0001     # variance of evolution noise v
var_obs = 0.05   # variance of observation noise n
gamma = 0.63     # discount factor
init_cov = 10    # initialization of the covariance matrix
kappa = 0.1      # unscented transform parameter
beta = 1.7
length_memory = 11
noise_width = 0.0106
correlation = "Z"

nb_trials = 42
nb_blocs = 42

cats = CATS(nb_trials)

models = dict({'kalman':KalmanQLearning('kalman', cats.states, cats.actions, gamma, beta, eta, var_obs, init_cov, kappa),
               'bmw':BayesianWorkingMemory('bmw', cats.states, cats.actions, length_memory, noise_width, 1.0)})

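# Note: `cats` is rebound below to a CATS_MODELS instance; the two models
# above keep references to the state and action sets of the original CATS.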
cats = CATS_MODELS(nb_trials, models.keys())

human = HLearning(dict({'meg':('../../PEPS_GoHaL/Beh_Model/',42), 'fmri':('../../fMRI',39)}))
sweep = Sweep_performances(human, cats, nb_trials, nb_blocs)
data = dict()
data['human'] = extractStimulusPresentation2(human.responses['meg'], human.stimulus['meg'], human.action['meg'], human.responses['meg'])


# -----------------------------------

w = dict({1:[],2:[],3:[]})
Example 7
# -----------------------------------

# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
eta = 0.0001  # variance of evolution noise v
var_obs = 0.05  # variance of observation noise n
beta = 3.0  # rate of exploration
gamma = 0.9  # discount factor
sigma = 0.02  # updating rate of the average reward
init_cov = 10  # initialization of the covariance matrix
kappa = 0.1  # unscented transform parameter

nb_trials = 40
nb_blocs = 100
cats = CATS()

responses = []
stimulus = []
action_list = []
reaction = []
# -----------------------------------

# -----------------------------------
# Kalman learning session
# -----------------------------------

for i in xrange(nb_blocs):
    values = createQValuesDict(cats.states, cats.actions)
    covariance = createCovarianceDict(
        len(cats.states) * len(cats.actions), init_cov, eta)
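    # Assumed continuation of the bloc loop, mirroring Example 8:
    cats.reinitialize(nb_trials, 'meg')
    for j in xrange(nb_trials):
        KalmanQlearning(j, values, covariance, False)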
Example 8
# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
eta = 0.0001     # variance of evolution noise v
var_obs = 0.05   # variance of observation noise n
beta = 3.0       # rate of exploration
gamma = 0.9      # discount factor
alpha = 0.8
init_cov = 10    # initialization of the covariance matrix
kappa = 0.1      # unscented transform parameter

nb_trials = 42
nStepEm = 2000
pOutset = 0.2
cats = CATS()
Responses = dict()


# -----------------------------------
# Kalman learning session
# -----------------------------------
Kdata = []
for i in xrange(200):
    answer = []
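    # `answer` is presumably filled in place by KalmanQlearning, a helper
    # defined elsewhere in the original file.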
    values = createQValuesDict(cats.states, cats.actions)
    covariance = createCovarianceDict(len(cats.states)*len(cats.actions), init_cov, eta)
    cats.reinitialize(nb_trials, 'meg')
    for j in xrange(nb_trials):
        KalmanQlearning(j, values, covariance, False)
    Kdata.append(list(answer))
Example 9
length_memory = 9  # size of working memory
threshold = 56.0  # inference threshold
sigma = 0.00002  # updating rate of the average reward
gain = 84.0
alpha = 0.39
#########################
# optimization parameters
n_run = 1
n_grid = 30
maxiter = 10000
maxfun = 10000
xtol = 0.01
ftol = 0.01
disp = True
#########################
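# gamma, beta, eta, var_obs, init_cov, kappa and noise are not defined in
# this fragment; they are presumably set earlier in the original file
# (typical values appear in the other examples here).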
cats = CATS(0)

models = dict({
    'kalman':
    KalmanQLearning('kalman', cats.states, cats.actions, gamma, beta, eta,
                    var_obs, init_cov, kappa),
    'bwm_v1':
    BayesianWorkingMemory('v1', cats.states, cats.actions, length_memory,
                          noise, threshold),
    'bwm_v2':
    BayesianWorkingMemory('v2', cats.states, cats.actions, length_memory,
                          noise, threshold),
    'qlearning':
    QLearning('q', cats.states, cats.actions, gamma, alpha, beta),
    'fusion':
    FSelection("test", cats.states, cats.actions, alpha, beta, gamma,
Example 10
# -----------------------------------
human = HLearning(
    dict({
        'meg': ('../../PEPS_GoHaL/Beh_Model/', 48),
        'fmri': ('../../fMRI', 39)
    }))
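# Each tuple is presumably (data directory, trials per bloc): 48 for the
# MEG session and 39 for the fMRI session.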

# -----------------------------------

# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
nb_blocs = 4
nb_trials = 48
nb_repeat = 100
cats = CATS(nb_trials)
models = dict({
    "fusion": FSelection(cats.states, cats.actions),
    "qlearning": QLearning(cats.states, cats.actions),
    "bayesian": BayesianWorkingMemory(cats.states, cats.actions),
    "selection": KSelection(cats.states, cats.actions),
    "mixture": CSelection(cats.states, cats.actions)
})

# ------------------------------------
# Parameter testing
# ------------------------------------
with open("extremum.pickle", 'r') as f:
    p_test = pickle.load(f)

colors_m = dict({
Example 11
# ARGUMENT MANAGER
# -----------------------------------

# -----------------------------------
# FUNCTIONS
# -----------------------------------

# -----------------------------------

# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
nb_blocs = 1
nb_trials = 80

cats = CATS(nb_trials)
models = dict({"fusion":FSelection(cats.states, cats.actions),
                "qlearning":QLearning(cats.states, cats.actions),
                "bayesian":BayesianWorkingMemory(cats.states, cats.actions),
                "selection":KSelection(cats.states, cats.actions),
                "mixture":CSelection(cats.states, cats.actions)})
# ------------------------------------

# ------------------------------------
# Parameter testing
# ------------------------------------
with open("../Sferes/parameters.pickle", 'r') as f:
	p_test = pickle.load(f)

with open("../Sferes/parameters_single.pickle", 'r') as f:
	p_single = pickle.load(f)
Example 12
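# Note: these snippets are Python 2 (xrange, list-returning dict.keys()).
# Imports needed by this class; the project-specific helpers (CATS,
# getRepresentativeSteps, computeMeanRepresentativeSteps,
# unwrap_self_multiSampling) are defined in other modules of the original
# code base.
import pickle
import numpy as np
from multiprocessing import Pool
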
class SamplingPareto(object):
    """ Simple random sampling of parameter sets to draw the Pareto front """

    def __init__(self, human, model, n=10000):
        self.human = human
        self.model = model
        self.subject = self.human.keys()
        self.n = n
        self.nb_repeat = 8
        self.nb_blocs = 4
        self.nb_trials = 39
        self.nb_param = len(self.model.bounds.keys())
        self.p_order = self.model.bounds.keys()
        self.cats = CATS(self.nb_trials)
        self.rt = dict()
        self.state = dict()
        self.action = dict()
        self.responses = dict()
        self.indice = dict()
        self.hrt = dict()        
        for s in self.human.keys():
            self.rt[s] = np.array([self.human[s][i]['rt'][0:self.nb_trials,0] for i in range(1,self.nb_blocs+1)])
            self.rt[s] = np.tile(self.rt[s], (self.nb_repeat,1))
            self.state[s] = np.array([self.human[s][i]['sar'][0:self.nb_trials,0] for i in range(1,self.nb_blocs+1)])
            self.state[s] = np.tile(self.state[s], (self.nb_repeat,1))
            self.action[s] = np.array([self.human[s][i]['sar'][0:self.nb_trials,1] for i in range(1,self.nb_blocs+1)])
            self.action[s] = np.tile(self.action[s], (self.nb_repeat,1))
            self.responses[s] = np.array([self.human[s][i]['sar'][0:self.nb_trials,2] for i in range(1,self.nb_blocs+1)])
            self.responses[s] = np.tile(self.responses[s], (self.nb_repeat,1))
            step, indice = getRepresentativeSteps(self.rt[s], self.state[s], self.action[s], self.responses[s])
            self.hrt[s] = computeMeanRepresentativeSteps(step)[0]
            self.hrt[s] = self.center(self.hrt[s])

    def _convertStimulus(self, s):
        return (s == 1)*'s1' + (s == 2)*'s2' + (s == 3)*'s3'

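    # Robust centering: subtract the median and divide by the interquartile
    # range.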
    def center(self, x):
        x = x - np.median(x)
        x = x / float(np.percentile(x, 75)-np.percentile(x, 25))
        return x

    def evaluate(self, s):
        p_test = {k:np.random.uniform(self.model.bounds[k][0],self.model.bounds[k][1]) for k in self.model.bounds.keys()}
        self.model.setAllParameters(p_test)
        self.model.startExp()
        for i in xrange(self.nb_repeat):
            for j in xrange(self.nb_blocs):
                self.cats.reinitialize()
                self.cats.stimuli = np.array(map(self._convertStimulus, self.human[s][j+1]['sar'][:,0]))
                self.model.startBloc()
                for k in xrange(self.nb_trials):
                    state = self.cats.getStimulus(k)
                    action = self.model.chooseAction(state)
                    reward = self.cats.getOutcome(state, action)
                    self.model.updateValue(reward)
        self.model.reaction = np.array(self.model.reaction)
        self.model.action = np.array(self.model.action)
        self.model.responses = np.array(self.model.responses)
        self.model.value = np.array(self.model.value)
        step, indice = getRepresentativeSteps(self.model.reaction, self.state[s], self.model.action, self.model.responses)
        hrtm = computeMeanRepresentativeSteps(step)[0]
        hrtm = self.center(hrtm)

        rt = -np.sum(np.power(hrtm-self.hrt[s], 2))

        choice = np.sum(np.log(self.model.value))
        return np.array([choice, rt])

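    # Draw self.n random parameter sets for subject s; every n = 100 samples,
    # merge the batch with the running front and re-extract the Pareto set.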
    def multiSampling(self, s):
        n = 100
        data = np.zeros((n, 3))
        pareto = np.array([[-1, -1000., -1000.0]])
        p = np.zeros((n,self.nb_param+1))        
        good = np.zeros((1, self.nb_param+1))
        good[0,0] = -1.0
        for i in xrange(1,self.n+1):
            ind = (i-1)%n            
            data[ind,0] = i
            p[ind,0] = i
            data[ind,1:] = self.evaluate(s)
            p[ind,1:] = np.array([self.model.parameters[k] for k in self.p_order])
            if i%n == 0:
                pareto, good = self.constructParetoFrontier(np.vstack((data, pareto)), np.vstack((p, good)))
                
        return dict({s:np.hstack((pareto, good[:,1:]))})

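    # Sort candidates by the first objective (descending) and keep the points
    # whose second objective does not decrease: the weak Pareto front when
    # maximising both objectives (choice log-likelihood and RT fit).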
    def constructParetoFrontier(self, front, param):
        front = front[front[:,1].argsort()][::-1]
        pareto_frontier = [front[0]]
        for pair in front[1:]:
            if pair[2] >= pareto_frontier[-1][2]:
                pareto_frontier.append(pair)
        pareto_frontier = np.array(pareto_frontier)        
        good = np.array([param[param[:,0] == i][0] for i in pareto_frontier[:,0]])
        return pareto_frontier, good

    def run(self):
        subject = self.subject
        pool = Pool(len(subject))
        self.data = pool.map(unwrap_self_multiSampling, zip([self]*len(subject), subject))
        tmp = dict()
        for i in self.data:
            s = i.keys()[0]
            tmp[s] = i[s]            
        self.data = tmp

    def save(self, output_file):
        output = open(output_file, 'wb')
        pickle.dump(self.data, output)
        output.close()
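
# A minimal usage sketch, assuming `human` maps subjects to per-bloc
# {'rt', 'sar'} arrays and `model` exposes `bounds` plus the simulation
# interface used above:
sampler = SamplingPareto(human, model, n=10000)
sampler.run()                          # one process per subject
sampler.save('pareto_fronts.pickle')   # hypothetical output file name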
Example 13
# PARAMETERS + INITIALIZATION
# -----------------------------------
eta = 0.0001  # variance of evolution noise v
var_obs = 0.05  # variance of observation noise n
gamma = 0.63  # discount factor
init_cov = 10  # initialization of the covariance matrix
kappa = 0.1  # unscented transform parameter
beta = 1.7
length_memory = 11
noise_width = 0.0106
correlation = "Z"

nb_trials = 42
nb_blocs = 42

cats = CATS(nb_trials)

models = dict({
    'kalman':
    KalmanQLearning('kalman', cats.states, cats.actions, gamma, beta, eta,
                    var_obs, init_cov, kappa),
    'bmw':
    BayesianWorkingMemory('bmw', cats.states, cats.actions, length_memory,
                          noise_width, 1.0)
})

cats = CATS_MODELS(nb_trials, models.keys())

human = HLearning(
    dict({
        'meg': ('../../PEPS_GoHaL/Beh_Model/', 42),
Example 14
# -----------------------------------
# PARAMETERS + INITIALIZATION
# -----------------------------------
eta = 0.0001  # variance of evolution noise v
var_obs = 0.05  # variance of observation noise n
beta = 3.0  # rate of exploration
gamma = 0.9  # discount factor
alpha = 0.8
init_cov = 10  # initialization of the covariance matrix
kappa = 0.1  # unscented transform parameter

nb_trials = 42
nStepEm = 2000
pOutset = 0.2
cats = CATS()
Responses = dict()

# -----------------------------------
# Kalman learning session
# -----------------------------------
Kdata = []
for i in xrange(200):
    answer = []
    values = createQValuesDict(cats.states, cats.actions)
    covariance = createCovarianceDict(
        len(cats.states) * len(cats.actions), init_cov, eta)
    cats.reinitialize(nb_trials, 'meg')
    for j in xrange(nb_trials):
        KalmanQlearning(j, values, covariance, False)
    Kdata.append(list(answer))