def __init__(self):
    """Initialize the habituation-aware NRL sampler.

    Reads sampling options from ``self.parameters`` (populated by the
    base class) and prepares storage for habituation tracking.
    """
    NRLSampler.__init__(self)
    # beta is properly handled in the base class, so it is not read here.
    # self.beta = self.parameters[self.P_BETA]
    # Habituation-speed sampling flag.
    self.sampleHabitFlag = self.parameters[self.P_SAMPLE_HABITS]
    # Habituation initialization.
    self.habits = self.parameters[self.P_HABITS_INI]
    # BUGFIX: was ``params[self.P_TRUE_HABITS]`` -- ``params`` is not
    # defined in this scope; the parameter dict is ``self.parameters``.
    # (``in`` replaces py2-only ``has_key``.)
    if self.P_TRUE_HABITS in self.parameters:
        # Load true habits if they exist (e.g. for simulation checks).
        self.trueHabits = self.parameters[self.P_TRUE_HABITS]
    else:
        self.trueHabits = None
    # Parameter for the truncated Laplacian.
    self.Lexp = self.parameters[self.P_HAB_ALGO_PARAM]
    # Data-saving options.
    self.habitsHistory = None
    self.outputRatio = self.parameters[
        self.P_OUTPUT_RATIO]  # whether to output the acceptance ratios
    if self.outputRatio:
        self.ratioHistory = None  # to inspect the ratios
        self.ratiocourbeHistory = None  # to inspect the ratio curves
    self.labelsColors = self.parameters[self.P_LABELS_COLORS]
def saveCurrentValue(self):
    """Append current habits, NRLs and (optionally) ratios to the history.

    Only every ``sampleHistoryPace``-th iteration is recorded, and only
    when ``keepSamples`` is enabled.
    """
    NRLSampler.saveCurrentValue(self)
    if self.keepSamples:
        if (self.iteration % self.sampleHistoryPace) == 0:
            # BUGFIX: use identity tests ('is not None') instead of
            # '!= None' -- the histories become numpy arrays after the
            # first record, and '!= None' then triggers an elementwise
            # comparison instead of a scalar truth value.
            if self.habitsHistory is not None:
                self.habitsHistory = concatenate(
                    (self.habitsHistory, [self.habits]))
            else:
                self.habitsHistory = array([self.habits])
            if self.nrlsHistory is not None:
                self.nrlsHistory = concatenate(
                    (self.nrlsHistory, [self.currentValue]))
            else:
                self.nrlsHistory = array([self.currentValue])
            if self.outputRatio:
                if self.ratioHistory is not None:
                    self.ratioHistory = concatenate(
                        (self.ratioHistory, [self.ratio]))
                else:
                    self.ratioHistory = array([self.ratio])
                if self.ratiocourbeHistory is not None:
                    self.ratiocourbeHistory = concatenate(
                        (self.ratiocourbeHistory, [self.ratiocourbe]))
                else:
                    self.ratiocourbeHistory = array([self.ratiocourbe])
def checkAndSetInitValue(self, variables):
    """Initialize labels and NRLs, then the mixture and weighting variables."""
    # Base-class initialization of labels and NRL values.
    NRLSampler.checkAndSetInitLabels(self, variables)
    NRLSampler.checkAndSetInitNRL(self, variables)
    # Delegate initialization of the mixture parameters and of the
    # class-weighting probabilities to their own samplers.
    self.get_variable('mixt_params').checkAndSetInitValue(variables)
    weighting = variables[self.samplerEngine.I_WEIGHTING_PROBA_NRLS_BAR]
    weighting.checkAndSetInitValue(variables)
def updateObsersables(self):
    """Accumulate habituation statistics for posterior-mean estimates.

    (The method name keeps the historical typo 'Obsersables' to match
    the base class and callers.)
    """
    NRLSampler.updateObsersables(self)
    self.obs += 1
    self.cumulHabits += self.habits
    # Posterior mean of the habituation over observed iterations.
    self.meanHabits = self.cumulHabits / self.obs
    for c in xrange(self.nbClasses):
        # BUGFIX: the scratch buffer must be re-zeroed for every class.
        # It was previously allocated once before the loop, and since
        # putmask only overwrites entries where ``labels == c``, values
        # written for class c-1 leaked into the accumulation of class c.
        temp = zeros((self.nbConditions, self.nbVox), dtype=float)
        putmask(temp, self.labels == c, self.habits)
        self.cumulHabitsCond[c, :, :] += temp
        # Count how often each voxel is assigned to class c.
        self.voxelActivity[c, :, :] += self.labels == c
    # Per-class mean habituation, normalized by the per-voxel class counts.
    self.meanHabitsCond = self.cumulHabitsCond / self.voxelActivity
def linkToData(self, dataInput): NRLSampler.linkToData(self, dataInput) # recuperation de X (matrice (nbCond * ny * nh) ) self.varSingleCondXtrials = self.dataInput.varSingleCondXtrials # pour les onsets #self.nbTrials=zeros((self.nbSessions,self.nbConditions), dtype=int) # for isess in xrange(self.nbSessions): self.onsets = self.dataInput.onsets self.lastonset = 0. self.deltaOns = {} self.nbTrials = zeros(self.nbConditions, dtype=int) for nc in xrange(self.nbConditions): self.deltaOns[nc] = numpy.diff(self.onsets[nc]) self.nbTrials[nc] = len(self.onsets[nc]) self.lastonset = max(self.lastonset, self.onsets[nc][self.nbTrials[nc] - 1]) # astuce pour les donnees reelles -> pour remettre 'a zero' tout les 4 # occurences for trial in xrange(self.nbTrials[nc] - 1): if (((trial + 1) % 4) == 0): self.deltaOns[nc][trial] = 100. else: # on divise les deltaOns par 4. pour prendre plus en compte # les valeurs anterieures d'habituation self.deltaOns[nc][trial] = self.deltaOns[nc][trial] / 4. # calcul du dernier onset -> pour afficher les timesNRLs self.lastonset = int(self.lastonset + 1) # print self.lastonset # determination du nbGammas utile dans spExtract -> mais il y a une # erreur pour Xmask (dans sparsedot self.nbGammas = range(self.nbConditions) self.nnulls = range(self.nbConditions) self.Xmask = range(self.nbConditions) for nc in xrange(self.nbConditions): self.Xmask[nc] = range(self.nbTrials[nc]) self.nbGammas[nc] = zeros(self.nbTrials[nc], dtype=int) self.nnulls[nc] = zeros( (self.varSingleCondXtrials[nc, :, :] == 0).sum()) for i in xrange(self.nbTrials[nc]): #self.nbGammas[j][i] = (self.varSingleCondXtrials[j,:,:] == (i + 1)).sum() self.Xmask[nc][i] = transpose( where(self.varSingleCondXtrials[nc, :, :] == (i + 1))) self.nbGammas[nc][i] = shape(self.Xmask[nc][i])[0] logger.info('deltaOns :') logger.info(self.deltaOns)
def linkToData(self, dataInput): NRLSampler.linkToData(self, dataInput) # recuperation de X (matrice (nbCond * ny * nh) ) self.varSingleCondXtrials = self.dataInput.varSingleCondXtrials # pour les onsets #self.nbTrials=zeros((self.nbSessions,self.nbConditions), dtype=int) # for isess in xrange(self.nbSessions): self.onsets = self.dataInput.onsets self.lastonset = 0. self.deltaOns = {} self.nbTrials = zeros(self.nbConditions, dtype=int) for nc in xrange(self.nbConditions): self.deltaOns[nc] = numpy.diff(self.onsets[nc]) self.nbTrials[nc] = len(self.onsets[nc]) self.lastonset = max( self.lastonset, self.onsets[nc][self.nbTrials[nc] - 1]) # astuce pour les donnees reelles -> pour remettre 'a zero' tout les 4 # occurences for trial in xrange(self.nbTrials[nc] - 1): if (((trial + 1) % 4) == 0): self.deltaOns[nc][trial] = 100. else: # on divise les deltaOns par 4. pour prendre plus en compte # les valeurs anterieures d'habituation self.deltaOns[nc][trial] = self.deltaOns[nc][trial] / 4. # calcul du dernier onset -> pour afficher les timesNRLs self.lastonset = int(self.lastonset + 1) # print self.lastonset # determination du nbGammas utile dans spExtract -> mais il y a une # erreur pour Xmask (dans sparsedot self.nbGammas = range(self.nbConditions) self.nnulls = range(self.nbConditions) self.Xmask = range(self.nbConditions) for nc in xrange(self.nbConditions): self.Xmask[nc] = range(self.nbTrials[nc]) self.nbGammas[nc] = zeros(self.nbTrials[nc], dtype=int) self.nnulls[nc] = zeros( (self.varSingleCondXtrials[nc, :, :] == 0).sum()) for i in xrange(self.nbTrials[nc]): #self.nbGammas[j][i] = (self.varSingleCondXtrials[j,:,:] == (i + 1)).sum() self.Xmask[nc][i] = transpose( where(self.varSingleCondXtrials[nc, :, :] == (i + 1))) self.nbGammas[nc][i] = shape(self.Xmask[nc][i])[0] logger.info('deltaOns :') logger.info(self.deltaOns)
def initObservables(self):
    """Reset all habituation-related accumulators before sampling."""
    NRLSampler.initObservables(self)
    # Histories are (re)built lazily once sampling starts.
    self.nrlsHistory = None
    self.meanHabits = None
    # Per-voxel habituation accumulator, all conditions.
    self.cumulHabits = zeros((self.nbConditions, self.nbVox), dtype=float)
    # Per-class accumulators share the same 3-D layout.
    per_class = (self.nbClasses, self.nbConditions, self.nbVox)
    self.meanHabitsCond = zeros(per_class, dtype=float)
    self.cumulHabitsCond = zeros(per_class, dtype=float)
    self.voxelActivity = zeros(per_class, dtype=int)
    # Number of observed iterations.
    self.obs = 0
def jde_analyse(fdata, contrasts, output_dir):
    """Run a JDE MCMC analysis on ``fdata``, writing results to ``output_dir``.

    Uses a fixed (non-sampled) HRF variance and the given contrasts.
    """
    from pyhrf.jde.models import BOLDGibbsSampler as BG
    from pyhrf.jde.hrf import RHSampler
    from pyhrf.jde.nrl.bigaussian import NRLSampler

    # HRF variance is held fixed at 0.05; NRLs carry the contrasts.
    hrf_var_sampler = RHSampler(do_sampling=False, val_ini=np.array([0.05]))
    nrl_sampler = NRLSampler(contrasts=contrasts)
    gibbs = BG(nb_iterations=250,
               hrf_var=hrf_var_sampler,
               response_levels=nrl_sampler)
    analyser = JDEMCMCAnalyser(sampler=gibbs)
    analyser.set_gzip_outputs(True)
    treatment = FMRITreatment(fdata, analyser, output_dir=output_dir)
    treatment.run(parallel='local')
def jde_analyse(data=None, nbIterations=3, hrfModel='estimated', hrfNorm=1.,
                hrfTrick=False, sampleHrfVar=True, hrfVar=1e-5,
                keepSamples=False, samplesHistPace=1):
    """Run a joint-detection-estimation (JDE) analysis with a Gibbs sampler.

    Parameters:
        data: FMRI data to analyse; when None, pyhrf's default data
            restricted to ROI 2 is used.
        nbIterations: number of Gibbs-sampling iterations.
        hrfModel: 'estimated' (sample the HRF) or 'canonical' (fixed HRF).
        hrfNorm: normalisation constant passed to the HRF sampler.
        hrfTrick: HRF sampler 'trick' flag.
        sampleHrfVar: whether to sample the HRF variance.
        hrfVar: HRF variance value.
            NOTE(review): currently unused -- only the sampling flag is
            forwarded to RHSampler; confirm whether it should seed the
            sampler's initial value.
        keepSamples: whether to keep per-iteration NRL samples.
        samplesHistPace: pace at which samples are stored.

    Returns:
        The outputs produced by the JDE analyser.

    Raises:
        ValueError: if ``hrfModel`` is not 'estimated' or 'canonical'.
    """
    if data is None:
        data = pyhrf.data.get_default_data()
        data.keep_only_rois([2])

    if hrfModel == 'estimated':
        sampleHRF = True
    elif hrfModel == 'canonical':
        sampleHRF = False
    else:
        # ValueError is more precise than a bare Exception and stays
        # backward-compatible for callers catching Exception.
        raise ValueError('Unknown hrf model %s' % hrfModel)

    hrfSampler = HRFSampler({
        HRFSampler.P_SAMPLE_FLAG: sampleHRF,
        HRFSampler.P_NORMALISE: hrfNorm,
        HRFSampler.P_TRICK: hrfTrick,
    })
    hrfVarSampler = RHSampler({
        RHSampler.P_SAMPLE_FLAG: sampleHrfVar,
    })
    nrlSampler = NRLSampler({
        NRLSampler.P_KEEP_SAMPLES: keepSamples,
        NRLSampler.P_SAMPLE_HIST_PACE: samplesHistPace,
    })
    sampler = BOLDGibbsSampler({
        BOLDGibbsSampler.P_NB_ITERATIONS: nbIterations,
        BOLDGibbsSampler.P_RH: hrfVarSampler,
        BOLDGibbsSampler.P_HRF: hrfSampler,
        BOLDGibbsSampler.P_NRLS: nrlSampler,
    })
    analyser = JDEAnalyser({
        JDEAnalyser.P_SAMPLER: sampler,
        JDEAnalyser.P_OUTPUT_FILE: None,
    })
    result = analyser.analyse(data)
    output = analyser.outputResults(result)
    return output
def getOutputs(self):
    """Build the output map: mean habits, time-resolved NRLs and,
    when sample-keeping is enabled, the habit/ratio histories.

    Returns a dict of ``xndarray`` objects keyed by output name.
    """
    outputs = NRLSampler.getOutputs(self)
    axes_names = ['condition', 'voxel']
    axes_domains = {'condition': self.dataInput.cNames}
    # Posterior-mean habituation per (condition, voxel).
    outputs['pm_Habits'] = xndarray(self.meanHabits,
                                    axes_names=axes_names,
                                    axes_domains=axes_domains,
                                    value_label="Habituation")
    axes_names = ['condition', 'numeroVox', 'time']
    axes_domains = {
        'condition': self.dataInput.cNames,
        # NOTE(review): hard-coded to the first 10 voxels -- confirm
        # ``outputTimeNRLs`` is restricted accordingly.
        'numeroVox': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        'time': range(self.lastonset)
    }
    outputs['pm_timeNrls'] = xndarray(self.outputTimeNRLs,
                                      axes_names=axes_names,
                                      axes_domains=axes_domains,
                                      value_label="TimeNRLs")
    dt = self.samplerEngine.get_variable('hrf').dt
    rpar = self.dataInput.paradigm.get_joined_and_rastered(dt)
    # Binary paradigm mask per condition at temporal resolution dt.
    parmask = [where(rpar[c] == 1) for c in self.dataInput.cNames]
    tnrl = zeros(
        (self.nbVox, self.nbConditions, len(rpar[rpar.keys()[0]])),
        dtype=float)
    for i in xrange(self.nbVox):
        for j in xrange(self.nbConditions):
            # Place each trial's NRL at its onset position on the time axis.
            tnrl[i, j, parmask[j]] = self.timeNrls[j][i, :]
    # TODO: dirac outputs to use less space
    axes_domains = {
        'condition': self.dataInput.cNames,
        'time': arange(0, self.dataInput.paradigm.get_t_max(), dt)
    }
    outputs['pm_tNrls'] = xndarray(
        tnrl,
        axes_names=['voxel', 'condition', 'time'],
        axes_domains=axes_domains,
        value_label="nrl")
    if self.keepSamples:
        axes_names = ['iteration', 'condition', 'voxel']
        # NOTE(review): the 'classe' key has no matching axis in
        # axes_names -- confirm it is ignored by xndarray.
        axes_domains = {
            'classe': ['inactif', 'actif'],
            'condition': self.dataInput.cNames,
            'iteration': self.sampleHistoryIterations
        }
        outputs['pm_Habits_hist'] = xndarray(self.habitsHistory,
                                             axes_names=axes_names,
                                             axes_domains=axes_domains,
                                             value_label="Habituation")
        if self.outputRatio:
            axes_names = ['iteration', 'condition', 'voxel', 'ratio']
            axes_domains = {
                'condition': self.dataInput.cNames,
                'iteration': self.sampleHistoryIterations,
                'ratio': [1, 2]
            }
            outputs['pm_ratio_hist'] = xndarray(self.ratioHistory,
                                                axes_names=axes_names,
                                                axes_domains=axes_domains,
                                                value_label="Ratio")
            axes_names = [
                'iteration', 'condition', 'voxel', 'courbe', 'ratio'
            ]
            axes_domains = {
                'condition': self.dataInput.cNames,
                'iteration': self.sampleHistoryIterations,
                'ratio': [1, 2, 3, 4, 5],
                'courbe': arange(0., 1., 0.01)
            }
            outputs['pm_ratiocourbe_hist'] = xndarray(
                self.ratiocourbeHistory,
                axes_names=axes_names,
                axes_domains=axes_domains,
                value_label="Courbe Ratio")
            axes_names = ['condition', 'voxel']
            axes_domains = {'condition': self.dataInput.cNames}
            outputs['pm_compteur'] = xndarray(self.compteur,
                                              axes_names=axes_names,
                                              axes_domains=axes_domains,
                                              value_label="compteur")
    return outputs
def computeVarYTildeOpt(self, varXh):
    """Refresh ``varYtilde`` via the base class, then subtract the drift.

    Leaves the drift-corrected signal in ``self.varYbar``.
    """
    NRLSampler.computeVarYTildeOpt(self, varXh)
    drift = self.samplerEngine.get_variable('drift').matPl
    self.varYbar = self.varYtilde - drift
def checkAndSetInitValue(self, variables):
    """Initialize NRLs and labels (base class), then the habituation state."""
    # Base-class init covers NRL values and activation labels.
    NRLSampler.checkAndSetInitValue(self, variables)
    # Habituation-specific initialization.
    self.checkAndSetInitHabit(variables)
def cleanObservables(self):
    """Release accumulators that are no longer needed after sampling."""
    NRLSampler.cleanObservables(self)
    # The raw habituation accumulator is only needed to compute means.
    del self.cumulHabits
def getOutputs(self):
    """Build the output map: mean habits, time-resolved NRLs and,
    when sample-keeping is enabled, the habit/ratio histories.

    Returns a dict of ``xndarray`` objects keyed by output name.
    """
    outputs = NRLSampler.getOutputs(self)
    axes_names = ['condition', 'voxel']
    axes_domains = {'condition': self.dataInput.cNames}
    # Posterior-mean habituation per (condition, voxel).
    outputs['pm_Habits'] = xndarray(self.meanHabits,
                                    axes_names=axes_names,
                                    axes_domains=axes_domains,
                                    value_label="Habituation")
    axes_names = ['condition', 'numeroVox', 'time']
    axes_domains = {'condition': self.dataInput.cNames,
                    # NOTE(review): hard-coded to the first 10 voxels --
                    # confirm ``outputTimeNRLs`` is restricted accordingly.
                    'numeroVox': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                    'time': range(self.lastonset)}
    outputs['pm_timeNrls'] = xndarray(self.outputTimeNRLs,
                                      axes_names=axes_names,
                                      axes_domains=axes_domains,
                                      value_label="TimeNRLs")
    dt = self.samplerEngine.get_variable('hrf').dt
    rpar = self.dataInput.paradigm.get_joined_and_rastered(dt)
    # Binary paradigm mask per condition at temporal resolution dt.
    parmask = [where(rpar[c] == 1) for c in self.dataInput.cNames]
    tnrl = zeros((self.nbVox, self.nbConditions, len(rpar[rpar.keys()[0]])),
                 dtype=float)
    for i in xrange(self.nbVox):
        for j in xrange(self.nbConditions):
            # Place each trial's NRL at its onset position on the time axis.
            tnrl[i, j, parmask[j]] = self.timeNrls[j][i, :]
    # TODO: dirac outputs to use less space
    axes_domains = {'condition': self.dataInput.cNames,
                    'time': arange(0, self.dataInput.paradigm.get_t_max(), dt)}
    outputs['pm_tNrls'] = xndarray(tnrl, axes_names=[
        'voxel', 'condition', 'time'],
        axes_domains=axes_domains,
        value_label="nrl")
    if self.keepSamples:
        axes_names = ['iteration', 'condition', 'voxel']
        # NOTE(review): the 'classe' key has no matching axis in
        # axes_names -- confirm it is ignored by xndarray.
        axes_domains = {'classe': ['inactif', 'actif'],
                        'condition': self.dataInput.cNames,
                        'iteration': self.sampleHistoryIterations}
        outputs['pm_Habits_hist'] = xndarray(self.habitsHistory,
                                             axes_names=axes_names,
                                             axes_domains=axes_domains,
                                             value_label="Habituation")
        if self.outputRatio:
            axes_names = ['iteration', 'condition', 'voxel', 'ratio']
            axes_domains = {'condition': self.dataInput.cNames,
                            'iteration': self.sampleHistoryIterations,
                            'ratio': [1, 2]}
            outputs['pm_ratio_hist'] = xndarray(self.ratioHistory,
                                                axes_names=axes_names,
                                                axes_domains=axes_domains,
                                                value_label="Ratio")
            axes_names = [
                'iteration', 'condition', 'voxel', 'courbe', 'ratio']
            axes_domains = {'condition': self.dataInput.cNames,
                            'iteration': self.sampleHistoryIterations,
                            'ratio': [1, 2, 3, 4, 5],
                            'courbe': arange(0., 1., 0.01)
                            }
            outputs['pm_ratiocourbe_hist'] = xndarray(self.ratiocourbeHistory,
                                                      axes_names=axes_names,
                                                      axes_domains=axes_domains,
                                                      value_label="Courbe Ratio")
            axes_names = ['condition', 'voxel']
            axes_domains = {'condition': self.dataInput.cNames}
            outputs['pm_compteur'] = xndarray(self.compteur,
                                              axes_names=axes_names,
                                              axes_domains=axes_domains,
                                              value_label="compteur")
    return outputs