Exemplo n.º 1
0
 def test_brf_physio_reg(self):
     """Check estimation of the BRF on simulated ASL data (high SNR)."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simulation = simulate_asl(self.tmp_dir)
     data = FmriData.from_simulation_dict(simulation)
     self._test_specific_samplers(['brf'], data,
                                  nb_its=100, check_fv='raise')
Exemplo n.º 2
0
 def test_prls(self):
     """Check estimation of perfusion response levels (high SNR)."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simulation = simulate_asl(self.tmp_dir)
     data = FmriData.from_simulation_dict(simulation)
     self._test_specific_parameters(['perf_response_levels'], data,
                                    simulation, nItMax=100,
                                    estimateC=True)
Exemplo n.º 3
0
 def test_prf_var(self):
     """Check estimation of the PRF variance."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simulation = simulate_asl(self.tmp_dir)
     data = FmriData.from_simulation_dict(simulation)
     self._test_specific_samplers(['prf_var'], data,
                                  nb_its=20, check_fv='raise')
Exemplo n.º 4
0
 def test_perf_baseline(self):
     """Check estimation of the perfusion baseline (high SNR)."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simulation = simulate_asl(self.tmp_dir, spatial_size='normal')
     data = FmriData.from_simulation_dict(simulation)
     self._test_specific_samplers(['perf_baseline'], data,
                                  nb_its=100, check_fv='raise')
Exemplo n.º 5
0
 def test_prf(self):
     """Check estimation of the PRF."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simulation = simulate_asl(self.tmp_dir)
     data = FmriData.from_simulation_dict(simulation)
     self._test_specific_parameters(['prf'], data, simulation,
                                    nItMax=20, estimateG=True)
Exemplo n.º 6
0
 def test_sigmaG(self):
     """Check estimation of the PRF variance sigma_G (high SNR).

     Also prints a ready-to-paste ``pyhrf_view`` command to inspect
     the output maps.
     """
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simu = simulate_asl(self.tmp_dir, spatial_size='normal')
     fdata = FmriData.from_simulation_dict(simu)
     self._test_specific_parameters(['sigma_G'], fdata, simu,
                                    nItMax=100, estimateSigmaG=True)
     # Use the print function (identical output for a single argument)
     # so this module also parses under Python 3, where the old
     # print-statement form is a SyntaxError.
     print('pyhrf_view %s/*mixt_params*perf*nii' % self.tmp_dir)
Exemplo n.º 7
0
 def test_noise_var(self):
     """Check estimation of noise variances (high SNR).

     Also prints a ready-to-paste ``pyhrf_view`` command to inspect
     the noise maps.
     """
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simu = simulate_asl(self.tmp_dir)
     fdata = FmriData.from_simulation_dict(simu)
     self._test_specific_parameters(['noise_var'], fdata, simu, nItMax=100,
                                    estimateNoise=True)
     # Use the print function (identical output for a single argument)
     # so this module also parses under Python 3.
     print('pyhrf_view %s/*noise*nii' % self.tmp_dir)
Exemplo n.º 8
0
 def test_brf_physio_nonreg(self):
     """Check BRF estimation with the non-regularized physio prior."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simulation = simulate_asl(self.tmp_dir, spatial_size='normal')
     data = FmriData.from_simulation_dict(simulation)
     prior = 'physio_stochastic_not_regularized'
     self._test_specific_samplers(['brf'], data, nb_its=100,
                                  rf_prior_type=prior,
                                  check_fv='raise')
Exemplo n.º 9
0
    def test_mu(self):
        """Check estimation of mu."""
        pyhrf.logger.setLevel(logging.INFO)

        from pyhrf.sandbox.physio import simulate_asl_physio_rfs
        simulation = simulate_asl_physio_rfs(self.tmp_dir)
        data = FmriData.from_simulation_dict(simulation)
        self._test_specific_samplers(['truebrf'], data, nb_its=20,
                                     check_fv='raise',
                                     mu_prior_type='regularized')
Exemplo n.º 10
0
 def test_prf_physio_det(self):
     """Check PRF estimation with the deterministic physiological prior."""
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simu = simulate_asl(self.tmp_dir, spatial_size='normal')
     # Debug output of the simulated PRF shape. Print-function call
     # keeps this module parseable under Python 3 (same output for a
     # single argument).
     print(simu['prf'].shape)
     fdata = FmriData.from_simulation_dict(simu)
     self._test_specific_samplers(['prf'], fdata, nb_its=100,
                                  check_fv='raise',
                                  rf_prior_type='physio_deterministic')
Exemplo n.º 11
0
    def test_all(self):
        """Check joint estimation of the full ASL model (high SNR)."""
        pyhrf.logger.setLevel(logging.INFO)
        from pyhrf.jde.asl import simulate_asl
        simulation = simulate_asl(self.tmp_dir, spatial_size='normal')
        data = FmriData.from_simulation_dict(simulation)
        np.random.seed(25430)
        variables = ['bold_response_levels', 'perf_response_levels', 'drift',
                     'drift_var', 'brf', 'brf_var', 'prf', 'labels',
                     'bold_mixt_params', 'perf_mixt_params', 'perf_baseline',
                     'perf_baseline_var']
        self._test_specific_samplers(variables, data, nb_its=500,
                                     check_fv='print')
Exemplo n.º 12
0
 def test_perf_baseline_var(self):
     """Check estimation of the perfusion baseline variance (high SNR).

     Prints simulated vs. empirical mean/variance of the perfusion
     baseline for visual inspection.
     """
     pyhrf.logger.setLevel(logging.INFO)
     from pyhrf.jde.asl import simulate_asl
     simu = simulate_asl(self.tmp_dir, spatial_size='normal')
     perf_baseline = simu['perf_baseline']
     # Print-function calls keep this module parseable under Python 3,
     # where the old comma-style print statements are SyntaxErrors.
     print('perf_baseline_mean = %s' % simu['perf_baseline_mean'])
     print('perf_baseline_mean emp = %s' % np.mean(perf_baseline))
     print('perf_baseline_var = %s' % simu['perf_baseline_var'])
     print('perf_baseline_var emp = %s' % np.var(perf_baseline))
     fdata = FmriData.from_simulation_dict(simu)
     self._test_specific_samplers(['perf_baseline_var'], fdata, nb_its=15,
                                  check_fv='raise')
Exemplo n.º 13
0
    def test_perfusion(self):
        """Check joint estimation of the perfusion component (high SNR)."""
        pyhrf.logger.setLevel(logging.INFO)
        from pyhrf.jde.asl import simulate_asl
        simu = simulate_asl(self.tmp_dir, spatial_size='normal')
        fdata = FmriData.from_simulation_dict(simu)
        np.random.seed(25430)
        v = ['perf_response_levels', 'prf']

        # mem.cache wraps the expensive sampling run -- presumably a
        # joblib.Memory instance defined at module level; TODO confirm.
        mem.cache(self._test_specific_parameters)(v, fdata, simu,
                                                  nItMax=100,
                                                  estimateG=True,
                                                  estimateC=True,
                                                  estimateSigmaG=True)
        # Print function keeps this module parseable under Python 3.
        print('pyhrf_view %s/*nii' % self.tmp_dir)
Exemplo n.º 14
0
    def test_all(self):
        """Check estimation of the full ASL model (high SNR).

        Also prints a ready-to-paste ``pyhrf_view`` command to inspect
        the output maps.
        """
        pyhrf.logger.setLevel(logging.INFO)
        from pyhrf.jde.asl import simulate_asl
        simu = simulate_asl(self.tmp_dir, spatial_size='normal')
        fdata = FmriData.from_simulation_dict(simu)
        np.random.seed(25430)
        v = ['bold_response_levels', 'perf_response_levels',
             'brf', 'brf_var', 'prf', 'labels', 'bold_mixt_params',
             'perf_mixt_params', 'drift_perf_baseline']

        self._test_specific_parameters(v, fdata, simu,
                                       estimateSigmaH=False, nItMax=100,
                                       nItMin=10, estimateBeta=True,
                                       estimateSigmaG=True, PLOT=False,
                                       constrained=True, fast=False,
                                       estimateH=True, estimateG=True,
                                       estimateA=True, estimateC=True,
                                       estimateZ=True, estimateLA=True,
                                       estimateMP=True)
        # Print function keeps this module parseable under Python 3.
        print('pyhrf_view %s/*nii' % self.tmp_dir)
Exemplo n.º 15
0
    def __init__(self,
                 fmri_data=FmriData.from_vol_ui(),
                 analyser=JDEMCMCAnalyser(),
                 output_dir='./',
                 make_outputs=True,
                 result_dump_file=DEFAULT_DUMP_FILE):
        """Build a treatment object from fMRI data and an analyser.

        NOTE(review): the defaults ``FmriData.from_vol_ui()`` and
        ``JDEMCMCAnalyser()`` are evaluated once at class-definition
        time and shared across every call that relies on them -- a
        classic Python pitfall. Presumably kept this way because xmlio
        introspects the signature defaults; confirm before replacing
        them with ``None`` sentinels.
        """

        xmlio.XmlInitable.__init__(self)

        self.analyser = analyser
        self.output_dir = output_dir

        # Resolve where (and whether) to dump the result object:
        # - no explicit file name -> default dump file under output_dir,
        #   or no dump at all when output_dir is None;
        # - explicit file name -> joined under output_dir.
        #   NOTE(review): an explicit result_dump_file combined with
        #   output_dir=None would make op.join raise -- confirm callers
        #   never do that.
        if result_dump_file is None:
            if self.output_dir is not None and DEFAULT_DUMP_FILE is not None:
                self.result_dump_file = op.join(self.output_dir,
                                                DEFAULT_DUMP_FILE)
            else:
                self.result_dump_file = None
        else:
            self.result_dump_file = op.join(self.output_dir, result_dump_file)

        self.data = fmri_data

        self.make_outputs = make_outputs
Exemplo n.º 16
0
def create_treatment_surf(boldFiles,
                          parcelFile,
                          meshFile,
                          dt,
                          tr,
                          paradigmFile,
                          nbIterations=4000,
                          writeXmlSetup=True,
                          parallelize=False,
                          outputDir=None,
                          outputSuffix=None,
                          outputPrefix=None,
                          contrasts=';',
                          beta=.6,
                          estimBeta=True,
                          pfMethod='ps',
                          estimHrf=True,
                          hrfVar=.01,
                          roiIds=None,
                          nbClasses=2,
                          gzip_rdump=False,
                          simulation_file=None,
                          make_outputs=True):
    """Build a surface-based JDE MCMC treatment from the given inputs.

    Returns
    -------
    (FMRITreatment, str or None)
        The configured treatment and the path of the XML setup file
        written to outputDir (None when no XML setup was written).
    """
    if roiIds is None:
        roiIds = np.array([], dtype=int)

    # NOTE(review): outDump is computed but never used below except for
    # the '.gz' suffix -- possibly meant to be passed to FMRITreatment.
    outDump = make_outfile(DEFAULT_DUMP_FILE, outputDir, outputPrefix,
                           outputSuffix)
    if gzip_rdump:
        outDump += '.gz'

    # Parse the ';'-separated contrast expressions into {"con_<i>": expr}.
    # BUGFIX: the str.split operands were reversed (";".split(contrasts)),
    # which returned [';'] for any non-trivial contrast string instead of
    # the list of contrast expressions.
    if contrasts is not None:
        cons = dict(
            ("con_%d" % i, ce) for i, ce in enumerate(contrasts.split(";")))
    else:
        cons = {}

    if nbClasses == 2:
        sampler = BG(
            **{
                'nb_iterations':
                nbIterations,
                # level of spatial correlation = beta
                'beta':
                BS(
                    **{
                        'val_ini': np.array([beta]),
                        'do_sampling': estimBeta,
                        'pf_method': pfMethod,
                    }),
                # HRF
                'hrf':
                HS(**{
                    'do_sampling': estimHrf,
                }),
                # HRF variance
                'hrf_var':
                HVS(**{
                    'do_sampling': False,
                    'val_ini': np.array([hrfVar]),
                }),
                # neural response levels (stimulus-induced effects)
                'response_levels':
                NS(**{
                    'contrasts': cons,
                }),
            })
    elif nbClasses == 3:
        raise NotImplementedError('3 class model not maintained')
    else:
        # BUGFIX: any other value previously fell through and crashed
        # below with a NameError on 'sampler'.
        raise ValueError('Unsupported number of classes: %s' % nbClasses)

    analyser = JDEMCMCAnalyser(sampler, dt=dt)

    fmri_data = FmriData.from_surf_files(paradigmFile, boldFiles, tr, meshFile,
                                         parcelFile)

    if simulation_file is not None:
        # Attach the ground-truth simulation (a pickled dict) to the data.
        # Context manager guarantees the handle is closed even on error.
        with open(simulation_file) as f_simu:
            fmri_data.simulation = cPickle.load(f_simu)

    tjde = FMRITreatment(fmri_data, analyser, outputDir)

    sxml = xmlio.to_xml(tjde)
    # BUGFIX: the previous test 'writeXmlSetup is not None' was always
    # True for the boolean default, so writeXmlSetup=False still wrote
    # the XML file.
    if writeXmlSetup and outputDir is not None:
        outSetupXml = make_outfile(DEFAULT_CFG_FILE_JDE, outputDir,
                                   outputPrefix, outputSuffix)
        logger.info("Writing XML setup to: %s", outSetupXml)
        with open(outSetupXml, 'w') as f:
            f.write(sxml)
    else:
        outSetupXml = None

    return tjde, outSetupXml
Exemplo n.º 17
0
def parse_data_options(options):
    """Return an FmriData object corresponding to input options.

    Two main paths:
    - an SPM.mat file is given: the (possibly multi-session) paradigm is
      loaded from it and matched with the functional data files;
    - otherwise a single-session paradigm is taken from the builtin
      pyhrf paradigms or from a CSV file, and data is chosen according
      to ``options.data_scenario``.
    """

    from pyhrf.core import DEFAULT_BOLD_SURF_FILE, DEFAULT_BOLD_VOL_FILE, \
        DEFAULT_SIMULATION_FILE
    # If SPM.mat is provided, retrieve paradigm from it for all sessions.
    # Leave data file pathes to unknown.
    if options.spmFile is not None:

        paradigm, tr = load_paradigm_from_mat(options.spmFile)
        nb_sessions = len(paradigm)

        if options.inputDataType == 'volume':
            SessDataClass = FMRISessionVolumicData
            if options.func_data_file is None:
                data_fns = [DEFAULT_BOLD_VOL_FILE] * nb_sessions
            else:
                data_fns = options.func_data_file
            if options.mask_file is None:
                options.mask_file = DEFAULT_MASK_VOL_FILE
            fmriDataInit = FmriData.from_vol_ui
        elif options.inputDataType == 'surface':
            SessDataClass = FMRISessionSurfacicData
            if options.func_data_file is None:
                data_fns = [DEFAULT_BOLD_SURF_FILE] * nb_sessions
            else:
                data_fns = options.func_data_file
            if options.mask_file is None:
                options.mask_file = DEFAULT_MASK_SURF_FILE

            fmriDataInit = FmriData.from_surf_ui
        elif options.inputDataType == 'simulation':
            SessDataClass = FMRISessionSimulationData
            # NOTE(review): data_fns is a single path here (not a list),
            # so the consistency check below compares the string length
            # with nb_sessions -- confirm intended usage.
            data_fns = DEFAULT_SIMULATION_FILE
            fmriDataInit = FmriData.from_simu_ui
        else:
            # BUGFIX: an unknown data type previously fell through and
            # crashed later with a NameError on SessDataClass.
            raise Exception('Unknown input data type: %s' %
                            options.inputDataType)

        sessions_data = []

        if len(data_fns) != nb_sessions:
            raise Exception('Inconsistent number of data files and sessions: '
                            '%d sessions in paradigm, %d data files' %
                            (nb_sessions, len(data_fns)))

        # One session-data object per session, in sorted session order.
        for isess, sess in enumerate(sorted(paradigm.keys())):
            sessions_data.append(
                SessDataClass(paradigm[sess]['onsets'],
                              paradigm[sess]['stimulusLength'],
                              data_fns[isess]))

        if options.inputDataType == 'surface':

            return fmriDataInit(sessions_data=sessions_data,
                                tr=tr,
                                mask_file=options.mask_file,
                                mesh_file=options.mesh_file)

        return fmriDataInit(sessions_data=sessions_data,
                            tr=tr,
                            mask_file=options.mask_file)

    # unstack & take 1st set of onsets for each condition to get only one
    # session.
    # SECURITY NOTE: eval() builds the attribute name from
    # options.paradigm -- this must only ever come from a trusted
    # command-line choice, never from external input.
    onsets = unstack_trees(eval('pyhrf.paradigm.onsets_%s' %
                                options.paradigm))[0]
    durations = unstack_trees(
        eval('pyhrf.paradigm.durations_%s' % options.paradigm))[0]

    if options.paradigm_csv is not None:
        # A CSV paradigm overrides the builtin one; keep only the first
        # session of each condition.
        onsets, durations = load_paradigm_from_csv(options.paradigm_csv)
        from pyhrf.tools import apply_to_leaves
        onsets = apply_to_leaves(onsets, lambda x: x[0])
        durations = apply_to_leaves(durations, lambda x: x[0])

    # Set data type:
    if options.inputDataType == 'volume':
        # BUGFIX: SessDataClass was only bound in the 'default' branch,
        # so the 'small' scenario and the func_data_file path raised a
        # NameError for other scenarios.
        SessDataClass = FMRISessionVolumicData
        if options.data_scenario == 'default':
            sd = SessDataClass(onsets, durations)
            if options.mask_file is None:
                options.mask_file = DEFAULT_MASK_VOL_FILE
        elif options.data_scenario == 'small':
            sd = SessDataClass(onsets, durations)
            if options.mask_file is None:
                options.mask_file = DEFAULT_MASK_SMALL_VOL_FILE
        elif options.data_scenario == 'realistic':
            sd = FMRISessionVolumicData(onsets, durations,
                                        REALISTIC_REAL_DATA_BOLD_VOL_FILE)
            if options.mask_file is None:
                options.mask_file = REALISTIC_REAL_DATA_MASK_VOL_FILE
        else:
            # BUGFIX: typo in the error message ("Uknown").
            raise Exception("Unknown data scenario: %s" % options.data_scenario)

        if options.func_data_file is not None:
            sessions_data = []
            sessions_data.append(
                SessDataClass(onsets, durations, options.func_data_file))
        else:
            sessions_data = [sd]
        if hasattr(options, 'tr') and options.tr is not None:
            tr = options.tr
            res = FmriData.from_vol_ui(sessions_data=sessions_data,
                                       tr=tr,
                                       mask_file=options.mask_file)
        else:
            res = FmriData.from_vol_ui(sessions_data=sessions_data,
                                       mask_file=options.mask_file)

        return res

    elif options.inputDataType == 'surface':
        mask_fn = DEFAULT_MASK_SURF_FILE
        mesh_fn = DEFAULT_MESH_FILE

        if options.data_scenario == 'default':
            # TODO: create a bigger surface default dataset
            sd = FMRISessionSurfacicData(onsets, durations)
        elif options.data_scenario == 'small':
            # BUGFIX: was a plain 'if' after the 'default' branch
            # (harmless only because the conditions are exclusive).
            sd = FMRISessionSurfacicData(onsets, durations)
        elif options.data_scenario == 'realistic':
            raise NotImplementedError('Realistic surfacic dataset not yet '
                                      'available (TODO)')
        else:
            # BUGFIX: an unknown scenario previously left 'sd' unbound.
            raise Exception("Unknown data scenario: %s" % options.data_scenario)

        return FmriData.from_surf_ui(sessions_data=[sd],
                                     mask_file=mask_fn,
                                     mesh_file=mesh_fn)

    elif options.inputDataType == 'simulation':
        if options.data_scenario == 'default':
            sd = FMRISessionSimulationData(onsets, durations)
        elif options.data_scenario == 'small':
            raise NotImplementedError('Small artificial dataset not yet '
                                      'available (TODO)')
        elif options.data_scenario == 'realistic':
            # BUGFIX: was a plain 'if' after the elif chain.
            raise NotImplementedError('Realistic artificial dataset not yet '
                                      'available (TODO)')
        else:
            # BUGFIX: an unknown scenario previously left 'sd' unbound.
            raise Exception("Unknown data scenario: %s" % options.data_scenario)

        return FmriData.from_simu_ui(sessions_data=[sd])
Exemplo n.º 18
0
 def test_from_vol_ui_default(self):
     """Smoke test: build FmriData with all default arguments."""
     data = FmriData.from_vol_ui()
Exemplo n.º 19
0
def main():
    """Run when calling the script.

    Reads all settings from the module-level ``config`` mapping, runs a
    JDE-VEM analysis on the configured BOLD data and optionally saves
    the resolved configuration as JSON next to the outputs.
    """

    start_time = time.time()

    if not os.path.isdir(config["output_dir"]):
        try:
            os.makedirs(config["output_dir"])
        except OSError as e:
            # BUGFIX: typo in the user-facing message ("Ouput").
            print("Output directory could not be created.\n"
                  "Error was: {}".format(e.strerror))
            sys.exit(1)

    bold_data = FmriData.from_vol_files(
        mask_file=config["parcels_file"], paradigm_csv_file=config["onsets_file"],
        bold_files=config["bold_data_file"], tr=config["tr"]
    )

    compute_contrasts, contrasts_def = load_contrasts_definitions(config["def_contrasts_file"])

    jde_vem_analyser = JDEVEMAnalyser(
        hrfDuration=config["hrf_duration"], sigmaH=config["sigma_h"], fast=True,
        computeContrast=compute_contrasts, nbClasses=2, PLOT=False,
        nItMax=config["nb_iter_max"], nItMin=config["nb_iter_min"], scale=False,
        beta=config["beta"], estimateSigmaH=True, estimateHRF=config["estimate_hrf"],
        TrueHrfFlag=False, HrfFilename='hrf.nii', estimateDrifts=True,
        hyper_prior_sigma_H=config["hrf_hyperprior"], dt=config["dt"], estimateBeta=True,
        contrasts=contrasts_def, simulation=False, estimateLabels=True,
        LabelsFilename=None, MFapprox=False, estimateMixtParam=True,
        constrained=False, InitVar=0.5, InitMean=2.0, MiniVemFlag=False, NbItMiniVem=5,
        zero_constraint=config["zero_constraint"], drifts_type=config["drifts_type"]
    )

    processing_jde_vem = FMRITreatment(
        fmri_data=bold_data, analyser=jde_vem_analyser,
        output_dir=config["output_dir"], make_outputs=True
    )

    if not config["parallel"]:
        processing_jde_vem.run()
    else:
        processing_jde_vem.run(parallel="local")

    if config["save_processing_config"]:
        # Let's canonicalize all paths.
        config_save = dict(config)
        # BUGFIX: dict(config) is a shallow copy, so rewriting the list
        # elements in place also mutated config["bold_data_file"]; build
        # a new list instead.
        config_save["bold_data_file"] = [
            os.path.abspath(bold_file)
            for bold_file in config_save["bold_data_file"]
        ]
        config_save["parcels_file"] = os.path.abspath(config_save["parcels_file"])
        config_save["onsets_file"] = os.path.abspath(config_save["onsets_file"])
        if config_save["def_contrasts_file"]:
            config_save["def_contrasts_file"] = os.path.abspath(config_save["def_contrasts_file"])
        config_save["output_dir"] = os.path.abspath(config_save["output_dir"])
        # NOTE(review): the timestamp contains ':' characters, invalid in
        # Windows file names -- presumably POSIX-only usage; confirm.
        config_save_filename = "{}_processing.json".format(
            datetime.datetime.today()
        ).replace(" ", "_")
        config_save_path = os.path.join(config["output_dir"], config_save_filename)
        with open(config_save_path, 'w') as json_file:
            json.dump(config_save, json_file, sort_keys=True, indent=4)

    print("")
    print("Total computation took: {} seconds".format(format_duration(time.time() - start_time)))
Exemplo n.º 20
0
def simulate_subjects(output_dir,
                      snr_scenario='high_snr',
                      spatial_size='tiny',
                      hrf_group=None,
                      nb_subjects=15,
                      vhrf=0.1,
                      vhrf_group=0.1):
    '''
    Simulate data for multiple subjects (15 subjects by default).

    Parameters
    ----------
    output_dir : str or None
        Root directory; one 'subject_<i>' sub-directory is created per
        subject. Nothing is written when None.
    snr_scenario : str
        'low_snr' -> three noisy conditions; any other value -> a single
        high-SNR condition.
    spatial_size : str
        NOTE(review): currently unused in this function (label maps are
        hard-coded to 'random_small') -- confirm whether it should be
        forwarded.
    hrf_group : array or None
        Group-level HRF; simulated from a smooth Gaussian prior and
        normalized to unit norm when None.
    nb_subjects : int
        Number of subjects to simulate.
    vhrf, vhrf_group : float
        Subject-level and group-level HRF variances.

    Returns
    -------
    FmriGroupData
        The simulated data of all subjects.
    '''
    drift_coeff_var = 1.
    drift_amplitude = 10.

    lmap1, lmap2, lmap3 = 'random_small', 'random_small', 'random_small'

    if snr_scenario == 'low_snr':  # low snr
        vars_noise = np.zeros(nb_subjects) + 1.5
        conditions = [
            Condition(name='audio',
                      m_act=3.,
                      v_act=.3,
                      v_inact=.3,
                      label_map=lmap1),
            Condition(name='video',
                      m_act=2.5,
                      v_act=.3,
                      v_inact=.3,
                      label_map=lmap2),
            Condition(name='damier',
                      m_act=2,
                      v_act=.3,
                      v_inact=.3,
                      label_map=lmap3),
        ]
    else:  # high snr

        vars_noise = np.zeros(nb_subjects) + .2
        conditions = [
            Condition(name='audio',
                      m_act=13.,
                      v_act=.2,
                      v_inact=.1,
                      label_map=lmap1),
        ]
    vars_hrfs = np.zeros(nb_subjects) + vhrf

    # Common variables across subjects:
    labels_vol = sim.create_labels_vol(conditions)
    labels = sim.flatten_labels_vol(labels_vol)

    # Use a smooth multivariate Gaussian prior when no group HRF is
    # given, then normalize it to unit norm (rescaling its variance
    # accordingly).
    if hrf_group is None:
        var_hrf_group = 0.1
        hrf_group = sim.create_gsmooth_hrf(dt=0.6,
                                           hrf_var=var_hrf_group,
                                           normalize_hrf=False)
        n = (hrf_group**2).sum()**.5
        hrf_group /= n
        var_hrf_group /= n**2

    simu_subjects = []

    for isubj in xrange(nb_subjects):
        if output_dir is not None:
            out_dir = op.join(output_dir, 'subject_%d' % isubj)
            if not op.exists(out_dir):
                os.makedirs(out_dir)
        else:
            out_dir = None
        s = simulate_single_subject(out_dir,
                                    conditions,
                                    vars_hrfs[isubj],
                                    labels,
                                    labels_vol,
                                    vars_noise[isubj],
                                    drift_coeff_var,
                                    drift_amplitude,
                                    hrf_group,
                                    dt=0.6,
                                    dsf=4,
                                    var_hrf_group=vhrf_group)
        simu_subjects.append(FmriData.from_simulation_dict(s))

    return FmriGroupData(simu_subjects)
Exemplo n.º 21
0
    def test_multisession_simu(self):
        """Merging two simulated single-session datasets gives 2 sessions."""
        sessions = [FmriData.from_simu_ui(), FmriData.from_simu_ui()]
        merged = merge_fmri_sessions(sessions)
        self.assertEqual(merged.nbSessions, 2)
Exemplo n.º 22
0
def main():
    """Compare HRF/response estimation methods on real 4-region BOLD data.

    Pipeline: GLM with canonical HRF, GLM with a derivative basis set,
    functional parcellation from the basis-set betas, JDE on that
    parcellation, a canonical GLM rescaled onto JDE, then per-voxel FIR
    and regularized-FIR fits, and finally detection/estimation plots for
    each configured experiment.

    NOTE(review): Python 2 code (print statements, deprecated
    pyhrf.verbose API); all outputs are written under the current
    directory.
    """

    pyhrf.verbose.set_verbosity(1)

    output_dir = './'
    bold_file = get_data_file_name('real_data_vol_4_regions_BOLD.nii.gz')
    mask_file = get_data_file_name('real_data_vol_4_regions_mask.nii.gz')
    paradigm_file = get_data_file_name('paradigm_loc.csv')

    contrasts = pyhrf.paradigm.default_contrasts_loc

    # Each experiment tuple: (condition of interest, contrast of
    # interest, point of interest, parcel of interest, plot label,
    # ymin, ymax) -- unpacked in the loop at the bottom.
    experiments = [ ('audio', 'audio-video',
                     {'axial':17, 'coronal':42, 'sagittal':9}, 1,
                     'temporal', -1., 1.05),
                     ('calculaudio', 'computation-sentences',
                     {'axial':30, 'coronal':25, 'sagittal':53}, 11,
                     'parietal', -1.55, 1.05),
                   ]


    parcellation_dir = get_dir(output_dir, 'parcellation')

    # Load data
    fdata = FmriData.from_vol_files(mask_file=mask_file,
                                    bold_files=[bold_file],
                                    paradigm_csv_file=paradigm_file)

    # The 'if 1:' blocks below are manual switches to re-run only part
    # of the pipeline.
    glm_hcano_output_dir = get_dir(output_dir, 'glm_cano')
    if 1:
        # GLM with canonical HRF
        print 'GLM with canonical HRF'
        glm_analyse(fdata, contrasts, output_dir=glm_hcano_output_dir,
                    output_prefix='glm_hcano_')

    if 1:
        # GLM with basis set
        print 'GLM with basis set'
        glm_hderiv_output_dir = get_dir(output_dir, 'glm_hderivs')
        glm_analyse(fdata, contrasts, hrf_model="Canonical with Derivative",
                    output_dir=glm_hderiv_output_dir,
                    output_prefix='glm_hderivs_')

    parcellation_file =  op.join(parcellation_dir,  'parcellation_func.nii.gz')
    if 1:
        # parcellation from results of GLM basis set

        mask_file = op.join(glm_hderiv_output_dir, 'glm_hderivs_mask.nii.gz')
        beta_files = [op.join(glm_hderiv_output_dir,'glm_hderivs_beta_%s.nii.gz'%c)\
                              for c in fdata.get_condition_names()]
        make_parcellation_from_files(beta_files, mask_file, parcellation_file,
                                     nparcels=20, method='ward_and_gkm')
    jde_output_dir = get_dir(output_dir, 'jde')
    if 1:
        # JDE
        print 'JDE'
        fdata_parc = FmriData.from_vol_files(mask_file=parcellation_file,
                                             bold_files=[bold_file],
                                             paradigm_csv_file=paradigm_file)
        jde_analyse(fdata_parc, contrasts, jde_output_dir)

    glm_hcano_rs_output_dir = get_dir(output_dir,'glm_hcano_rescaled_on_jde')
    if 1:
        # GLM hcano rescaled onto JDE (provide the same results as normal
        # GLM hcano but with effects resized so that there is a consistency btw
        # X^m.h in JDE and the corresponding column of the design matrix in GLM
        print 'GLM rescaled'
        rescale_factor_file = op.join(jde_output_dir, 'scale_factor_for_glm.nii.gz')
        compute_jde_glm_rescaling(jde_output_dir, glm_hcano_output_dir,
                                  rescale_factor_file)

        glm_analyse(fdata, contrasts, output_dir=glm_hcano_rs_output_dir,
                    output_prefix='glm_hcano_rs_',
                    rescale_factor_file=rescale_factor_file)

    ## Outputs
    # For each experiment: build a single-voxel mask at the point of
    # interest, fit FIR and regularized-FIR models there, then plot the
    # detection and estimation comparisons.
    for condition_of_interest, contrast_of_interest, point_of_interest, \
        parcel_of_interest, plot_label, ymin, ymax  in experiments:

        # 'temporal' experiments use the audio/video paradigm variant.
        if 'temporal' in plot_label:
            paradigm_tag = 'loc_av'
        else:
            paradigm_tag = 'loc'

        paradigm_file = get_data_file_name('paradigm_%s.csv' %paradigm_tag)
        fir_mask = op.join(parcellation_dir, 'mask_single_voxel_for_fir_%s.nii.gz'\
                           %paradigm_tag)
        make_mask_from_points([point_of_interest], mask_file, fir_mask)
        fdata_fir = FmriData.from_vol_files(mask_file=fir_mask,
                                            bold_files=[bold_file],
                                            paradigm_csv_file=paradigm_file)

        glm_fir_output_dir = get_dir(output_dir, 'glm_fir_%s' %paradigm_tag)
        if 1:
            # GLM FIR
            print 'GLM FIR'
            glm_analyse(fdata_fir, contrasts={}, hrf_model="FIR",
                        output_dir=glm_fir_output_dir, output_prefix='glm_fir_',
                        fir_delays=range(11))



        rfir_output_dir = get_dir(output_dir, 'rfir_%s' %paradigm_tag)
        if 1:
            # Regularized FIR
            print 'RFIR'
            rfir_analyse(fdata_fir, output_dir=rfir_output_dir)


        fig_dir = get_dir(output_dir, 'figs')
        plot_detection_results(fig_dir, point_of_interest, condition_of_interest,
                               contrast_of_interest, parcellation_file,
                               plot_label, jde_output_dir,
                               glm_hcano_rs_output_dir)

        plot_estimation_results(fig_dir, point_of_interest, parcel_of_interest,
                                condition_of_interest, plot_label,
                                glm_fir_output_dir, rfir_output_dir,
                                jde_output_dir, ymin, ymax)