Example #1
class ParallelTest(unittest.TestCase):
    def _test_remote_map_basic(self, mode):

        r = remote_map(foo, [(1, ), (2, )], [{'b': 5}, {'b': 10}], mode=mode)

    def test_remote_map_serial(self):
        self._test_remote_map_basic(mode='serial')

    @unittest.skipIf(not tools.is_importable('joblib'),
                     'joblib (optional dep) is N/A')
    def test_remote_map_local(self):
        self._test_remote_map_basic(mode='local')

    @unittest.skipIf(not tools.is_importable('joblib'),
                     'joblib (optional dep) is N/A')
    def test_remote_map_local_cartesian_args(self):
        from pyhrf.tools import cartesian_combine_args

        varying_args = {'b': range(10)}
        fixed_args = {'a': 3}
        args_comb = cartesian_combine_args(varying_args, fixed_args)

        r = remote_map(foo, lkwargs=args_comb, mode='local')

        if 0:
            print 'result:'
            print r

    if cfg['parallel-cluster']['enable_unit_test'] == 1:

        def test_remote_map_cluster_many_jobs(self):
            print 'cfg:', cfg['parallel-cluster']['enable_unit_test']
            remote_map(foo, [(5, 6)] * 10, mode='remote_cluster')

        def test_remote_map_cluster_exception(self):
            self.assertRaises(RemoteException,
                              remote_map,
                              foo_raise, [(1, ), (2, )],
                              [{'b': 5}, {'b': 10}],
                              mode='remote_cluster')

        def test_remote_map_cluster_basic(self):
            self._test_remote_map_basic(mode='remote_cluster')

        def test_remote_map_cluster_cartesian_args(self):
            from pyhrf.tools import cartesian_combine_args

            varying_args = {'b': range(3)}
            fixed_args = {'a': 3}
            args_comb = cartesian_combine_args(varying_args, fixed_args)

            r = remote_map(foo, lkwargs=args_comb, mode='remote_cluster')

            if 0:
                print 'result:'
                print r
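The tests in this example rely on a module-level worker and on helpers that sit outside this excerpt (remote_map, RemoteException, foo, foo_raise, cfg, tools). A minimal sketch of what the missing pieces might look like, purely as an assumption to make the snippet readable; the import path and the worker bodies are hypothetical:

import unittest

from pyhrf import cfg, tools                    # cfg and tools.is_importable, as used above
from pyhrf.parallel import remote_map, RemoteException   # assumed import path


def foo(a, b=1):
    # trivial worker: remote_map calls it once per (args, kwargs) pair
    return a + b


def foo_raise(a, b=1):
    # always fails, used to check that RemoteException is propagated
    raise Exception('failure for a=%s, b=%s' % (a, b))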
Example #2
class ParcellationMethodTest(unittest.TestCase):

    def setUp(self):
        self.p1 = np.array([[1, 1, 3, 3],
                            [1, 0, 0, 0],
                            [0, 0, 2, 2],
                            [0, 2, 2, 2],
                            [0, 0, 2, 4]], dtype=np.int32)

    @unittest.skipIf(not tools.is_importable('sklearn') or
                     not tools.is_importable('munkres'),
                     'scikit-learn or munkres (optional deps) is N/A')
    def test_ward_spatial_scikit(self):
        from pyhrf.parcellation import parcellation_dist, \
            parcellation_ward_spatial
        from pyhrf.graph import graph_from_lattice, kerMask2D_4n

        X = np.reshape(self.p1, (-1, 1))
        graph = graph_from_lattice(np.ones(self.p1.shape), kerMask2D_4n)

        labels = parcellation_ward_spatial(X, n_clusters=5, graph=graph)

        labels = np.reshape(labels, self.p1.shape)
        # +1 because parcellation_dist sees 0 as background
        dist = parcellation_dist(self.p1 + 1, labels + 1)[0]
        self.assertEqual(dist, 0)

    @unittest.skipIf(not tools.is_importable('sklearn') or
                     not tools.is_importable('munkres'),
                     'scikit-learn or munkres (optional deps) is N/A')
    def test_ward_spatial_scikit_with_mask(self):
        from pyhrf.parcellation import parcellation_dist, parcellation_ward_spatial
        from pyhrf.graph import graph_from_lattice, kerMask2D_4n
        from pyhrf.ndarray import expand_array_in_mask

        if debug:
            print 'data:'
            print self.p1
            print ''

        mask = self.p1 != 0
        graph = graph_from_lattice(mask, kerMask2D_4n)

        X = self.p1[np.where(mask)].reshape(-1, 1)

        labels = parcellation_ward_spatial(X, n_clusters=4, graph=graph)

        labels = expand_array_in_mask(labels, mask)
        #+1 because parcellation_dist sees 0 as background:
        dist = parcellation_dist(self.p1 + 1, labels + 1)[0]
        self.assertEqual(dist, 0)
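Both tests round-trip data through a binary mask: flatten the in-mask voxels, run the parcellation on the flat array, then put the labels back into the volume. A small NumPy-only sketch of that round-trip, stated as an assumption about what expand_array_in_mask does rather than as its actual implementation:

import numpy as np

p = np.array([[1, 1, 0],
              [0, 2, 2]], dtype=np.int32)
mask = p != 0
flat = p[np.where(mask)]        # 1D array of the in-mask values
back = np.zeros_like(p)
back[np.where(mask)] = flat     # place the values back at the masked positions
assert (back == p).all()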
Example #3
class CartesianTest(unittest.TestCase):

    def testCartesianBasic(self):

        domains = [(0, 1, 2), ('a', 'b')]
        cartProd = list(cartesian(*domains))
        assert cartProd == [[0, 'a'], [0, 'b'], [1, 'a'], [1, 'b'], [2, 'a'],
                            [2, 'b']]

    def test_cartesian_apply(self):
        from pyhrf.tools import cartesian_apply
        from pyhrf.tools.backports import OrderedDict

        def foo(a, b, c=1, d=2):
            return a + b + c + d

        # OrderedDict to keep track of parameter order in the result
        # arg values must be hashable
        varying_args = OrderedDict([('a', range(2)),
                                    ('b', range(2)),
                                    ('c', range(2))])

        fixed_args = {'d': 10}

        result_tree = cartesian_apply(varying_args, foo,
                                      fixed_args=fixed_args)

        self.assertEqual(result_tree, {0: {0: {0: 10, 1: 11},
                                           1: {0: 11, 1: 12}},
                                       1: {0: {0: 11, 1: 12},
                                           1: {0: 12, 1: 13}}})

    @unittest.skipIf(not mtools.is_importable('joblib'),
                     'joblib (optional dep) is N/A')
    def test_cartesian_apply_parallel(self):
        from pyhrf.tools import cartesian_apply
        from pyhrf.tools.backports import OrderedDict

        # OrderedDict to keep track of parameter order in the result
        # arg values must be hashable
        varying_args = OrderedDict([('a', range(2)),
                                    ('b', range(2)),
                                    ('c', range(2))])

        fixed_args = {'d': 10}

        result_tree = cartesian_apply(varying_args, foo,
                                      fixed_args=fixed_args,
                                      nb_parallel_procs=4)

        self.assertEqual(result_tree, {0: {0: {0: 10, 1: 11},
                                           1: {0: 11, 1: 12}},
                                       1: {0: {0: 11, 1: 12},
                                           1: {0: 12, 1: 13}}})
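The nested dict asserted in both tests can be rebuilt with plain loops, which makes the structure of cartesian_apply's result tree explicit: one nesting level per varying argument, in the OrderedDict's key order. This is only an illustration of the expected output, not cartesian_apply's implementation:

def foo(a, b, c=1, d=2):
    return a + b + c + d

tree = {}
for a in range(2):
    for b in range(2):
        for c in range(2):
            tree.setdefault(a, {}).setdefault(b, {})[c] = foo(a, b, c, d=10)

assert tree == {0: {0: {0: 10, 1: 11}, 1: {0: 11, 1: 12}},
                1: {0: {0: 11, 1: 12}, 1: {0: 12, 1: 13}}}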
Example #4
class MeasureTest(unittest.TestCase):

    def setUp(self):
        self.p1 = np.array([[1, 1, 0, 3],
                            [1, 1, 3, 3],
                            [0, 1, 2, 2],
                            [0, 2, 2, 2],
                            [0, 0, 2, 0]], dtype=np.int32)
        self.mask = np.where(self.p1 != 0)
        self.fp1 = self.p1[self.mask]

        self.p2 = np.array([[1, 1, 0, 3],
                            [3, 3, 3, 3],
                            [0, 2, 2, 3],
                            [0, 2, 2, 2],
                            [0, 0, 2, 0]], dtype=np.int32)
        self.fp2 = self.p2[self.mask]

    def test_intersection_matrix(self):

        from pyhrf.cparcellation import compute_intersection_matrix

        im = np.zeros((self.fp1.max() + 1, self.fp2.max() + 1),
                      dtype=np.int32)

        compute_intersection_matrix(self.fp1, self.fp2, im)

        assert_array_equal(im, np.array([[0, 0, 0, 0],
                                         [0, 2, 1, 2],
                                         [0, 0, 5, 1],
                                         [0, 0, 0, 3]], dtype=np.int32),
                           "Intersection graph not OK", 1)

    @unittest.skipIf(not tools.is_importable('munkres'),
                     'munkres (optional dep) is N/A')
    def test_parcellation_distance(self):

        from pyhrf.parcellation import parcellation_dist

        dist, cano_parcellation = parcellation_dist(self.p1, self.p2)
        self.assertEqual(dist, 4)
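Entry (i, j) of the intersection matrix counts the positions labelled i in the first flat parcellation and j in the second. A pure-NumPy sketch of that counting, which reproduces the array asserted in test_intersection_matrix; the C routine compute_intersection_matrix is assumed to compute the same quantity, presumably faster:

import numpy as np

def intersection_matrix(fp1, fp2):
    im = np.zeros((fp1.max() + 1, fp2.max() + 1), dtype=np.int32)
    for i, j in zip(fp1, fp2):
        im[i, j] += 1           # co-occurrence count of labels i and j
    return im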
Example #5
class VEMBOLDTest(unittest.TestCase):
    def setUp(self):
        np.random.seed(8652761)

        tmpDir = tempfile.mkdtemp(prefix='pyhrf_tests',
                                  dir=pyhrf.cfg['global']['tmp_path'])
        self.tmp_dir = tmpDir
        self.clean_tmp = True
        simu = simulate_bold(self.tmp_dir, spatial_size='random_small')
        self.data_simu = FmriData.from_simulation_dict(simu)

    def tearDown(self):
        if self.clean_tmp:
            logger.info('Remove tmp dir %s', self.tmp_dir)
            shutil.rmtree(self.tmp_dir)
        else:
            logger.info('Keep tmp dir %s', self.tmp_dir)

    def test_jdevemanalyser(self):
        """ Test BOLD VEM sampler on small simulation with small
        nb of iterations. Estimation accuracy is not tested.
        """
        jde_vem_analyser = JDEVEMAnalyser(beta=.8,
                                          dt=.5,
                                          hrfDuration=25.,
                                          nItMax=2,
                                          nItMin=2,
                                          fast=True,
                                          computeContrast=False,
                                          PLOT=False,
                                          constrained=True)
        tjde_vem = FMRITreatment(fmri_data=self.data_simu,
                                 analyser=jde_vem_analyser,
                                 output_dir=None)
        tjde_vem.run()

    @unittest.skipIf(not tools.is_importable('cvxpy'),
                     'cvxpy (optional dep) is N/A')
    def test_vem_bold_constrained(self):
        """ Test BOLD VEM constraint function.
        Estimation accuracy is not tested.
        """
        data = self.data_simu
        graph = data.get_graph()
        Onsets = data.get_joined_onsets()

        NbIter, nrls, estimated_hrf, \
            labels, noiseVar, mu_k, sigma_k, \
            Beta, L, PL, CONTRAST, CONTRASTVAR, \
            cA, cH, cZ, cAH, cTime, cTimeMean, \
            Sigma_nrls, StimuIndSignal,\
            FreeEnergy = Main_vbjde_Extension_constrained(graph, data.bold, Onsets,
                                                          Thrf=25., K=2, TR=1.,
                                                          beta=1.0, dt=.5,
                                                          NitMax=2, NitMin=2)

    @unittest.skipIf(not tools.is_importable('cvxpy'),
                     'cvxpy (optional dep) is N/A')
    def test_vem_bold_constrained_python(self):
        """ Test BOLD VEM constraint function.
        Estimation accuracy is not tested.
        """
        data = self.data_simu
        graph = data.get_graph()
        Onsets = data.get_joined_onsets()

        m_A, m_H, q_Z, sigma_epsilone, \
            mu_M, sigma_M, Beta, L, \
            PL = Main_vbjde_Python_constrained(graph, data.bold, Onsets,
                                               25., 2, 1., 1.0, .5,
                                               NitMax=2, NitMin=2)
Example #6
class CmdParcellationTest(unittest.TestCase):

    def setUp(self):
        from pyhrf.ndarray import MRI3Daxes
        self.tmp_dir = pyhrf.get_tmp_path()

        self.p1 = np.array([[[1, 1, 1, 3],
                             [1, 1, 3, 3],
                             [0, 1, 2, 2],
                             [0, 2, 2, 2],
                             [0, 0, 2, 4]]], dtype=np.int32)

        self.p1_fn = op.join(self.tmp_dir, 'p1.nii')
        xndarray(self.p1, axes_names=MRI3Daxes).save(self.p1_fn)

        self.p2 = self.p1 * 4.5
        self.p2_fn = op.join(self.tmp_dir, 'p2.nii')
        xndarray(self.p2, axes_names=MRI3Daxes).save(self.p2_fn)

        self.mask = (self.p1 > 0).astype(np.int32)
        self.mask_fn = op.join(self.tmp_dir, 'mask.nii')
        xndarray(self.mask, axes_names=MRI3Daxes).save(self.mask_fn)

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    @unittest.skipIf(not tools.is_importable('sklearn') or
                     not tools.is_importable('munkres'),
                     'scikit-learn or munkres (optional deps) is N/A')
    def test_ward_spatial_cmd(self):
        from pyhrf.parcellation import parcellation_dist

        output_file = op.join(self.tmp_dir, 'parcellation_output_test.nii')

        nparcels = 4
        cmd = 'pyhrf_parcellate_glm -m %s %s %s -o %s -v %d ' \
            '-n %d -t ward_spatial ' \
            % (self.mask_fn, self.p1_fn, self.p2_fn, output_file,
               logger.getEffectiveLevel(), nparcels)
        if os.system(cmd) != 0:
            raise Exception('"' + cmd + '" did not execute correctly')
        logger.info('cmd: %s', cmd)

        labels = xndarray.load(output_file).data
        logger.info('labels.dtype:%s', str(labels.dtype))
        dist = parcellation_dist(self.p1, labels)[0]
        logger.info('dist:%d', dist)
        self.assertEqual(dist, 0)

    @unittest.skipIf(not tools.is_importable('sklearn') or
                     not tools.is_importable('munkres'),
                     'scikit-learn or munkres (optional deps) is N/A')
    def test_ward_spatial_real_data(self):
        from pyhrf.glm import glm_nipy_from_files

        fn = 'subj0_parcellation.nii.gz'
        mask_file = pyhrf.get_data_file_name(fn)

        bold = 'subj0_bold_session0.nii.gz'
        bold_file = pyhrf.get_data_file_name(bold)

        paradigm_csv_file = pyhrf.get_data_file_name('paradigm_loc_av.csv')
        output_dir = self.tmp_dir
        output_file = op.join(output_dir,
                              'parcellation_output_test_real_data.nii')

        tr = 2.4
        bet = glm_nipy_from_files(bold_file, tr,
                                  paradigm_csv_file, output_dir,
                                  mask_file, session=0,
                                  contrasts=None,
                                  hrf_model='Canonical',
                                  drift_model='Cosine', hfcut=128,
                                  residuals_model='spherical',
                                  fit_method='ols', fir_delays=[0])[0]

        logger.info('betas_files: %s', ' '.join(bet))

        cmd = 'pyhrf_parcellate_glm -m %s %s -o %s -v %d -n %d '\
            '-t ward_spatial ' \
            % (mask_file, ' '.join(bet), output_file,
               logger.getEffectiveLevel(), 10)

        if os.system(cmd) != 0:
            raise Exception('"' + cmd + '" did not execute correctly')
        logger.info('cmd: %s', cmd)

    def test_voronoi_with_seeds(self):

        import os.path as op
        from pyhrf.ndarray import xndarray
        import pyhrf
        fn = 'subj0_parcellation.nii.gz'
        mask_file = pyhrf.get_data_file_name(fn)

        orientation = ['axial', 'coronal', 'sagittal']
        seeds = xndarray.xndarray_like(
            xndarray.load(mask_file)).reorient(orientation)

        seed_coords = np.array([[24, 35, 8],  # axial, coronal, sagittal
                                [27, 35, 5],
                                [27, 29, 46],
                                [31, 28, 46]])

        seeds.data[:] = 0
        seeds.data[tuple(seed_coords.T)] = 1

        seed_file = op.join(self.tmp_dir, 'voronoi_seeds.nii')
        seeds.save(seed_file, set_MRI_orientation=True)

        output_file = op.join(self.tmp_dir, 'voronoi_parcellation.nii')
        cmd = 'pyhrf_parcellate_spatial %s -m voronoi -c %s -o %s -v %d' \
            % (mask_file, seed_file, output_file, logger.getEffectiveLevel())

        if os.system(cmd) != 0:
            raise Exception('"' + cmd + '" did not execute correctly')

        logger.info('cmd: %s', cmd)

        assert op.exists(output_file)
        parcellation = xndarray.load(output_file)

        n_parcels = len(np.unique(parcellation.data)) - 1

        self.assertEqual(n_parcels, len(seed_coords))
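The command-line tests above build a shell string, call os.system, and raise a generic Exception on a non-zero status. A hedged alternative sketch using the standard-library subprocess module, which avoids shell quoting and reports the failing return code; the pyhrf_parcellate_spatial flags are the same as above, nothing new is assumed about the CLI:

import subprocess

def run_cmd(cmd_list):
    # raises subprocess.CalledProcessError, carrying the exit status,
    # if the command fails
    subprocess.check_call(cmd_list)

# e.g. for the voronoi call in test_voronoi_with_seeds:
# run_cmd(['pyhrf_parcellate_spatial', mask_file, '-m', 'voronoi',
#          '-c', seed_file, '-o', output_file,
#          '-v', str(logger.getEffectiveLevel())])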
Example #7
class TreatmentTest(unittest.TestCase):

    def setUp(self):
        self.tmp_dir = pyhrf.get_tmp_path()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_default_treatment(self):

        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None)
        t.enable_draft_testing()
        t.run()

    @unittest.skipIf(not tools.is_importable('joblib'),
                     'joblib (optional dep) is N/A')
    def test_parallel_local(self):

        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None)
        t.enable_draft_testing()
        t.run(parallel='local', n_jobs=2)

    def test_pickle_treatment(self):
        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None)
        t.enable_draft_testing()
        cPickle.loads(cPickle.dumps(t))

    def test_sub_treatment(self):

        t = ptr.FMRITreatment(output_dir=self.tmp_dir)
        t.enable_draft_testing()
        sub_ts = t.split()
        for sub_t in sub_ts:
            sub_t.run()

    def test_jde_estim_from_treatment_pck(self):

        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None)
        t.enable_draft_testing()
        sub_ts = t.split()
        sub_t_fn = op.join(self.tmp_dir, 'treatment.pck')
        fout = open(sub_t_fn, 'w')
        cPickle.dump(sub_ts[0], fout)
        fout.close()
        cmd = 'pyhrf_jde_estim  -t %s' % sub_t_fn
        if os.system(cmd) != 0:
            raise Exception('"' + cmd + '" did not execute correctly')

    @unittest.skipIf(not tools.is_importable('joblib'),
                     'joblib (optional dep) is N/A')
    def test_default_treatment_parallel_local(self):
        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None)
        t.enable_draft_testing()
        t.run(parallel='local')

    @unittest.skipIf(not tools.is_importable('joblib'),
                     'joblib (optional dep) is N/A')
    def test_default_jde_cmd_parallel_local(self):
        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None)
        t.enable_draft_testing()
        t_fn = op.join(self.tmp_dir, 'treatment.pck')
        fout = open(t_fn, 'w')
        cPickle.dump(t, fout)
        fout.close()
        cmd = 'pyhrf_jde_estim -t %s -x local' % t_fn
        if os.system(cmd) != 0:
            raise Exception('"' + cmd + '" did not execute correctly')

    def test_default_treatment_parallel_LAN(self):
        if cfg['parallel-LAN']['enable_unit_test'] == 1:
            t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None,
                                  output_dir=self.tmp_dir)
            t.enable_draft_testing()
            t.run(parallel='LAN')
        else:
            print 'LAN testing is off '\
                '([parallel-LAN][enable_unit_test] = 0 in ~/.pyhrf/config.cfg)'

    def test_remote_dir_writable(self):
        if cfg['parallel-LAN']['enable_unit_test'] == 1:
            from pyhrf import grid
            lhosts = cfg['parallel-LAN']['hosts'].split(',')
            res = grid.remote_dir_is_writable(cfg['parallel-LAN']['user'],
                                              lhosts,
                                              cfg['parallel-LAN']['remote_path'])
            bad_hosts = [h for r, h in zip(res, lhosts)
                         if r == 'no']

            if len(bad_hosts) > 0:
                raise Exception('Remote dir %s is not writable from the '
                                'following hosts:\n %s'
                                % (cfg['parallel-LAN']['remote_path'],
                                   '\n'.join(bad_hosts)))

        else:
            print 'LAN testing is off '\
                '([parallel-LAN][enable_unit_test] = 0 in config.cfg)'

    def test_default_treatment_parallel_cluster(self):
        if cfg['parallel-cluster']['enable_unit_test'] == 1:
            t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None,
                                  output_dir=self.tmp_dir)
            t.enable_draft_testing()
            t.run(parallel='cluster')
        else:
            print 'Cluster testing is off '\
                '([parallel-cluster][enable_unit_test] = 0 in config.cfg)'
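The LAN and cluster tests above check the configuration inside the test body and only print a message when the feature is disabled, so they are reported as passed either way. A hedged alternative, following the skipIf pattern already used for optional dependencies, is to gate them at definition time so they show up as skipped (assumes cfg is importable when the module loads):

    @unittest.skipIf(cfg['parallel-LAN']['enable_unit_test'] != 1,
                     'LAN testing is off ([parallel-LAN][enable_unit_test] = 0 '
                     'in ~/.pyhrf/config.cfg)')
    def test_default_treatment_parallel_LAN(self):
        t = ptr.FMRITreatment(make_outputs=False, result_dump_file=None,
                              output_dir=self.tmp_dir)
        t.enable_draft_testing()
        t.run(parallel='LAN')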
Example #8
class SimulationTest(unittest.TestCase):
    def setUp(self):
        # called before each unit test of the class
        self.tmp_path = pyhrf.get_tmp_path()  # create a temporary folder
        self.clean_tmp = True

    def tearDown(self):
        # called after each unit test of the class
        if self.clean_tmp:
            logger.info('cleaning temporary folder ...')
            shutil.rmtree(self.tmp_path)

    @unittest.skipUnless(is_importable("PIL"), "Pillow (optional dep) is N/A")
    def test_simulate_asl_full_physio(self):

        r = phy.simulate_asl_full_physio()
        # let's just test the shapes of objects and the presence of some
        # physio-specific simulation items
        item_names = r.keys()
        self.assertIn('perf_stim_induced', item_names)
        self.assertIn('flow_induction', item_names)
        self.assertIn('perf_stim_induced', item_names)
        self.assertEqual(r['labels_vol'].shape, (3, 1, 2, 2))

    @unittest.skipUnless(is_importable("PIL"), "Pillow (optional dep) is N/A")
    def test_simulate_asl_full_physio_outputs(self):

        phy.simulate_asl_full_physio(self.tmp_path)

        def makefn(fn):
            return op.join(self.tmp_path, fn)

        self.assertTrue(op.exists(makefn('flow_induction.nii')))
        self.assertTrue(op.exists(makefn('neural_efficacies_audio.nii')))

    @unittest.skipUnless(is_importable("PIL"), "Pillow (optional dep) is N/A")
    def test_simulate_asl_physio_rfs(self):

        r = phy.simulate_asl_physio_rfs()
        # let's just test the shapes of objects and the presence of some
        # physio-specific simulation items:
        item_names = r.keys()
        self.assertIn('perf_stim_induced', item_names)
        self.assertIn('primary_brf', item_names)
        self.assertIn('perf_stim_induced', item_names)
        self.assertEqual(r['labels_vol'].shape, (3, 1, 2, 2))
        self.assertEqual(r['bold'].shape[1], 4)  # flat spatial axis

    def test_create_tbg_neural_efficacies(self):
        """ Test the generation of neural efficacies from a truncated
        bi-Gaussian mixture
        """
        np.random.seed(25432)
        m_act = 5.
        v_act = .05
        v_inact = .05
        cdef = [Condition(m_act=m_act, v_act=v_act, v_inact=v_inact)]
        npos = 5000
        labels = np.zeros((1, npos), dtype=int)
        labels[0, :npos / 2] = 1
        phy_params = phy.PHY_PARAMS_FRISTON00
        ne = phy.create_tbg_neural_efficacies(phy_params, cdef, labels)

        # check shape consistency:
        self.assertEqual(ne.shape, labels.shape)

        # check that moments are close to theoretical ones
        ne_act = ne[0, np.where(labels[0])]
        ne_inact = ne[0, np.where(labels[0] == 0)]
        m_act_theo = truncnorm.mean(0,
                                    phy_params['eps_max'],
                                    loc=m_act,
                                    scale=v_act**.5)
        v_act_theo = truncnorm.var(0,
                                   phy_params['eps_max'],
                                   loc=m_act,
                                   scale=v_act**.5)
        npt.assert_approx_equal(ne_act.mean(), m_act_theo, significant=2)
        npt.assert_approx_equal(ne_act.var(), v_act_theo, significant=2)

        m_inact_theo = truncnorm.mean(0,
                                      phy_params['eps_max'],
                                      loc=0.,
                                      scale=v_inact**.5)
        v_inact_theo = truncnorm.var(0,
                                     phy_params['eps_max'],
                                     loc=0.,
                                     scale=v_inact**.5)
        npt.assert_approx_equal(ne_inact.mean(), m_inact_theo, significant=2)
        npt.assert_approx_equal(ne_inact.var(), v_inact_theo, significant=2)
        npt.assert_array_less(ne, phy_params['eps_max'])
        npt.assert_array_less(0., ne)

    def test_create_physio_brf(self):
        phy_params = phy.PHY_PARAMS_FRISTON00
        dt = .5
        duration = 25.
        brf = phy.create_physio_brf(phy_params,
                                    response_dt=dt,
                                    response_duration=duration)

        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(brf.size) * dt
            plt.plot(t, brf)
            plt.title('BRF')
            plt.show()

        npt.assert_approx_equal(brf[0], 0., significant=4)
        npt.assert_array_almost_equal(brf[-1], 0., decimal=4)

        npt.assert_approx_equal(np.argmax(brf) * dt, 3.5, significant=5)

    def test_create_physio_prf(self):

        phy_params = phy.PHY_PARAMS_FRISTON00
        dt = .5
        duration = 25.
        prf = phy.create_physio_prf(phy_params,
                                    response_dt=dt,
                                    response_duration=duration)

        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(prf.size) * dt
            plt.plot(t, prf)
            plt.title('PRF')
            plt.show()

        npt.assert_approx_equal(prf[0], 0., significant=4)
        npt.assert_array_almost_equal(prf[-1], 0., decimal=4)

        npt.assert_approx_equal(np.argmax(prf) * dt, 2.5, significant=5)

    def test_create_evoked_physio_signal(self):
        import pyhrf.paradigm

        phy_params = phy.PHY_PARAMS_FRISTON00
        tr = 1.
        duration = 20.
        ne = np.array([[10., 5.]])
        nb_conds, nb_vox = ne.shape
        # a single stimulation at the beginning
        paradigm = pyhrf.paradigm.Paradigm({'c': [np.array([0.])]}, [duration],
                                           {'c': [np.array([1.])]})
        s, f, hbr, cbv = phy.create_evoked_physio_signals(
            phy_params, paradigm, ne, tr)
        # shape of a signal: (nb_scans, nb_vox)

        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(f[0].size) * tr
            plt.plot(t, f[0])
            plt.title('inflow')
            plt.show()

        self.assertEqual(s.shape,
                         (paradigm.get_rastered(tr)['c'][0].size, nb_vox))

        # check signal causality:
        self.assertEqual(f[0, 0], 1.)
        npt.assert_approx_equal(f[-1, 0], 1., significant=2)

        # non-regression test:
        self.assertEqual(np.argmax(f[:, 0]) * tr, 2)

    def test_phy_integrate_euler(self):
        phy_params = phy.PHY_PARAMS_FRISTON00
        tstep = .05
        nb_steps = 400
        stim_duration = int(1 / tstep)
        stim = np.array([1.] * stim_duration + [0.] *
                        (nb_steps - stim_duration))
        epsilon = .5

        s, f, q, v = phy.phy_integrate_euler(phy_params, tstep, stim, epsilon)

        # signal must be causal:
        self.assertEqual(f[0], 1.)
        npt.assert_approx_equal(f[-1], 1., significant=3)

        # non-regression checks:
        npt.assert_approx_equal(np.argmax(f) * tstep, 2.3)
        npt.assert_approx_equal(f.max(), 1.384, significant=4)

        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(nb_steps) * tstep
            plt.plot(t, f)
            plt.title('inflow')
            plt.show()