Esempio n. 1
0
    def test_optimize_imports(self):
        """Smoke test: the optimization classes import and can be constructed."""
        from neurolib.optimize.evolution import Evolution

        identity = lambda f: f
        evo = Evolution(evalFunction=identity, parameterSpace=self.pars)

        from neurolib.optimize.exploration import BoxSearch

        box = BoxSearch(evalFunction=identity, parameterSpace=self.pars)
Esempio n. 2
0
    def setUpClass(cls):
        """Run one FHN brain-network grid exploration shared by all tests in this class."""
        ds = Dataset("hcp")
        model = FHNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
        model.params.duration = 10 * 1000  # ms
        model.params.dt = 0.05
        model.params.bold = True

        # Grid over an array-valued external input, global coupling strength,
        # and the coupling type.
        parameters = ParameterSpace(
            {
                "x_ext": [np.ones((model.params["N"],)) * scale for scale in np.linspace(0, 2, 2)],
                "K_gl": np.linspace(0, 2, 2),
                "coupling": ["additive", "diffusive"],
            },
            kind="grid",
        )
        search = BoxSearch(
            model=model,
            parameterSpace=parameters,
            filename=f"test_exploration_utils_{randomString(20)}.hdf",
        )
        search.run(chunkwise=True, bold=True)
        search.loadResults()

        # x_ext is stored as one array per run; keep only its first entry
        # (flattens the column for easier dataframe handling).
        search.dfResults.x_ext = [arr[0] for arr in list(search.dfResults.x_ext)]

        cls.model = model
        cls.search = search
        cls.ds = ds
Esempio n. 3
0
    def setUpClass(cls):
        """Run a brain-network exploration with a custom evaluation function once for the class."""
        # def test_brain_network_postprocessing(self):
        ds = Dataset("hcp")
        model = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
        # Resting-state working point
        model.params["mue_ext_mean"] = 1.57
        model.params["mui_ext_mean"] = 1.6
        model.params["sigma_ou"] = 0.09
        model.params["b"] = 5.0
        model.params["signalV"] = 2
        model.params["dt"] = 0.2
        model.params["duration"] = 0.2 * 60 * 1000

        # Custom per-trajectory evaluation; `search` is closed over and is
        # assigned below before the exploration runs.
        def evaluateSimulation(traj):
            model = search.getModelFromTraj(traj)
            model.randomICs()
            model.params["dt"] = 0.2
            model.params["duration"] = 4 * 1000.0
            model.run(bold=True)
            search.saveToPypet({"outputs": model.outputs}, traj)

        parameters = ParameterSpace(
            {"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)}
        )
        search = BoxSearch(
            evalFunction=evaluateSimulation,
            model=model,
            parameterSpace=parameters,
            filename=f"test_brain_postprocessing_{randomString(20)}.hdf",
        )
        search.run()

        cls.model = model
        cls.search = search
        cls.ds = ds
    def setUpClass(cls):
        """Run an ALN grid exploration (incl. BOLD) shared by all tests in this class."""
        ds = Dataset("hcp")
        model = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
        model.params.duration = 11 * 1000  # ms
        model.params.dt = 0.2

        grid = {
            "mue_ext_mean": np.linspace(0, 3, 2),
            "mui_ext_mean": np.linspace(0, 3, 2),
            "b": [0.0, 10.0],
        }
        parameters = ParameterSpace(grid, kind="grid")
        search = BoxSearch(
            model=model,
            parameterSpace=parameters,
            filename=f"test_exploration_utils_{randomString(20)}.hdf",
        )
        search.run(chunkwise=True, bold=True)
        search.loadResults()

        cls.model = model
        cls.search = search
        cls.ds = ds
Esempio n. 5
0
    def test_multimodel_explore(self):
        """Explore a two-node MultiModel FHN network using star-notation parameters."""
        t0 = time.time()

        DELAY = 13.0
        delay_matrix = np.array([[0.0, DELAY], [DELAY, 0.0]])
        fhn_net = FitzHughNagumoNetwork(np.random.rand(2, 2), delay_matrix)
        model = MultiModel(fhn_net)
        parameters = ParameterSpace(
            {"*noise*sigma": [0.0, 0.05], "*epsilon*": [0.5, 0.6]},
            allow_star_notation=True,
        )
        search = BoxSearch(model, parameters, filename="test_multimodel.hdf")
        search.run()
        search.loadResults()

        # Star-notation parameters are reported back through the DataArray attrs.
        dataarray = search.xr()
        self.assertTrue(isinstance(dataarray, xr.DataArray))
        self.assertTrue(isinstance(dataarray.attrs, dict))
        self.assertListEqual(list(dataarray.attrs.keys()), list(parameters.dict().keys()))

        logging.info("\t > Done in {:.2f} s".format(time.time() - t0))
Esempio n. 6
0
    def test_single_node(self):
        """Explore a single ALN node over a 2x2 grid of mean external inputs."""
        logging.info("\t > BoxSearch: Testing ALN single node ...")
        t0 = time.time()

        aln = ALNModel()
        parameters = {
            "mue_ext_mean": np.linspace(0, 3, 2).tolist(),
            "mui_ext_mean": np.linspace(0, 3, 2).tolist(),
        }
        search = BoxSearch(aln, parameters)
        search.initializeExploration()
        search.run()
        search.loadResults()

        # Peak excitatory rate over the final second of each run.
        tail = int(1000 / aln.params["dt"])
        for idx in search.dfResults.index:
            search.dfResults.loc[idx, "max_r"] = np.max(search.results[idx]["rates_exc"][:, -tail:])

        logging.info("\t > Done in {:.2f} s".format(time.time() - t0))
Esempio n. 7
0
    def test_single_node(self):
        """Single-node ALN exploration; the xarray export should carry no attrs."""
        t0 = time.time()

        model = ALNModel()
        parameters = ParameterSpace(
            {"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)}
        )
        search = BoxSearch(model, parameters, filename="test_single_nodes.hdf")
        search.run()
        search.loadResults()

        dataarray = search.xr()
        self.assertTrue(isinstance(dataarray, xr.DataArray))
        # No star-notation parameters were used, so attrs must be empty.
        self.assertFalse(dataarray.attrs)

        # Peak excitatory rate over the final second of each run.
        tail = int(1000 / model.params["dt"])
        for idx in search.dfResults.index:
            search.dfResults.loc[idx, "max_r"] = np.max(search.results[idx]["rates_exc"][:, -tail:])

        logging.info("\t > Done in {:.2f} s".format(time.time() - t0))
Esempio n. 8
0
    def test_circle_exploration(self):
        """Exploration with a pure evaluation function: distance of (x, y) to the unit circle."""

        def explore_me(traj):
            pars = search.getParametersFromTraj(traj)
            # distance of the point (x, y) from the unit circle
            distance = abs((pars["x"] ** 2 + pars["y"] ** 2) - 1)
            search.saveOutputsToPypet({"distance": distance}, traj)

        parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
        search = BoxSearch(
            evalFunction=explore_me,
            parameterSpace=parameters,
            filename="test_circle_exploration.hdf",
        )
        search.run()
        search.loadResults(pypetShortNames=False)

        for idx in search.dfResults.index:
            search.dfResults.loc[idx, "distance"] = search.results[idx]["distance"]

        # touch the results dataframe once as a smoke test
        search.dfResults
Esempio n. 9
0
    def test_circle_exploration(self):
        """Save scalar, list and array results and verify dataframe round-trip integrity."""

        def explore_me(traj):
            pars = search.getParametersFromTraj(traj)
            # distance of the point (x, y) from the unit circle
            distance = abs((pars["x"] ** 2 + pars["y"] ** 2) - 1)
            outputs = {
                "scalar_result": distance,
                "list_result": [1, 2, 3, 4],
                "array_result": np.ones(3),
            }
            search.saveToPypet(outputs, traj)

        parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
        search = BoxSearch(
            evalFunction=explore_me,
            parameterSpace=parameters,
            filename="test_circle_exploration.hdf",
        )
        search.run()
        search.loadResults(pypetShortNames=False)

        # touch the results dataframe once as a smoke test
        search.dfResults

        # every stored result must round-trip into the dataframe unchanged
        for idx in search.dfResults.index:
            self.assertEqual(search.dfResults.loc[idx, "scalar_result"], search.results[idx]["scalar_result"])
            self.assertListEqual(search.dfResults.loc[idx, "list_result"], search.results[idx]["list_result"])
            np.testing.assert_array_equal(search.dfResults.loc[idx, "array_result"], search.results[idx]["array_result"])
Esempio n. 10
0
    def test_fhn_brain_network_exploration(self):
        """Grid-explore an FHN brain network and exercise the result-loading API."""
        ds = Dataset("hcp")
        model = FHNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
        model.params.duration = 10 * 1000  # ms
        model.params.dt = 0.2
        model.params.bold = True

        parameters = ParameterSpace(
            {
                "x_ext": [np.ones((model.params["N"],)) * scale for scale in np.linspace(0, 2, 2)],
                "K_gl": np.linspace(0, 2, 2),
                "coupling": ["additive", "diffusive"],
            },
            kind="grid",
        )
        search = BoxSearch(
            model=model,
            parameterSpace=parameters,
            filename="test_fhn_brain_network_exploration.hdf",
        )
        search.run(chunkwise=True, bold=True)

        # exercise the different ways of reading results back from the HDF file
        pu.getTrajectorynamesInFile(os.path.join(paths.HDF_DIR, "test_fhn_brain_network_exploration.hdf"))
        search.loadDfResults()
        search.getRun(0, pypetShortNames=True)
        search.getRun(0, pypetShortNames=False)
        search.loadResults()
Esempio n. 11
0
    def test_brain_network(self):
        """End-to-end test of a staged ALN brain-network exploration.

        Uses a custom evaluation function that runs progressively longer
        simulations, bailing out early (with an invalid result) when the
        activity or the BOLD signal looks unusable, and only runs the full
        simulation for promising parameter sets.
        """
        from neurolib.utils.loadData import Dataset

        ds = Dataset("hcp")
        aln = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat, bold=True)
        # Resting state fits
        aln.params["mue_ext_mean"] = 1.57
        aln.params["mui_ext_mean"] = 1.6
        aln.params["sigma_ou"] = 0.09
        aln.params["b"] = 5.0
        aln.params["signalV"] = 2
        aln.params["dt"] = 0.2
        aln.params["duration"] = 0.2 * 60 * 1000

        # multi stage evaluation function
        # NOTE: closes over `search`, which is assigned below before run().
        def evaluateSimulation(traj):
            model = search.getModelFromTraj(traj)
            defaultDuration = model.params["duration"]
            # placeholder result saved when an early stage fails
            invalid_result = {"fc": [0] * len(ds.BOLDs)}

            # -------- stage wise simulation --------

            # Stage 1 : simulate for a few seconds to see if there is any activity
            # ---------------------------------------
            model.params["dt"] = 0.1
            model.params["duration"] = 3 * 1000.0
            model.run()

            # check if stage 1 was successful
            # (reject runs whose excitatory rate saturates (> 300) or dies out (< 10)
            # after the 500 ms transient)
            if np.max(model.rates_exc[:, model.t > 500]) > 300 or np.max(
                    model.rates_exc[:, model.t > 500]) < 10:
                search.saveOutputsToPypet(invalid_result, traj)
                return invalid_result, {}

            # Stage 2: simulate BOLD for a few seconds to see if it moves
            # ---------------------------------------
            model.params["dt"] = 0.2
            model.params["duration"] = 20 * 1000.0
            model.run(bold=True)

            # reject runs whose BOLD signal barely varies over samples 5:10
            if np.std(model.BOLD.BOLD[:, 5:10]) < 0.001:
                search.saveOutputsToPypet(invalid_result, traj)
                return invalid_result, {}

            # Stage 3: full and final simulation
            # ---------------------------------------
            model.params["dt"] = 0.2
            model.params["duration"] = defaultDuration
            model.run()

            # -------- evaluation here --------

            # correlate simulated FC (BOLD, dropping the first 5 samples) against
            # every empirical FC matrix and average the scores
            scores = []
            for i, fc in enumerate(ds.FCs):  # range(len(ds.FCs)):
                fc_score = func.matrix_correlation(
                    func.fc(model.BOLD.BOLD[:, 5:]), fc)
                scores.append(fc_score)

            meanScore = np.mean(scores)
            result_dict = {"fc": meanScore}

            search.saveOutputsToPypet(result_dict, traj)

        # define and run exploration
        parameters = ParameterSpace({
            "mue_ext_mean": np.linspace(0, 3, 2),
            "mui_ext_mean": np.linspace(0, 3, 2)
        })
        search = BoxSearch(evalFunction=evaluateSimulation,
                           model=aln,
                           parameterSpace=parameters)
        search.run()
Esempio n. 12
0
def param_search(model, parameters, fname='scz_sleep.hdf', run=True):
    """Set up (and optionally run) a BoxSearch exploration for the sleep/SWS model.

    Parameters
    ----------
    model :
        Model to explore; a fresh copy is instantiated per trajectory via
        ``search.getModelFromTraj``.
    parameters :
        ParameterSpace to explore.
    fname : str
        HDF filename for the pypet results (default ``'scz_sleep.hdf'``).
    run : bool
        If True, run the exploration immediately.

    Returns
    -------
    BoxSearch
        The configured (and possibly already run) search object.
    """
    # All keys written per run; also used to emit an all-NaN record when a
    # parameter combination is skipped.  NOTE: "frontalnormalized_up_lengths_mean"
    # (missing underscore) is kept as-is for compatibility with existing result files.
    _RESULT_KEYS = [
        "max_output", "max_amp_output", "domfr", "up_down_difference",
        "normalized_down_lengths", "normalized_down_lengths_mean",
        "normalized_up_lengths_mean", "n_local_waves", "perc_local_waves",
        "n_global_waves", "all_SWS", "SWS_per_min", "local_waves_isi",
        "global_waves_isi", "frontal_normalized_down_lengths",
        "frontal_normalized_down_lengths_mean",
        "frontalnormalized_up_lengths_mean", "frontal_n_local_waves",
        "frontal_perc_local_waves", "frontal_all_SWS", "frontal_SWS_per_min",
        "frontal_n_global_waves", "frontal_local_waves_isi",
        "frontal_global_waves_isi",
    ]

    def evaluateSimulation(traj):
        model = search.getModelFromTraj(traj)
        # initiate the model with random initial conditions
        model.randomICs()

        # Skip parameter combinations not in the allowed J-value list.
        # BUG FIX: the original tested hasattr(model, "model.j_values_list"),
        # which is always False because attribute names cannot contain dots,
        # so the validity check never ran.
        if hasattr(model, "j_values_list"):
            jei = model.params["Jei_max"]
            jie = model.params["Jie_max"]
            jii = model.params["Jii_max"]
            if [jie, jei, jii] not in model.j_values_list:
                search.saveToPypet({key: np.nan for key in _RESULT_KEYS}, traj)
                return

        defaultDuration = model.params['duration']

        # -------- full simulation with a rectangular stimulus --------
        model.params['duration'] = defaultDuration
        rect_stimulus = construct_stimulus(stim="rect",
                                           duration=model.params.duration,
                                           dt=model.params.dt)
        model.params['ext_exc_current'] = rect_stimulus * 5.0

        model.run()

        # -------- up/down state analysis --------
        state_length = 2000  # ms
        # time window where the down-state is expected (middle of the run)
        down_window = ((defaultDuration / 2 - state_length < model.t)
                       & (model.t < defaultDuration / 2))
        # and the up-state (end of the run)
        up_window = ((defaultDuration - state_length < model.t)
                     & (model.t < defaultDuration))
        up_state_rate = np.mean(model.output[:, up_window], axis=1)
        down_state_rate = np.mean(model.output[:, down_window], axis=1)
        up_down_difference = np.max(up_state_rate - down_state_rate)

        # rate sanity measures within the up-state window
        max_amp_output = np.max(
            np.max(model.output[:, up_window], axis=1) -
            np.min(model.output[:, up_window], axis=1))
        max_output = np.max(model.output[:, up_window])

        # dominant frequency of the up-state window.
        # NOTE: the original also computed a whole-trace spectrum first and
        # immediately overwrote it; that dead call has been removed.
        model_frs, model_pwrs = func.getMeanPowerSpectrum(
            model.output[:, up_window],
            dt=model.params.dt,
            maxfr=40,
            spectrum_windowsize=5)
        domfr = model_frs[np.argmax(model_pwrs)]

        # -------- SWS analysis all nodes --------
        (normalized_down_lengths, n_local_waves, n_global_waves,
         local_waves_isi, global_waves_isi) = sws_analysis(model.output, model)

        # -------- SWS analysis frontal nodes --------
        # CORTICAL_REGIONS indices are 1-based; shift to 0-based node indices
        frontal_lobe_nodes = [i - 1 for i in CORTICAL_REGIONS["frontal_lobe"]]
        (frontal_normalized_down_lengths, frontal_n_local_waves,
         frontal_n_global_waves, frontal_local_waves_isi,
         frontal_global_waves_isi) = sws_analysis(
             model.output[frontal_lobe_nodes, :], model)

        result = {
            "max_output": max_output,
            "max_amp_output": max_amp_output,
            "domfr": domfr,
            "up_down_difference": up_down_difference,
            "normalized_down_lengths": normalized_down_lengths,
            "normalized_down_lengths_mean": np.mean(normalized_down_lengths),
            "normalized_up_lengths_mean": 100 - np.mean(normalized_down_lengths),
            "n_local_waves": n_local_waves,
            # +1 in the denominator avoids division by zero when no waves occur
            "perc_local_waves":
            (n_local_waves * 100 / (n_local_waves + n_global_waves + 1)),
            "n_global_waves": n_global_waves,
            "all_SWS": n_local_waves + n_global_waves,
            "SWS_per_min": (n_local_waves + n_global_waves) * 3,
            "local_waves_isi": np.mean(local_waves_isi),
            "global_waves_isi": np.mean(global_waves_isi),
            "frontal_normalized_down_lengths": frontal_normalized_down_lengths,
            "frontal_normalized_down_lengths_mean":
            np.mean(frontal_normalized_down_lengths),
            "frontalnormalized_up_lengths_mean":
            100 - np.mean(frontal_normalized_down_lengths),
            "frontal_n_local_waves": frontal_n_local_waves,
            "frontal_perc_local_waves":
            (frontal_n_local_waves * 100 /
             (frontal_n_local_waves + frontal_n_global_waves + 1)),
            "frontal_all_SWS": frontal_n_local_waves + frontal_n_global_waves,
            "frontal_SWS_per_min":
            (frontal_n_local_waves + frontal_n_global_waves) * 3,
            "frontal_n_global_waves": frontal_n_global_waves,
            "frontal_local_waves_isi": np.mean(frontal_local_waves_isi),
            "frontal_global_waves_isi": np.mean(frontal_global_waves_isi),
        }
        search.saveToPypet(result, traj)
        return

    search = BoxSearch(evalFunction=evaluateSimulation,
                       model=model,
                       parameterSpace=parameters,
                       filename=fname)
    if run:
        search.run()
    return search