Example 1
    def test_parse_parameter_estimation_data(self):
        """Parsed data for 'test_model' has 4 rows and 6 columns."""
        data = viz.Parse(self.pe).data
        expected = [4, 6]
        actual = list(data['test_model'].shape)
        self.assertEqual(expected, actual)
Example 2
    def test_multiple_models(self):
        """
        I had a bug that made all models have the same
        parameter estimation data. This is the test I used
        to fix the bug
        Returns:

        """
        data = viz.Parse(self.pe)
        self.assertNotEqual(
            float(data['first']['k1']),
            float(data['second']['k1']),
        )
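Note: the parsed results support two equivalent access styles, viz.Parse(pe).data['name'] and direct indexing on the Parse object as above (Examples 5 and 6 use the latter). A minimal sketch of walking every parsed model, assuming .data is a dict of pandas DataFrames keyed by model name, as this test suggests; 'k1' is this example's own parameter:

# Sketch (not from the original tests): print the top-ranked k1 estimate of
# every fitted model. Assumes pe is a finished tasks.ParameterEstimation and
# that Parse.data is a dict of DataFrames keyed by model name.
for name, df in viz.Parse(pe).data.items():
    print(name, float(df['k1'].iloc[0]))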
Example 3
    def test_compute_x_odd(self):
        with tasks.ParameterEstimation.Context(self.pe_mod,
                                               self.fname,
                                               context='pl',
                                               parameters='g') as context:
            context.set('method', 'nl2sol')
            context.set('tolerance', 1e-1)
            context.set('iteration_limit', 5)
            context.set('run_mode', True)
            context.set('pe_number', 10)
            config = context.get_config()
        pl = tasks.ParameterEstimation(config)
        data = viz.Parse(pl).data
        p = viz.PlotProfileLikelihoods(self.pe_mod, pl, 1.2)
        print(p.compute_x())  # smoke test: only checks that compute_x() runs
Example 4
    def test_parse(self):
        with tasks.ParameterEstimation.Context(self.pe_mod,
                                               self.fname,
                                               context='pl',
                                               parameters='g') as context:
            context.set('method', 'hooke_jeeves')
            context.set('tolerance', 1e-1)
            context.set('iteration_limit', 5)
            context.set('run_mode', True)
            context.set('pe_number', 10)
            config = context.get_config()
        pe = tasks.ParameterEstimation(config)
        data = viz.Parse(pe).data['A2B']
        expected = (10, 5)  # one row per profile likelihood step (pe_number=10)
        actual = data.shape
        self.assertEqual(expected, actual)
Example 5
    def test(self):
        """
        Returns:

        """
        with tasks.ParameterEstimation.Context(self.model,
                                               self.fname,
                                               context='pl',
                                               parameters='g') as context:
            context.set('method', 'hooke_jeeves')
            context.set('run_mode', True)
            config = context.get_config()
        pe = tasks.ParameterEstimation(config)
        expected = 11
        actual = viz.Parse(pe)['A2B'].shape[0]
        self.assertEqual(expected, actual)
Example 6
    def test_run(self):
        """
        Returns:

        """
        with tasks.ParameterEstimation.Context(self.pe_mod,
                                               self.fname,
                                               context='pl',
                                               parameters='g') as context:
            context.set('method', 'nl2sol')
            context.set('run_mode', True)
            context.set('pe_number', 12)
            config = context.get_config()
        pe = tasks.ParameterEstimation(config)
        expected = 12
        data = viz.Parse(pe)['A2B']
        actual = data.shape[0]
        self.assertEqual(expected, actual)
Example 7
    def setUp(self):
        super(ProfileLikelihoodTests, self).setUp()
        self.fname = os.path.join(os.path.dirname(__file__), 'timecourse.txt')
        self.data = self.model.simulate(0, 10, 1, report_name=self.fname)

        with tasks.ParameterEstimation.Context(self.model,
                                               self.fname,
                                               context='s',
                                               parameters='g') as context:
            context.set('method', 'hooke_jeeves')
            context.set('run_mode', True)
            context.set('randomize_start_values', True)
            config = context.get_config()
        self.pe = tasks.ParameterEstimation(config)
        data = viz.Parse(self.pe).data
        self.rss = data.loc[0, 'RSS']
        # insert the best (lowest-RSS) parameter set, row 0, back into the
        # model so it can seed the profile likelihood runs
        self.pe_mod = self.pe.models['test_model'].model
        self.pe_mod.insert_parameters(df=data, index=0, inplace=True)
Example 8
    def test_compute_x(self):
        with tasks.ParameterEstimation.Context(self.pe_mod,
                                               self.fname,
                                               context='pl',
                                               parameters='g') as context:
            context.set('method', 'nl2sol')
            context.set('tolerance', 1e-1)
            context.set('iteration_limit', 5)
            context.set('run_mode', True)
            context.set('pe_number', 10)
            config = context.get_config()
        pl = tasks.ParameterEstimation(config)
        data = viz.Parse(pl).data
        p = viz.PlotProfileLikelihoods(self.pe_mod, pl, 1.2)
        x = p.compute_x()
        expected = (10, 5)
        actual = x.shape
        self.assertEqual(expected, actual)
Example 9
    def setUp(self):
        super(TruncateDataTests, self).setUp()

        fname = os.path.join(os.path.dirname(__file__), 'report1.txt')
        self.model.simulate(0, 50, 1, report_name=fname)

        with tasks.ParameterEstimation.Context(
                self.model, fname, context='s', parameters='g',
        ) as context:
            context.set('method', 'genetic_algorithm')
            context.set('population_size', 2)
            context.set('number_of_generations', 5)
            context.set('copy_number', 2)
            context.set('pe_number', 2)
            context.set('run_mode', True)
            config = context.get_config()

        self.pe = tasks.ParameterEstimation(config)
        self.data = viz.Parse(self.pe).data
Example 10
    def setUp(self):
        super(ProfileLikelihoodTests, self).setUp()
        ant_str = """
        model new_model
            R1: A -> B ; _k1*A;
            R2: B -> A; k2*B;
            R3: C -> D; _k3*C*B;
            R4: D -> C; k4*D;
            
            A = 100;
            B = 0;
            _k1=0.1;
            k2 = 0.01
            _k3 = 0.01
            k4 = 1
        end
        """
        self.copasi_file = os.path.join(os.path.dirname(__file__),
                                        'test_model.cps')
        self.model = model.loada(ant_str, self.copasi_file)
        self.fname = os.path.join(os.path.dirname(__file__), 'timecourse.txt')
        self.data = self.model.simulate(0, 10, 1, report_name=self.fname)

        with tasks.ParameterEstimation.Context(self.model,
                                               self.fname,
                                               context='s',
                                               parameters='g') as context:
            context.set('method', 'hooke_jeeves')
            context.set('run_mode', True)
            context.set('prefix', '_')  # fit only parameters named with a leading underscore (_k1, _k3)
            context.set('randomize_start_values', True)
            config = context.get_config()
        self.pe = tasks.ParameterEstimation(config)
        data = viz.Parse(self.pe).data['test_model']
        self.rss = data.loc[0, 'RSS']
        self.pe_mod = self.pe.models['test_model'].model
        self.pe_mod.insert_parameters(df=data, index=0, inplace=True)
Example 11
            context.set('pl_lower_bound', 1000)  # scan bounds, as factors of the best-fit value
            context.set('pl_upper_bound', 1000)
            # context.set('population_size', 50)
            # context.set('number_of_generations', 300)
            context.set('run_mode', True)  # defaults to False
            context.set('pe_number', 3)  # number of repeat items in scan task
            # context.set('copy_number', 2)  # number of times to copy model
            # context.set('problem', 'Problem1')
            # context.set('fit', 3)
            # context.set('prefix', 'k')
            config = context.get_config()

        pe = tasks.ParameterEstimation(config)

        # myPctMod.open()
        data = viz.Parse(pe)
        print(data)

    myExMod = model.loada(model_string_ex, copasi_ex_filename)

    def my_add_cols(inFName, outFName, sState, i1State, i2State):
        myData = pd.read_csv(
            os.path.join(os.path.dirname(working_directory), 'data', inFName))
        myData.insert(len(myData.columns), 'S_indep', sState)
        myData.insert(len(myData.columns), 'I1_indep', i1State)
        myData.insert(len(myData.columns), 'I2_indep', i2State)
        myData.rename(columns={
            "pN": "Np",
            "pG": "Gp",
            "pD": "Dp",
            "pK": "Kp",
        }, inplace=True)  # assumed close; the source snippet is cut off after "Kp"
Example 12
# Example SLURM job script for serial (non-parallel) jobs
#
#
# Tell SLURM if you want to be emailed when your job starts, ends, etc.
# Currently mail can only be sent to addresses @ncl.ac.uk
#
#SBATCH --mail-type=ALL
#SBATCH --mail-user=your.name@ncl.ac.uk
#


"""

if rocket:
    shellString = shellString + "CopasiSE " + copasi_filename
    shellPath = os.path.join(working_dir, "scratch1part2.sh")
    with open(shellPath, 'w') as f:
        f.write(shellString)
    os.system("sbatch " + shellPath)
else:
    os.system("CopasiSE " + copasi_filename)

while True:
    try:
        myTable = viz.Parse(theTimeCourse)
        break
    except Exception:  # report not written yet; keep polling
        continue

print(myTable)
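The loop above busy-waits and retries on every error. A gentler variant, a sketch assuming the only expected failure is that CopasiSE has not yet finished writing the report:

import time

deadline = time.time() + 300  # give CopasiSE up to five minutes
myTable = None
while time.time() < deadline:
    try:
        myTable = viz.Parse(theTimeCourse)
        break
    except Exception:
        time.sleep(1)  # back off instead of spinning
if myTable is None:
    raise RuntimeError("CopasiSE did not produce a parseable report in time")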
Example 13
"""

copasi_file = os.path.join(working_directory, 'example_model.cps')

## build model
mod = model.loada(antimony_string, copasi_file)

assert isinstance(mod, model.Model)

## simulate some data, returns a pandas.DataFrame
data = mod.simulate(0, 20, 1)

## write data to file
experiment_filename = os.path.join(working_directory, 'experiment_data.txt')
data.to_csv(experiment_filename)

with tasks.ParameterEstimation.Context(
    mod, experiment_filename,
    context='pl', parameters='gm'
) as context:
    context.set('method', 'hooke_jeeves')
    context.set('pl_lower_bound', 1000)
    context.set('pl_upper_bound', 1000)
    context.set('pe_number', 25) # number of steps in each profile likelihood
    context.set('run_mode', True)
    config = context.get_config()
    
myPE = tasks.ParameterEstimation(config)

data = viz.Parse(myPE).data
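Once parsed, the profile likelihood results can be inspected before any plotting. A minimal sketch, assuming data behaves as in the tests above (a dict-like collection of pandas DataFrames):

for key, df in data.items():
    print(key, df.shape)  # row count per profile depends on pe_number
    print(df.head())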