Example #1
 def test_autogenerate_sbml_from_folder(self):
     #Test that the function correctly reads config (.ini) files from the folder
     #and writes the generated SBML files to the temporary output directory
     inputpath = (Path.cwd()/'ConfigSBML')
     output_path_auto = os.path.join(output_path, 'ConfigSBML')
         
     files = glob.glob(os.path.join(inputpath, "**/*.ini"), recursive=True)
     
     for file in files:
         core_model_test = mh.from_config(file)
         system_type = core_model_test['system_type']
         settings_name = 'Setting_test1'
         mh.delete(system_type)
         sh.delete(system_type=system_type, settings_name=settings_name)
         assert system_type not in mh.list_models(), 'Model was not deleted'
         assert system_type not in sh.list_settings(), 'Settings were not deleted'
         
     sbmlgen.autogenerate_sbml_from_folder(inputpath, output_path_auto)
     
     for outputfile in files:
         placeholder = outputfile.replace(str(inputpath), '')
         placeholder = placeholder.replace('.ini', '')
         placeholder = placeholder.replace(os.sep, '')  #strip the path separator on any OS
         filename = os.path.join(output_path_auto, 'DatabasetoSBML_' + placeholder +'.xml')
         print('This is the file name:', filename)
         print()
         assert os.path.exists(filename), "File did not output properly"

 def test_from_config_fail_1(self):
     global config_data
     global user_core_models
     global settings_file_2
     
     user_core_models = [mh.from_config(filename) for filename in model_files]
     user_core_models = {core_model['system_type']: core_model for core_model in user_core_models}
     
     #Read settings
     filename    = settings_file_2
     config_data = sc.from_config(filename)
     
     assert isinstance(config_data, dict)
Example #3
    def test_from_config_fail_1(self):
        global config_data
        global user_core_models
        global model_files
        global settings_file_2

        user_core_models = [
            mh.from_config(filename) for filename in model_files
        ]
        user_core_models = {
            core_model['system_type']: core_model
            for core_model in user_core_models
        }

        #Read settings
        #Must raise an Exception here
        filename = settings_file_2
        config_data = sm.from_config(filename)
Example #4
 def test_config_to_sbml(self):
     #Test if files are output correctly
     filelist = ['TestModel_LogicGate_ORgate_DelayActivation_DelayActivation.ini',
                 'TestModel_CellModel_CellularResources_ProteomeAllocation_RibosomeLimitation.ini']
     
     for file in filelist:
         core_model_test = mh.from_config(file)
         system_type = core_model_test['system_type']
         settings_name = 'Setting_test1'
         mh.delete(system_type)
         sh.delete(system_type=system_type, settings_name=settings_name)
         assert system_type not in mh.list_models(), 'Model was not deleted'
         assert system_type not in sh.list_settings(), 'Settings were not deleted'
         
     sbmlgen.config_to_sbml(filelist, output_path)
         
     for outputfile in filelist:
         placeholder = outputfile.replace('.ini', '')
         filename = os.path.join(output_path, 'DatabasetoSBML_' + placeholder +'.xml')
         assert os.path.exists(filename), "File did not output properly"
Example #5
    The steps automatically convert the information in the .ini files into arguments
    that can be fed directly into BMSS functions. The settings files for different types
    of analysis are all very similar. This allows you to copy and paste sections as 
    appropriate.
    '''
    '''
    1. Reading the Arguments File
    '''

    filename = 'settings_sim_1.ini'
    config_data = sm.from_config(filename)
    '''
    2. Compiling Arguments
    '''

    core_model = mh.from_config('TestModel_Dummy.ini')
    user_core_models = {core_model['system_type']: core_model}

    models, params, config_data = sm.get_models_and_params(
        filename, user_core_models=user_core_models)
    '''
    Models is a dictionary in the form {model_num: compiled model}.
    A compiled model is a dict containing the model function to be integrated 
    as well as other information required for integration, analysis and plotting.
    The keys are as follows:
    'function' : The model function.
    'init'     : A dict of initial values for each scenario you wish to simulate.
    'states'   : A list of names of the state variables in order. Required for plotting.
    'params'   : A list of names of the parameters that will be looked up from params 
                  that we defined earlier.
    'tspan'    : A list of arrays corresponding to time segments in order. 
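
    #Illustration (a minimal sketch; it assumes models and params were produced by
    #get_models_and_params as shown above): each compiled model can be inspected
    #through these keys before integration.
    model_1 = models[1]
    print('States :', model_1['states'])   #state variable names, used for plotting
    print('Params :', model_1['params'])   #parameter names looked up from params
    print('tspan  :', model_1['tspan'])    #time segments for integration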
Example #6
if __name__ == '__main__':
    '''
    In this example, we want to characterize a NOT gate system. We want to know 
    if our candidate models will be fully identifiable based on the states 
    measured. This can be achieved using the strike-GOLDD algorithm.
    '''
    #Set up core models and sampler arguments
    #Details in Tutorial 7 Part 1
    model_files = [
        'LogicGate_Not_Single.ini',
        'LogicGate_Not_Double.ini',
        'LogicGate_Not_Double_MaturationSecond.ini',
    ]

    user_core_models = [mh.from_config(filename) for filename in model_files]
    user_core_models = {
        core_model['system_type']: core_model
        for core_model in user_core_models
    }

    sg_args, config_data, variables = ssg.get_strike_goldd_args(
        model_files, user_core_models=user_core_models)
    '''
    The optional argument dst allows you to supply your own dictionary to which the
    results will be added after each iteration. This allows you to thread and/or
    save the results before all the iterations have been completed. Just use an
    empty dictionary for dst.
    '''
    #Run strike-goldd algorithm
    #Details in Tutorial 7 Part 2
    model_files = [
        'LogicGate_OR_Double_Delay_Degrade_ResCompete.ini',
        'LogicGate_OR_Double_Delay_Delay_ResCompete.ini',
        'LogicGate_OR_Double_Degrade_Delay_ResCompete.ini',
        'LogicGate_OR_Double_Delay_Degrade.ini',
        'LogicGate_OR_Double_Delay_Delay.ini',
        'LogicGate_OR_Double_Degrade_Delay.ini',
        'LogicGate_OR_Double_Delay.ini',
        'LogicGate_OR_Double_DelayInput2.ini',
        'LogicGate_OR_Double_Degrade.ini',
        'LogicGate_OR_Double_DegradeInput2.ini',
        'LogicGate_OR_Double.ini',
    ]

    #List of model dicts
    core_models_list = [mh.from_config(filename) for filename in model_files]
    print(core_models_list)

    #Nested dict with system_type as first key to store the model dict
    user_core_models = {
        core_model['system_type']: core_model
        for core_model in core_models_list
    }
    print('\n\n', user_core_models)
    '''Run simulation using the same configuration file (run only the first model).
    The steps are as follows:
        1. prepare configuration .ini file (aside from core model information)
            - init, parameter_values, tspan 
        2. get_models_and_params
        3. update the models argument with modify_params and/or modify_init, if any
        4. integrate the models
    '''
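
    #Note: the lines below are the tail end of a modify_params callback whose
    #def line is not part of this excerpt; it switches the last two parameters
    #on or off depending on the current integration segment.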
    if segment == 1:
        new_params[-2] = 1
        new_params[-1] = 1
    else:
        new_params[-2] = 0
        new_params[-1] = 0

    return new_params


if __name__ == '__main__':

    #Set up core models and sampler arguments
    filename = 'CellModel_CellularResources_ProteomeAllocation_RibosomeLimitation.ini'

    core_model = mh.from_config(filename)
    user_core_models = {core_model['system_type']: core_model}

    print('core_model:\n', core_model)

    #Get argument for simulation
    models, params, config_data = sm.get_models_and_params(
        filename, user_core_models=user_core_models)
    print('\nModels:\n', models)
    print('\nparams:\n', params)

    models[1]['int_args']['modify_params'] = modify_params
    print('\nUpdated params:\n', params)

    #Integrate the models numerically
    ym, em = sim.integrate_models(models, params)
    
    '''
    Use this method when:
        1. You are not sure if you want to save the model yet
        2. You expect to reformulate the model as you go along
    
    Note: 
        1. A .py file containing a function for numerical integration will be
          generated by get_models_and_params. Do not delete or rename the file 
          while your code is running.
        2. Details on core_model data structure are in Tutorial 1 Part 1
        3. Details on storing core_model in the database are in Tutorial 1 Part 2
        4. Details on the data structures models and params are in Tutorial 2 Part 1
    '''
    #Read model
    #Details in Tutorial 1 Part 1
    core_model = mh.from_config('testmodel.ini')
    user_core_models = {core_model['system_type']: core_model}

    #Read settings
    #Details in Tutorial 2 Part 1
    filename = 'settings_sim_1.ini'
    config_data = sm.from_config(filename)

    #Get arguments for simulation
    models, params, config_data = sm.get_models_and_params(
        filename, user_core_models=user_core_models)
    '''
    The steps for analyzing a model in the database are as follows:
        1. Call the function BMSS.models.setup_sim.get_models_and_params
    
    Use this method when:
 The settings files for different types of analysis are all very similar. This allows you to copy and paste sections as
 appropriate.
 '''
 
 '''
 1. Reading the Settings File
 '''
 
 filename    = 'settings_sen.ini'
 config_data = ss.from_config(filename)
 
 '''
 2. Compiling Arguments
 '''
 
 core_model                    = mh.from_config('Monod_Constitutive_Single_ProductInhibition.ini')
 user_core_models              = {core_model['system_type']: core_model}
 sensitivity_args, config_data = ss.get_sensitivity_args(config_data, user_core_models=user_core_models)
 
 print('Keys in sensitivity_args: ')
 print(sensitivity_args.keys())
     
 '''
 3. Wrapping for Models in Database
 
 For models already in the database, we can combine the above steps into a single 
 function call.
 '''
 
 new_sensitivity_args, new_config_data = ss.get_sensitivity_args(filename)
 
Example #11
    def test_from_config(self):
        global core_model_2

        core_model_2 = mh.from_config('pyTestModel.ini')
    The settings files for different types of analysis are all very similar. This allows you to copy and paste sections as
    appropriate.
    '''
    '''
    1. Reading the Settings File
    '''
    filename = 'settings_sg.ini'
    config_data = ssg.from_config(filename)
    '''
    Note: In order to speed up calculations, provide parameter values for
    fixed_parameters where possible.
    '''
    '''
    2. Compiling Arguments
    '''
    core_model = mh.from_config('Monod_Inducible.ini')

    user_core_models = {core_model['system_type']: core_model}

    sg_args, config_data, variables = ssg.get_strike_goldd_args(
        filename, user_core_models=user_core_models, write_file=False)

    print('Keys in sg_args[1]: ')
    print(sg_args[1].keys())
    print()
    '''
    sg_args is a dictionary in the form {model_num: model} where a model contains
    information on the system of equations, the number of unknown parameters and
    other arguments required for running the strike-goldd algorithm.
    
    The keys are as follows:
Example #13
    The steps automatically convert the information in the .ini files into arguments
    that can be fed directly into BMSS functions. The settings files for different types
    of analysis are all very similar. This allows you to copy and paste sections as 
    appropriate.
    '''
    '''
    1. Reading the Settings File
    '''

    filename = 'settings_sa.ini'
    config_data = sc.from_config(filename, sampler='sa')
    '''
    2. Compiling Arguments
    '''

    core_model = mh.from_config('Monod_Constitutive_Single.ini')
    user_core_models = {core_model['system_type']: core_model}
    sampler_args, config_data = sc.get_sampler_args(
        config_data, sampler='sa', user_core_models=user_core_models)
    '''
    3. Wrapping for Models in Database
    
    For models already in the database, we can combine the above steps into a single 
    function call.
    '''

    new_sampler_args, new_config_data = sc.get_sampler_args(filename,
                                                            sampler='sa')
    '''
    Note that possible values for sampler are:
        'sa' : simulated annealing