Example 1
def rad_cell_dr(samplefile, radii, end_time, sample_size, percent, specList,
                          suppress=False, dr_species=['rad_cell', 1,1],
                          modelfiles = ['IFN_alpha_altSOCS_ppCompatible','IFN_beta_altSOCS_ppCompatible']):
    from pysb.export import export
    # Write modelfiles
    print("Importing models")
    alpha_model = __import__(modelfiles[0])
    py_output = export(alpha_model.model, 'python')
    with open('ODE_system_alpha.py','w') as f:
        f.write(py_output)
    beta_model = __import__(modelfiles[1])
    py_output = export(beta_model.model, 'python')
    with open('ODE_system_beta.py','w') as f:
        f.write(py_output)
    
    alpha_responses = [[] for i in range(len(specList))]
    beta_responses = [[] for i in range(len(specList))]
    for r in radii:
        courses = rad_cell_point(samplefile, r, end_time, sample_size, percent, specList, 
                                 suppress=True, dose_species=dr_species)
        # courses = [[IFNa spec 1], [IFNb spec 1], [IFNa spec 2], [IFNb spec 2]]
        #           [IFNa spec 1] = [[mean], [low], [high]]
        courses = [[l[-1] for l in s] for s in courses]
        # courses = [[mean dose, low, high]_IFNaS1, [mean dose, low, high]_IFNbS1, ...
        for i in range(len(specList)):
            alpha_responses[i].append(courses[i*2])
            beta_responses[i].append(courses[i*2+1])
    # Reorganize each species' list of [mean, low, high] triples into
    # separate mean / low / high dose-response curves
    alpha_responses = [[[el[i] for el in species] for i in range(3)]
                       for species in alpha_responses]
    beta_responses = [[[el[i] for el in species] for i in range(3)]
                      for species in beta_responses]
    return [alpha_responses, beta_responses]
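The snippets in this listing share one pattern: export a PySB model to a standalone Python module, write it to disk, then import that module and instantiate its Model class. A minimal sketch of the consuming side, assuming ODE_system_alpha.py was just written as in rad_cell_dr above (the Model().simulate(tspan=...) call is the same one exercised by the check_convert tests further down):

import numpy as np
import ODE_system_alpha

# Same call pattern as the exec'd check used in the check_convert tests below
ODE_system_alpha.Model().simulate(tspan=np.linspace(0, 3600, 100))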
Example 2
def main():
    plt.close('all')
    t=linspace(0,3600,num=100)
    testDose = ['I',6.022e18*logspace(-14,-2,num=50)]
    # kSOCS
    yscan = 1e-6*logspace(-2,2,num=50)
    t=linspace(0,3600,num=500)
    # Write modelfile
    import IFN_simplified_model_alpha_ppCompatible as alpha_model
    py_output = export(alpha_model.model, 'python')
    writeFile = "ODE_system_alpha.py"
    with open(writeFile, 'w') as f:
        f.write(py_output)

    import IFN_simplified_model_beta_ppCompatible as beta_model
    py_output = export(beta_model.model, 'python')
    writeFile = "ODE_system_beta.py"
    with open(writeFile, 'w') as f:
        f.write(py_output)

    # Run scans
# =============================================================================
    k3k4_DRparamScan("IFN_simplified_model_alpha_ppCompatible", 'alpha', ['kSOCSon',yscan],
                            testDose, t, [['TotalpSTAT',"Total pSTAT"]],
                            Norm=10000, doseNorm=6.022e18)
Example 3
def main():
    plt.close('all')
    modelfiles = [
        "ifnmodels.IFN_detailed_model_alpha_ppCompatible",
        "ifnmodels.IFN_detailed_model_beta_ppCompatible"
    ]
    # Write modelfiles
    print("Importing models")
    alpha_model = __import__(modelfiles[0], fromlist=['ifnmodels'])
    py_output = export(alpha_model.model, 'python')
    with open('ODE_system_alpha.py', 'w') as f:
        f.write(py_output)
    beta_model = __import__(modelfiles[1], fromlist=['ifnmodels'])
    py_output = export(beta_model.model, 'python')
    with open('ODE_system_beta.py', 'w') as f:
        f.write(py_output)

    p0 = [['kpa', 1, 0.1, 'log'], ['kSTATbinding', 1E-6, 0.4, 'log'],
          ['kSOCSon', 1.70E-6, 0.1, 'log'], ['kd4', 0.87, 0.2, 'log'],
          ['k_d4', 0.86, 0.5, 'log'], ['delR', -1878, 500, 'linear'],
          ['meanR', 2000, 300, 'linear'], ['kloc', 1.23E-3, 0.1, 'log'],
          ['kSOCSmRNA', 1E-3, 0.1, 'log'], ['mRNAdeg', 5E-4, 0.1, 'log'],
          ['mRNAtrans', 1E-3, 0.1, 'log'], ['kSOCS', 5E-3, 0.1, 'log']]

    our_priors_dict = {
        'R1': [100, 12000, None, None],
        'R2': [100, 12000, None, None],
        'kpa': [1.5E-8, 10, np.log(1), 4],
        'kSOCSon': [1.5E-11, 0.07, np.log(1E-6), 4],
        'k_d4': [4E-5, 0.9, np.log(0.006), 1.8],
        'kd4': [0.002, 44, np.log(0.3), 1.8],
        'kSTATbinding': [1E-11, 1, np.log(1E-6), 4],
        'kloc': [1E-5, 10, np.log(1.25E-3), 4],
        'kSOCSmRNA': [1E-7, 10, np.log(1E-3), 4],
        'mRNAdeg': [5E-8, 10, np.log(5E-4), 4],
        'mRNAtrans': [1E-7, 10, np.log(1E-3), 4],
        'kSOCS': [5E-7, 10, np.log(5E-3), 4]
    }

    #   (n, theta_0, beta, rho, chains, burn_rate=0.1, down_sample=1, max_attempts=6,
    #    pflag=True, cpu=None, randomize=True)
    MCMC(1000,
         p0,
         our_priors_dict,
         2,
         1,
         5,
         burn_rate=0.2,
         down_sample=40,
         max_attempts=6)
Example 4
def export_memi(model, formats, version):
    for f in formats:
        model_export = export(model, f)
        extension = (f if f != 'pysb_flat' else 'py')
        fname = 'MEMI%s.%s' % (version, extension)
        # export() returns a string, so open in text mode
        with open(fname, 'w') as fh:
            fh.write(model_export)
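A hypothetical call to export_memi, using one of PySB's bundled example models; the format names are the same ones exercised by the check_convert tests in this listing:

from pysb.export import export
from pysb.examples.robertson import model

# Writes MEMI1.0.sbml, MEMI1.0.bngl and MEMI1.0.py to the working directory
export_memi(model, ['sbml', 'bngl', 'pysb_flat'], '1.0')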
Example 5
def check_convert(model, format):
    """ Test exporters run without error """
    exported_file = None
    try:
        exported_file = export.export(model, format)
    except export.ExpressionsNotSupported:
        pass
    except export.CompartmentsNotSupported:
        pass
    except Exception as e:
        # Some example models are deliberately incomplete, so here we
        # will treat any of these "expected" exceptions as a success.
        model_base_name = model.name.rsplit('.', 1)[1]
        exception_class = expected_exceptions.get(model_base_name)
        if exception_class and isinstance(e, exception_class):
            pass
        else:
            raise

    if exported_file is not None:
        if format == 'python':
            # linspace arguments picked to avoid VODE warning
            exec(
                exported_file +
                'Model().simulate(tspan=numpy.linspace(0,1,501))\n',
                {'_use_inline': False})
        elif format == 'pysb_flat':
            exec(exported_file, {'__name__': model.name})
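A minimal, hypothetical driver for check_convert above: expected_exceptions is stubbed out here (in the real test suite it is a module-level dict), and the model comes from PySB's bundled examples.

from pysb import export
from pysb.examples.robertson import model

expected_exceptions = {}  # stub: no "expected" failures for this model

for fmt in ('python', 'pysb_flat'):
    check_convert(model, fmt)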
Example 7
def export_pomi(model, formats):
    for f in formats:
        model_export = export(model, f)
        extension = (f if f != 'pysb_flat' else 'py')
        fname = 'POMI1.0.%s' % extension
        with open(fname, 'wt') as fh:
            fh.write(model_export)
Example 9
def export_hello(model, formats):
    for f in formats:
        model_export = export(model, f)
        extension = (f if f != 'pysb_flat' else 'py')
        fname = 'hello_indra_model.%s' % extension
        with open(fname, 'wb') as fh:
            fh.write(model_export.encode('utf-8'))
Example 10
def export_hello(model, formats):
    for f in formats:
        model_export = export(model, f)
        extension = (f if f != 'pysb_flat' else 'py')
        fname = 'hello_indra_model.%s' % extension
        # export() returns a string, so open in text mode
        with open(fname, 'w') as fh:
            fh.write(model_export)
Example 11
def _prepare_kappa(model):
    """Return a Kappa STD with the model loaded."""
    import kappy
    kappa = kappy.KappaStd()
    model_str = export(model, 'kappa')
    kappa.add_model_string(model_str)
    kappa.project_parse()
    return kappa
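Usage sketch for _prepare_kappa, assuming kappy is installed; the returned KappaStd handle can then be queried for analyses, exactly as make_contact_map and make_influence_map do further down:

from pysb.export import export
from pysb.examples.robertson import model

kappa = _prepare_kappa(model)
cmap = kappa.analyses_contact_map()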
Example 13
def profile(processes):
    if processes[0] != 1:
        print("Must test serial time. Please ensure processes[0]==1")
        return 1
    test_priors = {
        'kpa': [1.5E-9, 1, np.log(1), 4],
        'kSOCSon': [1.5E-11, 0.07, np.log(1E-6), 4],
        'k_d4': [4E-5, 0.9, np.log(0.006), 1.8],
        'kd4': [0.002, 44, np.log(0.3), 1.8],
        'R1': [100, 12000, None, None],
        'R2': [100, 12000, None, None]
    }
    plt.close('all')
    modelfiles = [
        'ifnmodels.IFN_alpha_altSOCS_ppCompatible',
        'ifnmodels.IFN_beta_altSOCS_ppCompatible'
    ]
    # Write modelfiles
    alpha_model = __import__(modelfiles[0], fromlist=['ifnmodels'])
    py_output = export(alpha_model.model, 'python')
    with open('ODE_system_alpha.py', 'w') as f:
        f.write(py_output)
    beta_model = __import__(modelfiles[1], fromlist=['ifnmodels'])
    py_output = export(beta_model.model, 'python')
    with open('ODE_system_beta.py', 'w') as f:
        f.write(py_output)
    p0 = [['kpa', 1E-6, 0.1, 'log'], ['kSOCSon', 1E-6, 0.1, 'log'],
          ['kd4', 0.3, 0.2, 'log'], ['k_d4', 0.006, 0.5, 'log'],
          ['delR', 0, 500, 'linear'], ['meanR', 2000, 300, 'linear']]
    # ==========================================================
    times = []
    for p in processes:
        tic = time.perf_counter()
        MCMC(50, p0, test_priors, 8, 1, 8, burn_rate=0.1, down_sample=2, cpu=p)
        toc = time.perf_counter()
        times.append(toc - tic)
    fig, ax = plt.subplots()
    ax.scatter([1] + processes[1:],
               [1] + [times[0] / times[i] for i in range(1, len(times))],
               c='b',
               s=64)
    ax.set_title('Profiling MCMC')
    ax.set_xlabel('Number of Threads')
    ax.set_ylabel('Speed up')
    plt.savefig('speedup.pdf')
    plt.show()
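Hypothetical invocation of profile: the first entry must be 1 (the serial baseline enforced at the top of the function), followed by the process counts to compare.

profile([1, 2, 4])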
Example 14
def check_convert(model, format):
    """ Test exporters run without error """
    exported_file = None
    try:
        exported_file = export.export(model, format)
    except export.ExpressionsNotSupported:
        pass
    except export.CompartmentsNotSupported:
        pass
    except Exception as e:
        # Some example models are deliberately incomplete, so here we
        # will treat any of these "expected" exceptions as a success.
        exception_class = expected_exceptions.get(base_name(model))
        if not exception_class or not isinstance(e, exception_class):
            raise

    if exported_file is not None:
        if format == 'python':
            # linspace arguments picked to avoid VODE warning
            exec(exported_file + 'Model().simulate(tspan=numpy.linspace(0,1,501))\n', {'_use_inline': False})
        elif format == 'pysb_flat':
            exec(exported_file, {'__name__': model.name})
        elif format == 'sbml':
            # Skip the simulation comparison if roadrunner not available
            if roadrunner is None:
                raise SkipTest("SBML Simulation test skipped (requires roadrunner)")

            roadrunner.Logger.setLevel(roadrunner.Logger.LOG_ERROR)

            # Simulate SBML using roadrunner
            rr = roadrunner.RoadRunner(exported_file)
            rr.timeCourseSelections = \
                ['__s{}'.format(i) for i in range(len(model.species))] + \
                ['__obs{}'.format(i) for i in range(len(model.observables))]
            rr_result = rr.simulate(0, 10, 100)

            # Simulate original using PySB
            df = ScipyOdeSimulator(model).run(tspan=np.linspace(0, 10, 100)).dataframe

            # Compare species' trajectories
            for sp_idx in range(len(model.species)):
                rr_sp = rr_result[:, sp_idx]
                py_sp = df.iloc[:, sp_idx]
                is_close = np.allclose(rr_sp, py_sp, rtol=1e-4)
                if not is_close:
                    print(pd.DataFrame(dict(rr=rr_sp, pysb=py_sp)))
                    raise ValueError('Model {}, species __s{} trajectories do not match:'.format(
                        model.name, sp_idx))

            # Compare observables' trajectories
            for obs_idx in range(len(model.observables)):
                rr_obs = rr_result[:, obs_idx + len(model.species)]
                py_obs = df.iloc[:, obs_idx + len(model.species)]
                is_close = np.allclose(rr_obs, py_obs, rtol=1e-4)
                if not is_close:
                    print(pd.DataFrame(dict(rr=rr_obs, pysb=py_obs)))
                    raise ValueError('Model {}, observable__o{} "{}" trajectories do not match:'.format(
                        model.name, obs_idx, model.observables[obs_idx].name))
Example 15
def make_contact_map(pysb_model):
    """Return a Kappa contact map."""
    kappa = kappy.KappaStd()
    model_str = export(pysb_model, 'kappa')
    kappa.add_model_string(model_str)
    kappa.project_parse()
    cmap = kappa.analyses_contact_map()
    cm = cm_json_to_graph(cmap)
    return cm
Example 17
 def build_model(self, name):
     if name == '':
         return None
     else:
         model_code = __import__('ifnmodels.' + name, fromlist=['ifnmodels'])
         py_output = export(model_code.model, 'python')
         ODE_filename = "ODE_system_{}_{}.py".format(time.strftime("%Y%m%d-%H%M%S"), randint(100000, 999999))
         with open(ODE_filename, 'w') as f:
             f.write(py_output)
         ODE_system = __import__(ODE_filename[:-3])
         model_obj = ODE_system.Model()
         os.remove(ODE_filename)
         return model_obj
Example 18
def make_influence_map(pysb_model):
    """Return a Kappa influence map."""
    kappa = kappy.KappaStd()
    model_str = export(pysb_model, 'kappa')
    kappa.add_model_string(model_str)
    kappa.project_parse()
    imap = kappa.analyses_influence_map()
    im = im_json_to_graph(imap)
    for param in pysb_model.parameters:
        try:
            im.remove_node(param.name)
        except:
            pass
    return im
Example 20
    def generate_im(self, model):
        """Return a graph representing the influence map generated by Kappa

        Parameters
        ----------
        model : pysb.Model
            The PySB model whose influence map is to be generated

        Returns
        -------
        graph : networkx.MultiDiGraph
            A MultiDiGraph representing the influence map
        """
        kappa = kappy.KappaStd()
        model_str = export.export(model, 'kappa')
        kappa.add_model_string(model_str)
        kappa.project_parse()
        imap = kappa.analyses_influence_map(accuracy='medium')
        graph = im_json_to_graph(imap)
        return graph
Example 21
xmlfile = bnglfile.replace('.bngl', '.xml')
gdatfile = bnglfile.replace('.bngl', '.gdat')
rxnfile = bnglfile.replace('.bngl', '.rxns.tsv')
paramsfile = bnglfile.replace('.bngl', '.params.tsv')

# write all model parameters to a separate file
with open(paramsfile, 'w') as file:
    file.write('parameter\tvalue\n')
    for param in model.parameters:
        file.write(f'{param.name}\t{param.value}\n')
# compress the params file
sp.run(['gzip', '-f', paramsfile.split('/')[-1]], cwd=outdir)

# write BNGL file
with open(bnglfile, 'w') as file:
    file.write(export(model, 'bngl'))

# convert BNGL file to XML for NFSim input
sp.run(['BNG2.pl', '--xml', '--outdir', outdir, bnglfile])

# parameters for NFsim
equilibrium_time = 0  # seconds
tstop = str(1000000)  # seconds
maxcputime = str(100 * 60)  # seconds
osteps = str(100)  # number of samples
seed = str(111)  # random number initial seed
gml = str(1000000)  # max num of mol allowed in simulation
utl = '3'  # max number of bonds to traverse during simulation
network = '-connect'  # whether to infer reaction network connectivity

# print NFSim command
Example 22
def to_potterswheel(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'potterswheel'))
    return None
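The same thin-wrapper pattern recurs below for bng_net, pysb_flat, kappa, stochkit, json and mathematica. A hypothetical call, using a bundled example model (the output filename is only illustrative):

from pysb.export import export
from pysb.examples.robertson import model

to_potterswheel(model, 'robertson_potterswheel.m')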
Example 23
 def save_model(self, name):
     file_text = export(self.model, 'bng_net')
     print(file_text)
     with open(name, 'w') as f:
         f.write(file_text)
Example 24
def to_bngnet(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'bng_net'))
    return None
Example 25
def IFN_2Dscan(modelfile, param1, param2, t_list, spec, custom_params=False,
                  cpu=None, doseNorm=1, suppress=False, verbose=1):
    # initialization
    jobs = Queue()
    result = JoinableQueue()
    if cpu is None or cpu >= cpu_count():
        NUMBER_OF_PROCESSES = cpu_count()-1
    else:
        NUMBER_OF_PROCESSES = cpu
    if verbose != 0: print("Using {} threads".format(NUMBER_OF_PROCESSES))
    # build task list
    params=[]
    if verbose != 0: print("Building tasks")
    if type(custom_params) == list:
        for val1 in param1[1]:
            for val2 in param2[1]:
                params.append([[param1[0],val1],[param2[0],val2]]+[c for c in custom_params])        
    else:
        for val1 in param1[1]:
            for val2 in param2[1]:
                params.append([[param1[0],val1],[param2[0],val2]])        

    # Write modelfile
    imported_model = __import__(modelfile,fromlist=['ifnmodels'])
    py_output = export(imported_model.model, 'python')
    with open('ODE_system.py','w') as f:
        f.write(py_output)

    tasks = [[modelfile, t_list, spec, p] for p in params]
    # put jobs on the queue
    if verbose != 0: print("There are {} tasks to compute".format(len(params)))
    if verbose != 0: print("Putting tasks on the queue")
	
    for w in tasks:
        jobs.put(w)
		
    if verbose != 0: print("Computing scan")
	
    # start up the workers
    [Process(target=IFN_2Dscan_helper, args=(i, jobs, result)).start()
            for i in range(NUMBER_OF_PROCESSES)]
    
    # pull in the results from each worker
    pool_results=[]
    for t in range(len(tasks)):
        r = result.get()
        pool_results.append(r)
        result.task_done()
    # tell the workers there are no more jobs
    for w in range(NUMBER_OF_PROCESSES):
        jobs.put(None)
    # close all extra threads
    result.join()
    jobs.close()
    result.close()
    if verbose != 0: print("Done scan")
    response_image = image_builder(pool_results, doseNorm, (len(param1[1]),len(param2[1])))
    # plot heatmap if suppress==False
    if not suppress:
        IFN_heatmap(response_image, "response image - {}".format(param1[0]), param2[0])
    #return the scan 
    return response_image
Example 26
from pysb import *

Model()

import EGFR_to_RAS_dim as EGFR_to_RAS
EGFR_to_RAS.monomers()
EGFR_to_RAS.KRAS_activation()
EGFR_to_RAS.SOS_dephosphorylation()
EGFR_to_RAS.declare_observables()

import BRAF_module
BRAF_module.monomers()
BRAF_module.BRAF_dynamics()
BRAF_module.observables()

import MEK_ERK_phosphorylation
MEK_ERK_phosphorylation.monomers()
MEK_ERK_phosphorylation.by_BRAF_mut()
MEK_ERK_phosphorylation.MEK_phosphorylates_ERK()
MEK_ERK_phosphorylation.DUSP_phospatase()
MEK_ERK_phosphorylation.PP2A_phosphatase()
MEK_ERK_phosphorylation.ERK_feedback()
MEK_ERK_phosphorylation.MEK_inhibitor()
MEK_ERK_phosphorylation.declare_observables()


from pysb.export import export
matlab_output = export(model, 'matlab')

with open('matlab_files/run_timecourse_test.m', 'w') as f:
    f.write(matlab_output)
Example 27
def check_convert(model, format):
    """ Test exporters run without error """
    exported_file = None
    try:
        if format == 'json':
            exported_file = JsonExporter(model).export(include_netgen=True)
        else:
            exported_file = export.export(model, format)
    except export.ExpressionsNotSupported:
        pass
    except export.CompartmentsNotSupported:
        pass
    except export.LocalFunctionsNotSupported:
        pass
    except Exception as e:
        # Some example models are deliberately incomplete, so here we
        # will treat any of these "expected" exceptions as a success.
        exception_class = expected_exceptions.get(base_name(model))
        if not exception_class or not isinstance(e, exception_class):
            raise

    if exported_file is not None:
        if format == 'python':
            # linspace arguments picked to avoid VODE warning
            exec(
                exported_file +
                'Model().simulate(tspan=numpy.linspace(0,1,501))\n',
                {'_use_inline': False})
        elif format == 'pysb_flat':
            exec(exported_file, {'__name__': model.name})
        elif format == 'sbml':
            # Skip the simulation comparison if roadrunner not available
            if roadrunner is None:
                raise SkipTest(
                    "SBML Simulation test skipped (requires roadrunner)")

            roadrunner.Logger.setLevel(roadrunner.Logger.LOG_ERROR)

            # Simulate SBML using roadrunner
            rr = roadrunner.RoadRunner(exported_file)
            rr.timeCourseSelections = \
                ['__s{}'.format(i) for i in range(len(model.species))] + \
                ['__obs{}'.format(i) for i in range(len(model.observables))]
            rr_result = rr.simulate(0, 10, 100)

            # Simulate original using PySB
            df = ScipyOdeSimulator(model).run(
                tspan=np.linspace(0, 10, 100)).dataframe

            # Compare species' trajectories
            for sp_idx in range(len(model.species)):
                rr_sp = rr_result[:, sp_idx]
                py_sp = df.iloc[:, sp_idx]
                is_close = np.allclose(rr_sp, py_sp, rtol=1e-4)
                if not is_close:
                    print(pd.DataFrame(dict(rr=rr_sp, pysb=py_sp)))
                    raise ValueError(
                        'Model {}, species __s{} trajectories do not match:'.
                        format(model.name, sp_idx))

            # Compare observables' trajectories
            for obs_idx in range(len(model.observables)):
                rr_obs = rr_result[:, obs_idx + len(model.species)]
                py_obs = df.iloc[:, obs_idx + len(model.species)]
                is_close = np.allclose(rr_obs, py_obs, rtol=1e-4)
                if not is_close:
                    print(pd.DataFrame(dict(rr=rr_obs, pysb=py_obs)))
                    raise ValueError(
                        'Model {}, observable__o{} "{}" trajectories do not match:'
                        .format(model.name, obs_idx,
                                model.observables[obs_idx].name))
        elif format == 'json':
            # Round-trip the model by re-importing the JSON
            m = model_from_json(exported_file)
            # Check network generation and force RHS evaluation
            if model.name not in ('pysb.examples.tutorial_b',
                                  'pysb.examples.tutorial_c'):
                ScipyOdeSimulator(m, compiler='cython')
                if sys.version_info.major >= 3:
                    # Only check on Python 3 to avoid string-to-unicode encoding
                    # issues
                    check_model_against_component_list(m,
                                                       model.all_components())
        elif format == 'bngl':
            if model.name.endswith('tutorial_b') or \
                    model.name.endswith('tutorial_c'):
                # Models have no rules
                return
            with tempfile.NamedTemporaryFile(suffix='.bngl',
                                             delete=False) as tf:
                tf.write(exported_file.encode('utf8'))
                # Cannot have two simultaneous file handles on Windows
                tf.close()

                try:
                    m = model_from_bngl(tf.name)
                    # Generate network and force RHS evaluation
                    ScipyOdeSimulator(m, compiler='cython')
                finally:
                    os.unlink(tf.name)
Example 28
def to_pysb(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'pysb_flat'))
    return None
Example 29
def to_kappa(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'kappa'))
    return None
Example 30
def to_stochkit(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'stochkit'))
    return None
Example 31
# # Limited Internalization Sim
# posterior_filename = 'MCMC_Results-03-11-2018\\Reanalysis\\posterior_sample_reanalysis.csv'
# priors_dict={'R1':[100,12000,None,None],'R2':[100,12000,None,None],
#              'kSOCSon':[1.5E-11,0.07,np.log(1E-6),4],
#              'k_d4':[4E-5,0.9,np.log(0.006),1.8],
#              'krec_a1':[3E-7,3E-1,None,None],'krec_a2':[5E-6,5E0,None,None],
#              'krec_b1':[1E-7,1E-1,None,None],'krec_b2':[1E-6,1E0,None,None]}
# modelfiles = ['ifnmodels.IFN_alpha_altSOCS_Internalization_ppCompatible','ifnmodels.IFN_beta_altSOCS_Internalization_ppCompatible']
# =============================================================================


# Make sure modelfile is up to date
# Write modelfiles
from pysb.export import export
alpha_model = __import__(modelfiles[0],fromlist=['ifnmodels'])
py_output = export(alpha_model.model, 'python')
with open('ODE_system_alpha.py','w') as f:
    f.write(py_output)
beta_model = __import__(modelfiles[1],fromlist=['ifnmodels'])
py_output = export(beta_model.model, 'python')
with open('ODE_system_beta.py','w') as f:
    f.write(py_output)
    
# Global data import since this script will be used exclusively on IFN data    
IFN_exps = [ED.data.loc[(ED.data.loc[:,'Dose (pM)']==10) & (ED.data.loc[:,'Interferon']=="Alpha"),['0','5','15','30','60']].values[0],
            ED.data.loc[(ED.data.loc[:,'Dose (pM)']==10) & (ED.data.loc[:,'Interferon']=="Beta"),['0','5','15','30','60']].values[0],
            ED.data.loc[(ED.data.loc[:,'Dose (pM)']==90) & (ED.data.loc[:,'Interferon']=="Alpha"),['0','5','15','30','60']].values[0],
            ED.data.loc[(ED.data.loc[:,'Dose (pM)']==90) & (ED.data.loc[:,'Interferon']=="Beta"),['0','5','15','30','60']].values[0],
            ED.data.loc[(ED.data.loc[:,'Dose (pM)']==600) & (ED.data.loc[:,'Interferon']=="Alpha"),['0','5','15','30','60']].values[0],
            ED.data.loc[(ED.data.loc[:,'Dose (pM)']==600) & (ED.data.loc[:,'Interferon']=="Beta"),['0','5','15','30','60']].values[0]]
Example 32
def to_json(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'json'))
    return None
Example 33
def main():
    plt.close('all')
    #    modelfiles = ['ifnmodels.IFN_alpha_altSOCS_ppCompatible','ifnmodels.IFN_beta_altSOCS_ppCompatible']
    modelfiles = [
        'ifnmodels.IFN_alpha_altSOCS_Internalization_ppCompatible',
        'ifnmodels.IFN_beta_altSOCS_Internalization_ppCompatible'
    ]
    # Write modelfiles
    print("Importing models")
    alpha_model = __import__(modelfiles[0], fromlist=['ifnmodels'])
    py_output = export(alpha_model.model, 'python')
    with open('ODE_system_alpha.py', 'w') as f:
        f.write(py_output)
    beta_model = __import__(modelfiles[1], fromlist=['ifnmodels'])
    py_output = export(beta_model.model, 'python')
    with open('ODE_system_beta.py', 'w') as f:
        f.write(py_output)
# =============================================================================
#   altSOCS model:
#     p0=[['kpa',1.79E-5,0.1,'log'],['kSOCSon',1.70E-6,0.1,'log'],['kd4',0.87,0.2,'log'],
#         ['k_d4',0.86,0.5,'log'],['delR',-1878,500,'linear'],['meanR',2000,300,'linear']]
#
#     our_priors_dict={'R1':[100,12000,None,None],'R2':[100,12000,None,None],
#              'kpa':[1.5E-9,1,np.log(1),4],'kSOCSon':[1.5E-11,0.07,np.log(1E-6),4],
#              'k_d4':[4E-5,0.9,np.log(0.006),1.8],'kd4':[0.002,44,np.log(0.3),1.8]}
# =============================================================================

#   altSOCS model with internalization
    p0_int = [['kpa', 1.79E-5, 0.1, 'log'], ['kSOCSon', 1.70E-6, 0.2, 'log'],
              ['kd4', 0.87, 0.2, 'log'], ['k_d4', 0.86, 0.5, 'log'],
              ['delR', -1878, 500, 'linear'], ['meanR', 2000, 300, 'linear'],
              ['kIntBasal_r1', 1E-4, 0.1, 'log'],
              ['kIntBasal_r2', 2E-4, 0.1, 'log'],
              ['kint_IFN', 5E-4, 0.1, 'log'], ['krec_a1', 3E-4, 0.1, 'log'],
              ['krec_a2', 5E-3, 0.1, 'log'], ['krec_b1', 1E-4, 0.1, 'log'],
              ['krec_b2', 1E-3, 0.1, 'log']]

    int_priors_dict = {
        'R1': [100, 12000, None, None],
        'R2': [100, 12000, None, None],
        'kpa': [1.5E-9, 1, np.log(1), 4],
        'kSOCSon': [1.5E-11, 0.07, np.log(1E-6), 4],
        'k_d4': [4E-5, 0.9, np.log(0.006), 1.8],
        'kd4': [0.002, 44, np.log(0.3), 1.8],
        'kIntBasal_r1': [1E-7, 1E-1, None, None],
        'kIntBasal_r2': [2E-7, 2E-1, None, None],
        'kint_IFN': [5E-7, 5E-1, None, None],
        'krec_a1': [3E-7, 3E-1, None, None],
        'krec_a2': [5E-6, 5E0, None, None],
        'krec_b1': [1E-7, 1E-1, None, None],
        'krec_b2': [1E-6, 1E0, None, None]
    }

    #   (n, theta_0, beta, rho, chains, burn_rate=0.1, down_sample=1, max_attempts=6,
    #    pflag=True, cpu=None, randomize=True)
    MCMC(50,
         p0_int,
         int_priors_dict,
         5,
         1,
         3,
         burn_rate=0.0,
         down_sample=1,
         max_attempts=0)
Example 34
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 19 08:49:04 2018

@author: Duncan

Export to STAN
"""
from pysb.export import export
modelfiles = [
    'ifnmodels.IFN_alpha_altSOCS_Internalization_ppCompatible',
    'ifnmodels.IFN_beta_altSOCS_Internalization_ppCompatible'
]

alpha_model = __import__(modelfiles[0], fromlist=['ifnmodels'])
py_output = export(alpha_model.model, 'stan')
with open('STAN_alpha.stan', 'w') as f:
    f.write(py_output)
beta_model = __import__(modelfiles[1], fromlist=['ifnmodels'])
py_output = export(beta_model.model, 'stan')
with open('STAN_beta.stan', 'w') as f:
    f.write(py_output)
Example 35
def to_mathematica(model, outfile):
    with open(outfile, 'w') as outfile:
        outfile.write(export(model, 'mathematica'))
    return None
Example 36
def k3k4_DRparamScan(modelfile, typeIFN, param2, testDose, t_list, spec, 
                  custom_params=None, Norm=False, cpu=None, suppress=False, doseNorm=1):    
    # build k3 and k4 parameters, performing a sanity check at the same time
    k4scan=[]
    k3scan=[]
    if typeIFN=='alpha':
        k4scan = 0.3*logspace(-3,3,num=len(param2[1]))
        k3scan = 3E-4*logspace(-3,3,num=len(param2[1]))
    elif typeIFN=='beta':
        k4scan = 0.0006*logspace(-3,3,num=len(param2[1]))
        k3scan = 1.2e-5*logspace(-3,3,num=len(param2[1]))
    else:
        print("Expected type of interferon to be either alpha or beta")
        return 1

    # initialization
    jobs = Queue()
    result = JoinableQueue()
    if cpu is None or cpu >= cpu_count():
        NUMBER_OF_PROCESSES = cpu_count()-1
    else:
        NUMBER_OF_PROCESSES = cpu
    print("using {} processors".format(NUMBER_OF_PROCESSES))
    # build task list
    params=[]
    print("building tasks")
    if typeIFN=='alpha':
        if custom_params is None:
            for val1 in range(len(k4scan)):
                for val2 in param2[1]:
                    params.append([['kd3',k3scan[val1]],['kd4',k4scan[val1]],[param2[0],val2]])
        else:
            for val1 in range(len(k4scan)):
                for val2 in param2[1]:
                    params.append([['kd3',k3scan[val1]],['kd4',k4scan[val1]],[param2[0],val2]]+[c for c in custom_params])
    elif typeIFN=='beta':
        if custom_params is None:
            for val1 in range(len(k4scan)):
                for val2 in param2[1]:
                    params.append([['k_d3',k3scan[val1]],['k_d4',k4scan[val1]],[param2[0],val2]])
        else:
            for val1 in range(len(k4scan)):
                for val2 in param2[1]:
                    params.append([['k_d3',k3scan[val1]],['k_d4',k4scan[val1]],[param2[0],val2]]+[c for c in custom_params])

    # Write modelfile
    imported_model = __import__(modelfile)
    py_output = export(imported_model.model, 'python')
    with open('ODE_system.py','w') as f:
        f.write(py_output)
        
    # put jobs on the queue
    tasks = [[modelfile, testDose, t_list, spec, Norm, p] for p in params]
    print("There are {} tasks to compute".format(len(params)))
    print("putting tasks on the queue")
	
    for w in tasks:
        jobs.put(w)
		
    print("computing scan")
	
    # start up the workers
    [Process(target=p_DRparamScan_helper, args=(i, jobs, result)).start()
            for i in range(NUMBER_OF_PROCESSES)]
    
    # pull in the results from each worker
    pool_results=[]
    for t in range(len(tasks)):
        r = result.get()
        pool_results.append(r)
        result.task_done()
    # tell the workers there are no more jobs
    for w in range(NUMBER_OF_PROCESSES):
        jobs.put(None)
    # close all extra threads
    result.join()
    jobs.close()
    result.close()
    print("done scan")
    # plot heatmap if suppress==False
    if not suppress:
        dose_image, response_image = image_builder(pool_results, doseNorm, (len(k4scan),len(param2[1])))
        
        if typeIFN=='alpha':
            IFN_heatmap(response_image, ['Response_EC50_alpha_k4',k4scan], param2)
            IFN_heatmap(dose_image, ['Dose_EC50_alpha_k4',k4scan], param2)
        else:
            IFN_heatmap(response_image, ['Response_EC50_beta_k4',k4scan], param2)
            IFN_heatmap(dose_image, ['Dose_EC50_beta_k4',k4scan], param2)
    #return the scan 
    return pool_results
Example 37
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
from pysb.export import export
import numpy as np
import matplotlib.pyplot as plt
modelfiles = [
    'IFN_detailed_alt_SOCS_alpha_ppCompatible',
    'IFN_detailed_alt_SOCS_beta_ppCompatible'
]

alpha_model = __import__(modelfiles[0])
py_output = export(alpha_model.model, 'sbml')
with open('detailed_SBML_alpha.sbml', 'w') as f:
    f.write(py_output)
beta_model = __import__(modelfiles[1])
py_output = export(beta_model.model, 'sbml')
with open('detailed_SBML_beta.sbml', 'w') as f:
    f.write(py_output)
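As a quick sanity check, the SBML written above can be loaded and simulated with roadrunner, mirroring what the check_convert tests in this listing do with the in-memory SBML string (a sketch, assuming roadrunner is installed):

import roadrunner

rr = roadrunner.RoadRunner('detailed_SBML_alpha.sbml')
rr_result = rr.simulate(0, 3600, 100)  # start time, end time, number of points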

# =============================================================================
# import ODE_system_alpha as m
# mA = m.Model()
# alpha_parameters=[]
# for p in mA.parameters:
#     if p[0]=='kd4':
#         alpha_parameters.append(0.485)
#     elif p[0]=='kSOCSon':
Example 38
def check_convert(model, format):
    """ Test exporters run without error """
    exported_file = None
    try:
        exported_file = export.export(model, format)
    except export.ExpressionsNotSupported:
        pass
    except export.CompartmentsNotSupported:
        pass
    except export.LocalFunctionsNotSupported:
        pass
    except Exception as e:
        # Some example models are deliberately incomplete, so here we
        # will treat any of these "expected" exceptions as a success.
        exception_class = expected_exceptions.get(base_name(model))
        if not exception_class or not isinstance(e, exception_class):
            raise

    if exported_file is not None:
        if format == 'python':
            # linspace arguments picked to avoid VODE warning
            exec(
                exported_file +
                'Model().simulate(tspan=numpy.linspace(0,1,501))\n',
                {'_use_inline': False})
        elif format == 'pysb_flat':
            exec(exported_file, {'__name__': model.name})
        elif format == 'sbml':
            # Skip the simulation comparison if roadrunner not available
            if roadrunner is None:
                raise SkipTest(
                    "SBML Simulation test skipped (requires roadrunner)")

            roadrunner.Logger.setLevel(roadrunner.Logger.LOG_ERROR)

            # Simulate SBML using roadrunner
            rr = roadrunner.RoadRunner(exported_file)
            rr.timeCourseSelections = \
                ['__s{}'.format(i) for i in range(len(model.species))] + \
                ['__obs{}'.format(i) for i in range(len(model.observables))]
            rr_result = rr.simulate(0, 10, 100)

            # Simulate original using PySB
            df = ScipyOdeSimulator(model).run(
                tspan=np.linspace(0, 10, 100)).dataframe

            # Compare species' trajectories
            for sp_idx in range(len(model.species)):
                rr_sp = rr_result[:, sp_idx]
                py_sp = df.iloc[:, sp_idx]
                is_close = np.allclose(rr_sp, py_sp, rtol=1e-4)
                if not is_close:
                    print(pd.DataFrame(dict(rr=rr_sp, pysb=py_sp)))
                    raise ValueError(
                        'Model {}, species __s{} trajectories do not match:'.
                        format(model.name, sp_idx))

            # Compare observables' trajectories
            for obs_idx in range(len(model.observables)):
                rr_obs = rr_result[:, obs_idx + len(model.species)]
                py_obs = df.iloc[:, obs_idx + len(model.species)]
                is_close = np.allclose(rr_obs, py_obs, rtol=1e-4)
                if not is_close:
                    print(pd.DataFrame(dict(rr=rr_obs, pysb=py_obs)))
                    raise ValueError(
                        'Model {}, observable__o{} "{}" trajectories do not match:'
                        .format(model.name, obs_idx,
                                model.observables[obs_idx].name))