def __init__(self, initialArgument, inputQ, outputQ, isException=False, logger=None):
    """
    Parameters
    ----------
    initialArgument: Object
        passed through to the superclass constructor
    inputQ: multiprocessing.Queue
    outputQ: multiprocessing.Queue
    isException: bool
        presumably makes the server exercise its error path — confirm against run()
    logger: Logger
    """
    if logger is None:
        logger = Logger()
    # BUG FIX: the caller-supplied logger was previously discarded and a
    # fresh Logger() was always passed to the superclass.
    super().__init__(initialArgument, inputQ, outputQ, logger=logger)
    self.isException = isException
def _testApi(self, method, logLevel):
    """
    Verifies that invoking the named logging method twice produces the same
    number of log lines each time.

    Parameters
    ----------
    method: str
        name of the Logger method to invoke dynamically
    logLevel: int
    """
    if IGNORE_TEST:
        return
    logger = Logger(toFile=LOG_PATH, logLevel=logLevel)
    # IDIOM FIX: use getattr for dynamic dispatch instead of exec on a
    # format-built statement string.
    getattr(logger, method)(MSG)
    line1s = self._checkMsg(MSG)
    # logger = Logger(toFile=LOG_PATH, logLevel=0)
    getattr(logger, method)(MSG)
    # NOTE(review): the first call is measured via _checkMsg and the second
    # via read(); presumably both return the log file's lines — confirm.
    line2s = self.read()
    self.assertEqual(len(line1s), len(line2s))
def mkParameters(cls,
                 parametersToFit: list,
                 logger: Logger = None,
                 parameterLowerBound: float = cn.PARAMETER_LOWER_BOUND,
                 parameterUpperBound: float = cn.PARAMETER_UPPER_BOUND
                 ) -> lmfit.Parameters:
    """
    Constructs lmfit parameters based on specifications.

    Parameters
    ----------
    parametersToFit: list-Parameter/list-str
    logger: error logger
    parameterLowerBound: lower value of range for parameters
    parameterUpperBound: upper value of range for parameters

    Returns
    -------
    lmfit.Parameters

    Raises
    ------
    RuntimeError: if parametersToFit is empty
    """
    if len(parametersToFit) == 0:
        raise RuntimeError("Must specify at least one parameter.")
    if logger is None:
        # BUG FIX: was `logger = logger()`, which called None and raised
        # TypeError whenever a caller passed logger=None.
        logger = Logger()
    # Process each parameter, wrapping bare names in a default-bounded Parameter
    elements = []
    for element in parametersToFit:
        if not isinstance(element, SBstoat.Parameter):
            element = SBstoat.Parameter(element,
                                        lower=parameterLowerBound,
                                        upper=parameterUpperBound)
        elements.append(element)
    return SBstoat.Parameter.mkParameters(elements)
def __init__(self, function, initialParams, methods, logger=None, isCollect=False):
    """
    Parameters
    ----------
    function: Function
        Arguments
            lmfit.parameters
            isInitialze (bool). True on first call the
            isGetBest (bool). True to retrieve best parameters
        returns residuals (if bool arguments are false)
    initialParams: lmfit.parameters
    methods: list-_helpers.OptimizerMethod
    isCollect: bool
        Collects performance statistcs
    """
    # Inputs
    self._function = function
    self._methods = methods
    self._initialParams = initialParams
    self._isCollect = isCollect
    self.logger = Logger() if logger is None else logger
    # Outputs
    self.performanceStats = []  # list of performance results
    self.qualityStats = []      # relative rssq
    self.params = None
    self.minimizerResult = None
    self.rssq = None
def __init__(self, modelFitters, modelNames=None, modelWeights=None,
             fitterMethods=None, numRestart=0, isParallel=False, logger=None):
    """
    Parameters
    ----------
    modelFitters: list-modelFiter
    modelWeights: list-float
        how models are weighted in least squares
    modelNames: list-str
    fitterMethods: list-optimization methods
    numRestart: int
        number of times the minimization is restarted with random
        initial values for parameters to fit.
    isParallel: bool
        runs each fitter in parallel
    logger: Logger

    Raises
    ------
    ValueError: len(modelNames) == len(modelFitters)
    """
    self.numModel = len(modelFitters)
    self.modelWeights = modelWeights
    if self.modelWeights is None:
        self.modelWeights = np.repeat(1, self.numModel)
    self.modelNames = modelNames
    if self.modelNames is None:
        # BUG FIX: previously referenced the undefined name
        # `modelSpecifications`, raising NameError whenever modelNames was None.
        self.modelNames = [str(v) for v in range(len(modelFitters))]
    self._numRestart = numRestart
    self._isParallel = isParallel
    self.logger = Logger() if logger is None else logger
    # Validation checks
    if self.numModel != len(self.modelNames):
        msg = "Length of modelNames must be same as number of modelFitters."
        raise ValueError(msg)
    #
    # BUG FIX: zip over self.modelNames (not the raw argument) so the
    # defaulted names are used when modelNames is None.
    self.fitterDct = {n: f for n, f in zip(self.modelNames, modelFitters)}
    # Construct tha parameters for each model
    self.parametersCollection = [f.params for f in self.fitterDct.values()]
    self.parameterManager = _ParameterManager(self.modelNames,
                                              self.parametersCollection)
    self._fitterMethods = ModelFitter.makeMethods(fitterMethods,
                                                  cn.METHOD_FITTER_DEFAULTS)
    # Residuals calculations
    self.residualsServers = [ResidualsServer(f, None, None, logger=self.logger)
                             for f in self.fitterDct.values()]
    self.manager = None
    # Results
    self.optimizer = None
def testNoLogPerformance(self):
    """No blocks may be recorded when logPerformance is False."""
    if IGNORE_TEST:
        return
    logger = Logger(toFile=LOG_PATH, logPerformance=False,
                    logLevel=logs.LEVEL_MAX)
    guid = logger.startBlock(BLOCK1)
    # BUG FIX: assert on the logger under test; the original checked
    # self.logger (built in setUp with logPerformance=True), which never
    # saw the block and so passed vacuously.
    self.assertEqual(len(logger.blockDct), 0)
    logger.endBlock(guid)
    self.assertEqual(len(logger.blockDct), 0)
def test(numBlock, sleepTime):
    """Time numBlock blocks of sleepTime each and sanity-check performanceDF."""
    logger = Logger(logPerformance=True)
    for idx in range(numBlock):
        guid = logger.startBlock("blk_%d" % idx)
        time.sleep(sleepTime)
        logger.endBlock(guid)
    df = logger.performanceDF
    # Mean recorded duration should be within sleepTime of the requested sleep
    self.assertLess(np.abs(sleepTime - df["mean"].mean()), sleepTime)
    # Each block was started exactly once
    self.assertEqual(df["count"].mean(), 1.0)
def testBug(self):
    """Regression test: the harness runs cleanly for model 607."""
    if IGNORE_TEST:
        return
    runnerKwargs = dict(firstModel=607,
                        numModel=1,
                        useExistingData=False,
                        figPath=FIG_PATH,
                        pclPath=PCL_PATH,
                        isPlot=IS_PLOT,
                        logger=Logger())
    Runner(**runnerKwargs).run()
def testWolfBug(self):
    """Regression test: fitting J9_k of the Wolf model appears in the report."""
    if IGNORE_TEST:
        return
    fullDct = {
        #"J1_n": (1, 1, 8), # 4
        #"J4_kp": (3600, 36000, 150000), #76411
        #"J5_k": (10, 10, 160), # 80
        #"J6_k": (1, 1, 10), # 9.7
        "J9_k": (1, 50, 50), # 28
        }
    for parameter in fullDct.keys():
        # BUG FIX: removed dead `logger = Logger(logLevel=LEVEL_MAX)` that
        # was immediately overwritten by the next assignment.
        logger = Logger()
        ts = NamedTimeseries(csvPath=WOLF_DATA)
        parameterDct = {parameter: fullDct[parameter]}
        fitter = ModelFitter(WOLF_MODEL, ts[0:100],
                             parameterDct=parameterDct,
                             logger=logger,
                             fitterMethods=["differential_evolution", "leastsq"])
        fitter.fitModel()
        self.assertTrue("J9_k" in fitter.reportFit())
def __init__(self, fitter, inputQ, outputQ, logger=None):
    """
    Parameters
    ----------
    fitter: ModelFitter
        cannot have swig objects (e.g., roadrunner)
    inputQ: multiprocessing.queue
    outputQ: multiprocessing.queue
    logger: Logger
    """
    # Mutable-default fix: a `logger=Logger()` default is evaluated once at
    # definition time and shared by every instance; build one per call instead.
    if logger is None:
        logger = Logger()
    super().__init__(fitter, inputQ, outputQ, logger=logger)
    self.fitter = fitter
def __init__(self,
             firstModel: int = 210,
             numModel: int = 2,
             pclPath=PCL_FILE,
             figPath=FIG_PATH,
             useExistingData: bool = False,
             reportInterval: int = 10,
             isPlot=IS_PLOT,
             **kwargDct):
    """
    Parameters
    ----------
    firstModel: first model to use
    numModel: number of models to use
    pclPath: file to which results are saved
    figPath: path for the output figure
    reportInterval: how frequently report progress
    useExistingData: use data in existing PCL file
    kwargDct: dict
        forwarded keyword arguments; a LOGGER entry, if present, is reused
    """
    # Only reuse saved data when the PCL file actually exists
    self.useExistingData = useExistingData and os.path.isfile(pclPath)
    # Recover previously saved results if desired
    if self.useExistingData:
        self.restore(pclPath=pclPath)
    else:
        # Initialize based on type of context variable: the attribute name's
        # suffix/prefix encodes its default ("...s" -> list, "...Dct" -> dict,
        # "...Path" -> None, "is..." -> False, otherwise 0)
        for name in CONTEXT:
            if name[-1:] == "s":
                self.__setattr__(name, [])
            elif name[-3:] == "Dct":
                self.__setattr__(name, {})
            elif name[-4:] == "Path":
                self.__setattr__(name, None)
            elif name[0:2] == "is":
                self.__setattr__(name, False)
            else:
                self.__setattr__(name, 0)
        # Initialize to parameters for this instantiation
        self.firstModel = firstModel
        self.numModel = numModel
        self.pclPath = pclPath
        self.figPath = figPath
        self.reportInterval = reportInterval
        self.kwargDct = kwargDct
        self.isPlot = isPlot
        # NOTE(review): overwrites the existence-qualified value assigned at
        # the top of this method with the raw argument — confirm intended.
        self.useExistingData = useExistingData
    #
    # Reuse a caller-provided logger; otherwise create one and make it
    # visible to downstream consumers of kwargDct.
    if LOGGER in kwargDct.keys():
        self.logger = kwargDct[LOGGER]
    else:
        self.logger = Logger()
    kwargDct[LOGGER] = self.logger
    self.save()
def __init__(self, initialArgument, inputQ, outputQ, logger=None):
    """
    Parameters
    ----------
    initialArgument: Object used by RunFunction
    inputQ: multiprocessing.queue
    outputQ: multiprocessing.queue
    logger: Logger
    """
    self.initialArgument = initialArgument
    multiprocessing.Process.__init__(self)
    self.inputQ = inputQ
    self.outputQ = outputQ
    # Mutable-default fix: construct a fresh Logger per instance instead of
    # sharing one Logger() evaluated at function-definition time.
    self.logger = Logger() if logger is None else logger
def setupModel(cls, roadrunner, parameters, logger=None):
    """
    Sets up the model for use based on the parameter parameters.

    Parameters
    ----------
    roadrunner: ExtendedRoadRunner
    parameters: lmfit.Parameters
    logger: Logger
    """
    # Mutable-default fix: was `logger=Logger()`, a shared definition-time default
    if logger is None:
        logger = Logger()
    valueDct = parameters.valuesdict()
    # Assign each parameter value into the roadrunner model; report (but do
    # not propagate) names the model rejects.
    for name, value in valueDct.items():
        try:
            roadrunner.model[name] = value
        except Exception as err:
            msg = "_modelFitterCore.setupModel: Could not set value for %s" \
                % name
            logger.error(msg, err)
def __init__(self, cls, initialArguments, logger=Logger(), **kwargs):
    """
    Parameters
    ----------
    cls: AbstractServer
    initialArguments: list
        one entry per server process to create
    kwargs: dict
        optional arguments for cls constructor
    """
    self.cls = cls
    self.logger = logger
    self.numProcess = len(initialArguments)
    # One input/output queue pair per server process
    self.inputQs = [multiprocessing.Queue() for _ in range(self.numProcess)]
    self.outputQs = [multiprocessing.Queue() for _ in range(self.numProcess)]
    # Instantiate and launch each server
    self.servers = []
    for idx, argument in enumerate(initialArguments):
        server = self.cls(argument,
                          self.inputQs[idx],
                          self.outputQs[idx],
                          logger=self.logger,
                          **kwargs)
        self.servers.append(server)
    for server in self.servers:
        server.start()
def __init__(self, runnerArgument):
    """
    Parameters
    ----------
    runnerArgument: RunnerArgument
        carries fitter, numIteration, kwargs, synthesizerClass

    Notes
    -----
    1. Uses METHOD_LEASTSQ for fitModel iterations.
    """
    super().__init__()
    #
    self.lastErr = ""
    self.fitter = runnerArgument.fitter
    self.numIteration = runnerArgument.numIteration
    self.kwargs = runnerArgument.kwargs
    self.synthesizerClass = runnerArgument.synthesizerClass
    # Reuse the fitter's logger when it has one; otherwise create a default
    if "logger" in self.fitter.__dict__.keys():
        self.logger = self.fitter.logger
    else:
        self.logger = Logger()
    # A falsy result from _fitInitial means there is nothing to bootstrap
    self._isDone = not self._fitInitial()
    self.columns = self.fitter.selectedColumns
    # NOTE(review): reads self.isDone although self._isDone was assigned
    # above — presumably isDone is a property defined elsewhere; confirm.
    # Initializations for bootstrap loop
    if not self.isDone:
        fittedTS = self.fitter.fittedTS.subsetColumns(self.columns,
                                                      isCopy=False)
        self.synthesizer = self.synthesizerClass(
            observedTS=self.fitter.observedTS.subsetColumns(self.columns,
                                                            isCopy=False),
            fittedTS=fittedTS,
            **self.kwargs)
        self.numSuccessIteration = 0
        # Ensure a baseline fit exists before bootstrapping
        if self.fitter.minimizerResult is None:
            self.fitter.fitModel()
        self.baseChisq = self.fitter.minimizerResult.redchi
        self.curIteration = 0
        self.fd = self.logger.getFileDescriptor()
        self.baseFittedStatistic = TimeseriesStatistic(
            self.fitter.observedTS.subsetColumns(self.fitter.selectedColumns,
                                                 isCopy=False))
def mkSuiteFitter(modelSpecifications, datasets, parametersCol, modelNames=None,
                  modelWeights=None, fitterMethods=None, numRestart=0,
                  isParallel=False, logger=None, **kwargs):
    """
    Constructs a SuiteFitterCore with fitters that have similar structure.

    Parameters
    ----------
    modelSpecifications: list-modelSpecification as in ModelFitter
    datasets: list-observedData as in ModelFitter
    parametersCol: list-parametersToFit as in ModelFitter
    modelNames: list-str
    modelWeights: list-float
        how models are weighted in least squares
    fitterMethods: list-optimization methods
    numRestart: int
        number of times the minimization is restarted with random
        initial values for parameters to fit.
    isParallel: bool
        run fits in parallel for each fitter
    logger: Logger
    kwargs: dict
        keyword arguments for ModelFitter

    Returns
    -------
    SuiteFitter
    """
    # Mutable-default fix: was `logger=Logger()` shared across all calls
    if logger is None:
        logger = Logger()
    # Build one ModelFitter per (specification, dataset, parameters) triple
    modelFitters = []
    for modelSpecification, dataset, parametersToFit in \
            zip(modelSpecifications, datasets, parametersCol):
        modelFitter = ModelFitter(modelSpecification, dataset,
                                  parametersToFit=parametersToFit,
                                  logger=logger, **kwargs)
        modelFitters.append(modelFitter)
    return SuiteFitter(modelFitters, modelNames=modelNames,
                       modelWeights=modelWeights,
                       fitterMethods=fitterMethods,
                       numRestart=numRestart,
                       isParallel=isParallel,
                       logger=logger)
def runSimulationNumpy(cls,
                       parameters=None,
                       modelSpecification=None,
                       startTime=0,
                       endTime=5,
                       numPoint=30,
                       selectedColumns=None,
                       _logger=None,
                       _loggerPrefix="",
                       ):
    """
    Runs a simulation. Defaults to parameter values in the simulation.
    Returns a NamedArray

    Parameters
    ----------
    modelSpecification: ExtendedRoadRunner/str
        Roadrunner model
    parameters: lmfit.Parameters
        lmfit parameters
    startTime: float
        start time for the simulation
    endTime: float
        end time for the simulation
    numPoint: int
        number of points in the simulation
    selectedColumns: list-str
        output columns in simulation
    _logger: Logger
    _loggerPrefix: str

    Return
    ------
    NamedArray (or None if fail to converge)
    """
    # Mutable-default fix: was `_logger=Logger()` shared across calls
    if _logger is None:
        _logger = Logger()
    roadrunnerModel = modelSpecification
    if isinstance(modelSpecification, str):
        roadrunnerModel = cls.initializeRoadrunnerModel(roadrunnerModel)
    else:
        roadrunnerModel.reset()
    if parameters is not None:
        # Parameters have been specified
        cls.setupModel(roadrunnerModel, parameters, logger=_logger)
    # Do the simulation
    if selectedColumns is not None:
        newSelectedColumns = list(selectedColumns)
        if TIME not in newSelectedColumns:
            newSelectedColumns.insert(0, TIME)
        try:
            dataArr = roadrunnerModel.simulate(startTime, endTime, numPoint,
                                               newSelectedColumns)
        except Exception as err:
            # CONSISTENCY FIX: both failure paths now report through
            # Logger.error(msg, err); previously one used error(...) and the
            # other exception(...%-formatted).
            _logger.error("Roadrunner exception: ", err)
            dataArr = None
    else:
        try:
            dataArr = roadrunnerModel.simulate(startTime, endTime, numPoint)
        except Exception as err:
            _logger.error("Roadrunner exception: ", err)
            dataArr = None
    return dataArr
def mkParameters(cls,
                 parameterDct: dict = None,
                 parametersToFit: list = None,
                 logger: Logger = None,
                 lowerBound: float = PARAMETER_LOWER_BOUND,
                 upperBound: float = PARAMETER_UPPER_BOUND) -> lmfit.Parameters:
    """
    Constructs lmfit parameters based on specifications.

    Parameters
    ----------
    parameterDct: key=name, value=ParameterSpecification
    parametersToFit: list of parameters to fit
    logger: error logger
    lowerBound: lower value of range for parameters
    upperBound: upper value of range for parameters

    Returns
    -------
    lmfit.Parameters

    Raises
    ------
    RuntimeError: if neither parameterDct nor parametersToFit is given
    """
    def get(value, base_value, multiplier):
        # Use the explicit value when supplied; otherwise scale the base value
        if value is not None:
            return value
        return base_value * multiplier
    #
    if (parametersToFit is None) and (parameterDct is None):
        raise RuntimeError("Must specify one of these parameters.")
    if parameterDct is None:
        parameterDct = {}
    if parametersToFit is None:
        parametersToFit = parameterDct.keys()
    if logger is None:
        # BUG FIX: was `logger = logger()`, which called None and raised
        # TypeError whenever logger was None.
        logger = Logger()
    params = lmfit.Parameters()
    for parameterName in parametersToFit:
        if parameterName in parameterDct.keys():
            specification = parameterDct[parameterName]
            value = get(specification.value, specification.value, 1.0)
            # NOTE(review): the original `if value > 0` branches assigned
            # identical factors in both arms; collapsed to one assignment.
            # The negative-value case may have been intended to swap the
            # multipliers — confirm.
            lower_factor = LOWER_PARAMETER_MULT
            upper_factor = UPPER_PARAMETER_MULT
            lower = get(specification.lower, specification.value, lower_factor)
            upper = get(specification.upper, specification.value, upper_factor)
            # Avoid a degenerate (empty) range
            if np.isclose(lower - upper, 0):
                upper = 0.0001
            try:
                params.add(parameterName, value=value, min=lower, max=upper)
            except Exception as err:
                msg = "modelFitterCore/mkParameters parameterName %s" \
                    % parameterName
                logger.error(msg, err)
        else:
            # No specification: center the starting value within the bounds
            value = np.mean([lowerBound, upperBound])
            params.add(parameterName, value=value, min=lowerBound,
                       max=upperBound)
    return params
def runSimulation(cls,
                  parameters=None,
                  roadrunner=None,
                  startTime=0,
                  endTime=5,
                  numPoint=30,
                  selectedColumns=None,
                  returnDataFrame=True,
                  _logger=None,
                  _loggerPrefix="",
                  ):
    """
    Runs a simulation. Defaults to parameter values in the simulation.

    Parameters
    ----------
    roadrunner: ExtendedRoadRunner/str
        Roadrunner model
    parameters: lmfit.Parameters
        lmfit parameters
    startTime: float
        start time for the simulation
    endTime: float
        end time for the simulation
    numPoint: int
        number of points in the simulation
    selectedColumns: list-str
        output columns in simulation
    returnDataFrame: bool
        return a DataFrame
    _logger: Logger
    _loggerPrefix: str

    Return
    ------
    NamedTimeseries (or None if fail to converge)
    """
    # Mutable-default fix: was `_logger=Logger()` shared across calls
    if _logger is None:
        _logger = Logger()
    if isinstance(roadrunner, str):
        roadrunner = cls.initializeRoadrunnerModel(roadrunner)
    else:
        roadrunner.reset()
    if parameters is not None:
        # Parameters have been specified
        cls.setupModel(roadrunner, parameters, logger=_logger)
    # Do the simulation
    if selectedColumns is not None:
        newSelectedColumns = list(selectedColumns)
        if TIME not in newSelectedColumns:
            newSelectedColumns.insert(0, TIME)
        try:
            data = roadrunner.simulate(startTime, endTime, numPoint,
                                       newSelectedColumns)
        except Exception as err:
            # CONSISTENCY FIX: both failure paths now report through
            # Logger.error(msg, err); previously one used error(...) and the
            # other exception(...).
            _logger.error("Roadrunner exception: ", err)
            data = None
    else:
        try:
            data = roadrunner.simulate(startTime, endTime, numPoint)
        except Exception as err:
            _logger.error("Roadrunner exception: ", err)
            data = None
    if data is None:
        return data
    fittedTS = NamedTimeseries(namedArray=data)
    if returnDataFrame:
        result = fittedTS.to_dataframe()
    else:
        result = fittedTS
    return result
def __init__(self,
             modelSpecification,
             dataSources,
             dirStudyPath=None,
             instanceNames=None,
             logger=Logger(),
             useSerialized=True,
             doSerialize=True,
             isPlot=True,
             **kwargs):
    """
    Parameters
    ---------
    modelSpecification: ExtendedRoadRunner/str
        roadrunner model or antimony model
    dataSources: list-NamedTimeseries/list-str or dict of either
        str: path to CSV file
    dirStudyPath: str
        Path to the output directory containing the serialized fitters
        for the study.
    instanceNames: list-str
        Names of study instances corresponds to the list of dataSources
    useSerialized: bool
        Use the serialization of each ModelFitter, if it exists
    doSerialized: bool
        Serialize each ModelFitter
    isPlot: bool
        Do plots
    kwargs: dict
        arguments passed to ModelFitter
    """
    self.dirStudyPath = dirStudyPath
    # Path to the directory serialized ModelFitter
    if self.dirStudyPath is None:
        # Default: a DIR_NAME subdirectory next to the outermost caller's file
        length = len(inspect.stack())
        absPath = os.path.abspath((inspect.stack()[length - 1])[1])
        dirCaller = os.path.dirname(absPath)
        self.dirStudyPath = os.path.join(dirCaller, DIR_NAME)
    self.isPlot = isPlot
    self.doSerialize = doSerialize
    self.useSerialized = useSerialized
    self.instanceNames, self.dataSourceDct = self._mkInstanceData(
        instanceNames, dataSources)
    self.fitterPathDct = {}  # Path to serialized fitters; key is instanceName
    self.fitterDct = {}  # Fitters: key is instanceName
    self.logger = logger
    # Ensure that the directory exists
    if not os.path.isdir(self.dirStudyPath):
        os.makedirs(self.dirStudyPath)
    # Construct the fitters
    for name, dataSource in self.dataSourceDct.items():
        filePath = self._getSerializePath(name)
        self.fitterPathDct[name] = filePath
        if os.path.isfile(filePath) and useSerialized:
            # Reuse the previously serialized fitter for this instance
            self.fitterDct[name] = ModelFitter.deserialize(filePath)
        else:
            # Build a fresh fitter and persist it for future runs
            self.fitterDct[name] = ModelFitter(modelSpecification,
                                               dataSource,
                                               logger=self.logger,
                                               isPlot=self.isPlot,
                                               **kwargs)
            self._serializeFitter(name)
def __init__(self, fitter, numIteration: int, parameterDct: dict,
             fittedStatistic: TimeseriesStatistic, bootstrapError=0):
    """
    Results from bootstrap

    Parameters
    ----------
    fitter: Fitter
    numIteration: number of successful iterations
    parameterDct: dict
        key: parameter name
        value: list of values
    fittedStatistic: statistics for fitted timeseries
    err: Error encountered
    """
    # NOTE(review): the fitter argument is not retained — self.fitter is set
    # to None and fitter is only consulted below to pick a logger. Confirm
    # this is intentional.
    self.fitter = None
    self.numIteration = numIteration
    # Defensive copy so later mutation of the caller's dict has no effect
    self.parameterDct = dict(parameterDct)
    self.bootstrapError = bootstrapError
    self.numSimulation = 0
    self.parameterMeanDct = {}
    # Timeseries statistics for fits
    self.fittedStatistic = fittedStatistic
    if fitter is None:
        self.logger = Logger()
    else:
        self.logger = fitter.logger
    # list of parameters
    self.parameters = list(self.parameterDct.keys())
    # Number of simulations
    if len(self.parameters) > 0:
        self.numSimulation = \
            len(self.parameterDct[self.parameters[0]])
    else:
        self.numSimulation = 0
    if self.numSimulation > 1:
        # means of parameter values
        self.parameterMeanDct = {
            p: np.mean(parameterDct[p])
            for p in self.parameters
        }
        # standard deviation of parameter values
        self.parameterStdDct = {
            p: np.std(parameterDct[p])
            for p in self.parameters
        }
        # Confidence limits for parameter values
        # (only computed when enough samples exist)
        self.percentileDct = {p: [] for p in self.parameterDct}
        for name, values in self.parameterDct.items():
            if len(values) > MIN_COUNT_PERCENTILE:
                self.percentileDct[name] = np.percentile(
                    values, PERCENTILES)
    else:
        # Too few simulations: statistics are undefined
        # means of parameter values
        self.parameterMeanDct = {p: np.nan for p in self.parameters}
        # standard deviation of parameter values
        self.parameterStdDct = {p: np.nan for p in self.parameters}
        # Confidence limits for parameter values
        self.percentileDct = {p: np.nan for p in self.parameters}
    ### PRIVATE
    # Fitting parameters from result
    self._params = None
def setUp(self):
    """Begin each test with a clean slate and a performance-logging Logger."""
    self.remove()
    loggerKwargs = dict(toFile=LOG_PATH,
                        logPerformance=True,
                        logLevel=logs.LEVEL_MAX)
    self.logger = Logger(**loggerKwargs)
help='Path for log file (str); default: %s' % LOG_PATH, default=LOG_PATH) parser.add_argument('--figPath', type=str, help='Path for figure (str); Default: %s' % FIG_PATH, default=FIG_PATH) parser.add_argument('--useExistingData', action='store_true', help="Use saved data from an previous run (flag).") parser.add_argument('--plot', action='store_true', help="Plot existing data (flag).") parser.add_argument( '--useExistingLog', action='store_true', help="Append to the existing log file, if it exists (flag).") args = parser.parse_args() useExistingLog = args.plot or args.useExistingLog useExistingData = (args.plot and (args.numModel == 0)) or args.useExistingData # if not useExistingLog: remove(args.logPath) runner = Runner(firstModel=args.firstModel, numModel=args.numModel, useExistingData=useExistingData, figPath=args.figPath, isPlot=args.plot, logger=Logger(toFile=args.logPath)) runner.run()
help='Path for log file (str); default: %s' % LOG_PATH, default=LOG_PATH) parser.add_argument('--figPath', type=str, help='Path for figure (str); Default: %s' % FIG_PATH, default=FIG_PATH) parser.add_argument('--useExistingData', action='store_true', help="Use saved data from an previous run (flag).") parser.add_argument('--plot', action='store_true', help="Plot existing data (flag).") parser.add_argument( '--useExistingLog', action='store_true', help="Append to the existing log file, if it exists (flag).") args = parser.parse_args() useExistingLog = args.plot or args.useExistingLog useExistingData = (args.plot and (args.numModel == 0)) or args.useExistingData # if not useExistingLog: remove(args.logPath) runner = Runner(firstModel=args.firstModel, numModel=args.numModel, useExistingData=useExistingData, figPath=args.figPath, isPlot=args.plot, logger=Logger(toFile=args.logPath, logLevel=LOG_LEVEL)) runner.run()
def __init__(
        self,
        modelSpecification,
        observedData,
        parametersToFit=None,
        selectedColumns=None,
        fitterMethods=METHOD_FITTER_DEFAULTS,
        numFitRepeat=1,
        bootstrapMethods=METHOD_BOOTSTRAP_DEFAULTS,
        parameterLowerBound=PARAMETER_LOWER_BOUND,
        parameterUpperBound=PARAMETER_UPPER_BOUND,
        parameterDct={},
        fittedDataTransformDct={},
        logger=Logger(),
        isPlot=True,
        _loggerPrefix="",
        # The following must be kept in sync with ModelFitterBootstrap.bootstrap
        numIteration: int = 10,
        reportInterval: int = 1000,
        synthesizerClass=ObservationSynthesizerRandomizedResiduals,
        maxProcess: int = None,
        serializePath: str = None,
        ):
    """
    Constructs estimates of parameter values.

    Parameters
    ----------
    modelSpecification: ExtendedRoadRunner/str
        roadrunner model or antimony model
    observedData: NamedTimeseries/str
        str: path to CSV file
    parametersToFit: list-str/None
        parameters in the model that you want to fit
        if None, no parameters are fit
    selectedColumns: list-str
        species names you wish use to fit the model
        default: all columns in observedData
    parameterLowerBound: float
        lower bound for the fitting parameters
    parameterUpperBound: float
        upper bound for the fitting parameters
    parameterDct: dict
        key: parameter name
        value: triple - (lowerVange, startingValue, upperRange)
    fittedDataTransformDct: dict
        key: column in selectedColumns
        value: function of the data in selectedColumns;
               input: NamedTimeseries
               output: array for the values of the column
    logger: Logger
    fitterMethods: str/list-str
        method used for minimization in fitModel
    numFitRepeat: int
        number of times fitting is repeated for a method
    bootstrapMethods: str/list-str
        method used for minimization in bootstrap
    numIteration: number of bootstrap iterations
    reportInterval: number of iterations between progress reports
    synthesizerClass: object that synthesizes new observations
        Must subclass ObservationSynthesizer
    maxProcess: Maximum number of processes to use. Default: numCPU
    serializePath: Where to serialize the fitter after bootstrap

    Usage
    -----
    parameterDct = {
        "k1": (1, 5, 10),  # name of parameter: low value, initial, high
        "k2": (2, 3, 6)}
    fitter = ModelFitter(roadrunnerModel, "observed.csv",
                         parameterDct=parameterDct)
    fitter.fitModel()  # Do the fit
    fitter.bootstrap()  # Estimate parameter variance with bootstrap
    """
    # NOTE(review): parameterDct={}, fittedDataTransformDct={}, and
    # logger=Logger() are mutable defaults shared across calls — confirm
    # that no caller mutates them.
    if modelSpecification is not None:
        # Not the default constructor
        self._loggerPrefix = _loggerPrefix
        self.modelSpecification = modelSpecification
        self.parametersToFit = parametersToFit
        self.lowerBound = parameterLowerBound
        self.upperBound = parameterUpperBound
        # Arguments forwarded verbatim to bootstrap()
        self.bootstrapKwargs = dict(
            numIteration=numIteration,
            reportInterval=reportInterval,
            maxProcess=maxProcess,
            serializePath=serializePath,
        )
        self.parameterDct = self._updateParameterDct(parameterDct)
        self._numFitRepeat = numFitRepeat
        # Default the fitted parameters to every key of parameterDct
        if self.parametersToFit is None:
            self.parametersToFit = [p for p in self.parameterDct.keys()]
        self.observedTS = observedData
        if self.observedTS is not None:
            self.observedTS = mkNamedTimeseries(observedData)
        #
        self.fittedDataTransformDct = fittedDataTransformDct
        #
        if (selectedColumns is None) and (self.observedTS is not None):
            selectedColumns = self.observedTS.colnames
        self.selectedColumns = selectedColumns
        # Construct array of non-nan observed values
        self._observedArr = self.observedTS[self.selectedColumns].flatten()
        # Other internal state
        self._fitterMethods = fitterMethods
        if isinstance(self._fitterMethods, str):
            if self._fitterMethods == METHOD_BOTH:
                self._fitterMethods = METHOD_FITTER_DEFAULTS
            else:
                self._fitterMethods = [self._fitterMethods]
        self._bootstrapMethods = bootstrapMethods
        if isinstance(self._bootstrapMethods, str):
            self._bootstrapMethods = [self._bootstrapMethods]
        self._isPlot = isPlot
        self._plotter = tp.TimeseriesPlotter(isPlot=self._isPlot)
        self._plotFittedTS = None  # Timeseries that is plotted
        self.logger = logger
        # The following are calculated during fitting
        self.roadrunnerModel = None
        self.minimizer = None  # lmfit.minimizer
        self.minimizerResult = None  # Results of minimization
        self.params = None  # params property in lmfit.minimizer
        self.fittedTS = self.observedTS.copy(
            isInitialize=True)  # Initialize
        self.residualsTS = None  # Residuals for selectedColumns
        self.bootstrapResult = None  # Result from bootstrapping
        # Validation checks
        self._validateFittedDataTransformDct()
    else:
        # Default constructor path (used by deserialization/copying)
        pass
import numpy as np import os import unittest IGNORE_TEST = False IS_PLOT = False DIR = os.path.dirname(os.path.abspath(__file__)) LOG_PATH = os.path.join(DIR, "testTestHarness.log") DATA_DIR = os.path.join(os.path.dirname(DIR), "biomodels") PATH_PAT = os.path.join(DATA_DIR, "BIOMD0000000%03d.xml") INPUT_PATH = PATH_PAT % 339 VARIABLE_NAMES = ["Va_Xa", "IIa_Tmod", "VIIa_TF"] PARAMETER_NAMES = ["r27_c", "r28_c", "r29_c"] VARIABLE_NAMES = ["Pk", "VK"] PARAMETER_NAMES = ["d_Pk", "d_VK"] LOGGER = Logger() if os.path.isfile(LOG_PATH): os.remove(LOG_PATH) class TestFunctions(unittest.TestCase): def setUp(self): if IGNORE_TEST: return self.harness = TestHarness(INPUT_PATH, PARAMETER_NAMES, VARIABLE_NAMES, logger=LOGGER) def testConstructor(self):
def __init__(
        self,
        modelSpecification,
        observedData,
        # The following must be kept in sync with ModelFitterBootstrap.bootstrap
        parametersToFit=None,  # Must be first kw for backwards compatibility
        bootstrapMethods=None,
        endTime=None,
        fitterMethods=None,
        isPlot=True,
        logger=Logger(),
        _loggerPrefix="",
        maxProcess: int = None,
        numFitRepeat=1,
        numIteration: int = 10,
        numPoint=None,
        numRestart=0,
        parameterLowerBound=cn.PARAMETER_LOWER_BOUND,
        parameterUpperBound=cn.PARAMETER_UPPER_BOUND,
        selectedColumns=None,
        serializePath: str = None,
        isParallel=True,
        isProgressBar=True,
        ):
    """
    Constructs estimates of parameter values.

    Parameters
    ----------
    endTime: float
        end time for the simulation
    modelSpecification: ExtendedRoadRunner/str
        roadrunner model or antimony model
    observedData: NamedTimeseries/str
        str: path to CSV file
    parametersToFit: list-str/SBstoat.Parameter/None
        parameters in the model that you want to fit
        if None, no parameters are fit
    selectedColumns: list-str
        species names you wish use to fit the model
        default: all columns in observedData
    parameterLowerBound: float
        lower bound for the fitting parameters
    parameterUpperBound: float
        upper bound for the fitting parameters
    logger: Logger
    fitterMethods: str/list-str/list-OptimizerMethod
        method used for minimization in fitModel
    numFitRepeat: int
        number of times fitting is repeated for a method
    bootstrapMethods: str/list-str/list-OptimizerMethod
        method used for minimization in bootstrap
    numIteration: number of bootstrap iterations
    numPoint: int
        number of time points in the simulation
    maxProcess: Maximum number of processes to use. Default: numCPU
    serializePath: Where to serialize the fitter after bootstrap
    numRestart: int
        number of times the minimization is restarted with random
        initial values for parameters to fit.
    isParallel: bool
        run in parallel where possible
    isProgressBar: bool
        display the progress bar

    Usage
    -----
    parametersToFit = [SBstoat.Parameter("k1", lower=1, upper=10, value=5),
                       SBstoat.Parameter("k2", lower=2, upper=6, value=3),
                      ]
    fitter = ModelFitter(roadrunnerModel, "observed.csv",
                         parametersToFit=parametersToFit)
    fitter.fitModel()  # Do the fit
    fitter.bootstrap()  # Estimate parameter variance with bootstrap
    """
    # NOTE(review): logger=Logger() is a mutable default shared across calls —
    # confirm no caller mutates it.
    if modelSpecification is not None:
        # Not the default constructor
        self._numIteration = numIteration  # Save for copy constructor
        self._serializePath = serializePath  # Save for copy constructor
        self._loggerPrefix = _loggerPrefix
        self.modelSpecification = modelSpecification
        self.observedData = observedData
        self.parametersToFit = parametersToFit
        self.parameterLowerBound = parameterLowerBound
        self.parameterUpperBound = parameterUpperBound
        self._maxProcess = maxProcess
        # Arguments forwarded verbatim to bootstrap()
        self.bootstrapKwargs = dict(
            numIteration=numIteration,
            serializePath=serializePath,
        )
        self._numFitRepeat = numFitRepeat
        self.selectedColumns = selectedColumns
        self.observedTS, self.selectedColumns = self._updateObservedTS(
            mkNamedTimeseries(self.observedData))
        # Check for nan in observedTS
        self._isObservedNan = np.isnan(np.sum(self.observedTS.flatten()))
        #
        self.selectedColumns = [c.strip() for c in self.selectedColumns]
        self.numPoint = numPoint
        if self.numPoint is None:
            self.numPoint = len(self.observedTS)
        self.endTime = endTime
        if self.endTime is None:
            self.endTime = self.observedTS.end
        # Other internal state
        self._fitterMethods = ModelFitterCore.makeMethods(
            fitterMethods, cn.METHOD_FITTER_DEFAULTS)
        self._bootstrapMethods = ModelFitterCore.makeMethods(
            bootstrapMethods, cn.METHOD_BOOTSTRAP_DEFAULTS)
        if isinstance(self._bootstrapMethods, str):
            self._bootstrapMethods = [self._bootstrapMethods]
        self._isPlot = isPlot
        self._plotter = tp.TimeseriesPlotter(isPlot=self._isPlot)
        self.logger = logger
        self._numRestart = numRestart
        self._isParallel = isParallel
        self._isProgressBar = isProgressBar
        self._selectedIdxs = None
        self._params = self.mkParams()
        # The following are calculated during fitting
        self.roadrunnerModel = None
        self.minimizerResult = None  # Results of minimization
        self.fittedTS = self.observedTS.copy(
            isInitialize=True)  # Initialize
        self.residualsTS = None  # Residuals for selectedColumns
        self.bootstrapResult = None  # Result from bootstrapping
        self.optimizer = None
        self.suiteFitterParams = None  # Result from a suite fitter
        #
    else:
        # Default constructor path (used by deserialization/copying)
        pass
import unittest import matplotlib import matplotlib.pyplot as plt IGNORE_TEST = False IS_PLOT = False DIR = os.path.dirname(os.path.abspath(__file__)) PCL_PATH = os.path.join(DIR, "testMainTestHarness.pcl") FIG_PATH = os.path.join(DIR, "testMainTestHarness.png") FILES = [PCL_PATH, FIG_PATH] FIRST_MODEL = 200 NUM_MODEL = 1 DIR = os.path.dirname(os.path.abspath(__file__)) LOG_FILE = os.path.join(DIR, "testMainTestHarness.log") if IGNORE_TEST: LOGGER = Logger() else: LOGGER = Logger(toFile=LOG_FILE) if os.path.isfile(LOG_FILE): os.remove(LOG_FILE) class TestRunner(unittest.TestCase): def setUp(self): self._remove() self.runner = Runner(firstModel=FIRST_MODEL, numModel=NUM_MODEL, useExistingData=False, figPath=FIG_PATH, pclPath=PCL_PATH,
def rpRevise(self):
    """
    Overrides rpickler.
    Ensures a revived instance always carries a logger attribute.
    """
    # Instances serialized before the logger attribute existed lack it;
    # supply a default when reviving.
    hasLogger = "logger" in self.__dict__
    if not hasLogger:
        self.logger = Logger()
def _makeMikeModel(self, **kwargs):
    """Makes a model from Mike's data.

    Builds the modular EGFR antimony model, fits it against mike_bug.csv
    (with function evaluations capped unless fitterMethods is supplied),
    and returns the fitted ModelFitter.
    """
    # NOTE(review): line breaks inside the antimony source were reconstructed;
    # the model text itself is unchanged.
    model = te.loada('''
        function Fi(v, ri, kf, kr, i, s, p, Kmi, Kms, Kmp, wi, ms, mp)
          ((ri+(1-ri)*(1/(1+i/Kmi)))^wi)*(kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
        end

        function F0(v, kf, kr, s, p, Kms, Kmp, ms, mp)
          (kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
        end

        function Fa(v, ra, kf, kr, a, s, p, Kma, Kms, Kmp, wa, ms, mp)
          ((ra+(1-ra)*((a/Kma)/(1+a/Kma)))^wa)*(kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
        end

        function Fiii(v, ri1, ri2, ri3, kf, kr, i1, i2, i3, s, p, Kmi1, Kmi2, Kmi3, Kms, Kmp, wi1, wi2, wi3, ms, mp)
          ((ri1+(1-ri1)*(1/(1+i1/Kmi1)))^wi1) * ((ri2+(1-ri2)*(1/(1+i2/Kmi2)))^wi2) * ((ri3+(1-ri3)*(1/(1+i3/Kmi3)))^wi3) * (kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
        end

        model modular_EGFR_current_128()

        // Reactions
        FreeLigand: -> L; Fa(v_0, ra_0, kf_0, kr_0, Lp, E, L, Kma_0, Kms_0, Kmp_0, wa_0, ms_0, mp_0);
        Phosphotyrosine: -> P; Fi(v_1, ri_1, kf_1, kr_1, Mig6, L, P, Kmi_1, Kms_1, Kmp_1, wi_1, ms_1, mp_1);
        Ras: -> R; Fiii(v_2, ri1_2, ri2_2, ri3_2, kf_2, kr_2, Spry2, P, E, P, R, Kmi1_2, Kmi2_2, Kmi3_2, Kms_2, Kmp_2, wi1_2, wi2_2, wi3_2, ms_2, mp_2);
        Erk: -> E; F0(v_3, kf_3, kr_3, R, E, Kms_3, Kmp_3, ms_3, mp_3);

        // Species IVs
        Lp = 100;
        E = 0;
        L = 1000;
        Mig6 = 100;
        P = 0;
        Spry2 = 10000;
        R = 0;

        // Parameter values
        v_0 = 1; ra_0 = 1; kf_0 = 1; kr_0 = 1; Kma_0 = 1; Kms_0 = 1; Kmp_0 = 1; wa_0 = 1; ms_0 = 1; mp_0 = 1;
        v_1 = 1; ri_1 = 1; kf_1 = 1; kr_1 = 1; Kmi_1 = 1; Kms_1 = 1; Kmp_1 = 1; wi_1 = 1; ms_1 = 1; mp_1 = 1;
        v_2 = 1; ri1_2 = 1; ri2_2 = 1; ri3_2 = 1; kf_2 = 1; kr_2 = 1; Kmi1_2 = 1; Kmi2_2 = 1; Kmi3_2 = 1; Kms_2 = 1; Kmp_2 = 1; wi1_2 = 1; wi2_2 = 1; wi3_2 = 1; ms_2 = 1; mp_2 = 1;
        v_3 = 1; kf_3 = 1; kr_3 = 1; Kms_3 = 1; Kmp_3 = 1; ms_3 = 1; mp_3 = 1;

        end
        ''')
    if "fitterMethods" not in kwargs:
        # Cap function evaluations so the fit completes quickly
        methods = []
        for optName in ["differential_evolution", "leastsq"]:
            methods.append(
                SBstoat.OptimizerMethod(optName, {cn.MAX_NFEV: 10}))
        kwargs["fitterMethods"] = methods
    observedPath = os.path.join(DIR, "mike_bug.csv")
    fitter = ModelFitter(
        model,
        observedPath,
        logger=Logger(logLevel=1),
        parametersToFit=[
            #"v_0", "ra_0", "kf_0", "kr_0", "Kma_0", "Kms_0", "Kmp_0", "wa_0", "ms_0",
            #"mp_0", "v_1", "ri_1", "kf_1", "kr_1", "Kmi_1", "Kms_1", "Kmp_1", "wi_1",
            #"ms_1", "mp_1",
            "v_2", "ri1_2", "ri2_2", "ri3_2", "kf_2", "kr_2", "Kmi1_2",
            "Kmi2_2", "Kmi3_2", "Kms_2", "Kmp_2", "wi1_2", "wi2_2", "wi3_2",
            "ms_2", "mp_2", "v_3", "kf_3", "kr_3", "Kms_3", "Kmp_3", "ms_3",
            "mp_3"
        ],
        **kwargs)
    fitter.fitModel()
    return fitter