class ParScanner(Scanner):
    """
    Arbitrary dimension generic distributed scanner.

    Subclassed from *pysces.PyscesScan.Scanner*. This class is initiated
    with a loaded PySCeS model and then allows the user to define scan
    parameters, see ``self.addScanParameter()`` and user output, see
    ``self.addUserOutput()``. Steady-state results are always stored in
    ``self.SteadyStateResults`` while user output can be found in
    ``self.UserOutputResults``.

    Distributed (parallel) execution is achieved with the clustering
    capability of IPython. See *ipcluster --help*.

    The optional 'engine' argument specifies the parallel engine to use.

    - *'multiproc'* -- multiprocessing (default)
    - *'ipcluster'* -- IPython cluster
    """

    # --johann 20101206
    genOn = True
    _MODE_ = 'state'
    HAS_USER_OUTPUT = False
    nan_on_bad_state = True
    MSG_PRINT_INTERVAL = 500
    __AnalysisModes__ = ('state', 'elasticity', 'mca', 'stability')
    # populated (as lists) by Prepare(); class-level default is None
    invalid_state_list = None
    invalid_state_list_idx = None
    # number of scan points handed to each worker task
    scans_per_run = 100

    def __init__(self, mod, engine='multiproc'):
        """
        Set up the parallel engine and per-instance bookkeeping.

        - *mod* a loaded PySCeS model instance
        - *engine* 'multiproc' (default) or 'ipcluster'
        """
        self.engine = engine
        if engine == 'multiproc':
            print('parallel engine: multiproc')
        elif engine == 'ipcluster':
            print('parallel engine: ipcluster')
            try:
                from ipyparallel import Client
            except ImportError as ex:
                # consistent with the sibling implementation: give an
                # actionable message when ipyparallel is missing
                print('\n', ex)
                raise ImportError(
                    'PARSCANNER: Requires IPython and ipyparallel version >=4.0 (http://ipython.org) and 0MQ (http://zero.mq).'
                )
            try:
                rc = Client()
                self.rc = rc
            except OSError as ex:
                raise OSError(
                    str(ex)
                    + '\nPARSCANNER: Requires a running IPython cluster. See "ipcluster --help".\n'
                )
            # code serialization support is only needed for the ipcluster
            # engine; importing it unconditionally broke 'multiproc' use
            # on systems without ipyparallel installed
            from ipyparallel.serialize import codeutil  # noqa: F401
            dv = rc[:]  # direct view
            lv = rc.load_balanced_view()
            self.dv = dv
            self.lv = lv
            dv.execute('from pysces.PyscesParScan import Analyze, setModValue')
        else:
            raise UserWarning(engine + " is not a valid parallel engine!")

        self.GenDict = {}
        self.GenOrder = []
        self.ScanSpace = []
        self.mod = mod
        self.SteadyStateResults = []
        self.UserOutputList = []
        self.UserOutputResults = []
        self.scanT = TimerBox()

    def genScanSpace(self):
        """
        Generates the parameter scan space, partitioned according to
        self.scans_per_run.
        """
        spr = self.scans_per_run
        Tsteps = 1
        for gen in self.GenOrder:
            # follower ("slave") axes move with their master and do not
            # add scan steps of their own
            if self.GenDict[gen][4] == False:
                Tsteps *= self.GenDict[gen][2]
        for step in range(Tsteps):
            pars = self.__nextValue__()
            self.ScanSpace.append(pars)
        self.ScanSpace = np.array(self.ScanSpace)
        self.SeqArray = np.arange(1, Tsteps + 1)
        # number of partitions = ceil(Tsteps / spr)
        if Tsteps % spr == 0:
            numparts = Tsteps // spr
        else:
            numparts = Tsteps // spr + 1
        self.ScanPartition = [
            self.ScanSpace[n * spr:(n + 1) * spr] for n in range(numparts)
        ]
        self.SeqPartition = [
            self.SeqArray[n * spr:(n + 1) * spr] for n in range(numparts)
        ]
        self.Tsteps = Tsteps

    def Prepare(self, ReRun=False):
        """
        Internal method to prepare the parameters and generate ScanSpace.

        - *ReRun* clear previous results before regenerating the scan space
        """
        print("\nPREPARATION\n-----------")
        self.scanT.normal_timer('PREP')
        self._MODE_ = self._MODE_.lower()
        assert self._MODE_ in self.__AnalysisModes__, (
            '\nSCANNER: "%s" is not a valid analysis mode!' % self._MODE_
        )
        if ReRun:
            self.ScanSpace = []
            self.UserOutputResults = []
            self.SteadyStateResults = []
        # always (re)initialise: the class-level defaults are None and
        # StoreData() appends to these lists
        self.invalid_state_list = []
        self.invalid_state_list_idx = []
        # NOTE(review): a disabled mode_state_nan_on_fail push to the
        # engines used to live here; see Scanner.Run for the serial variant
        # generate the scan space
        self.genScanSpace()
        print("Generated ScanSpace:", next(self.scanT.PREP))
        print('PARSCANNER: Tsteps', self.Tsteps)
        flush()

    def Run(self, ReRun=False):
        """
        Run the parameter scan with a load balancing task client.

        - *ReRun* rerun the scan, clearing previous results
        """
        self.Prepare(ReRun)
        # this is where the parallel magic fun starts....
        arl = []  # asynchronous results list
        if self.engine == 'multiproc':
            # hand the job to an external multiprocessing script via a
            # pickle file named after the current epoch second
            fN = str(time()).split('.')[0]
            with open(fN, 'wb') as F:
                pickle.dump(
                    (
                        self.mod,
                        self.ScanPartition,
                        self.SeqPartition,
                        self.GenOrder,
                        self.UserOutputList,
                    ),
                    F,
                    protocol=-1,
                )
            fN = os.path.abspath(fN)
            print("Preparation completed:", next(self.scanT.PREP))
            self.scanT.normal_timer('RUN')
            subprocess.call([sys.executable, MULTISCANFILE, self._MODE_, fN])
            with open(fN, 'rb') as F:
                res_list = pickle.load(F)
            os.remove(fN)
            for result in res_list:
                self.StoreData(result)
        elif self.engine == 'ipcluster':
            for i in range(len(self.ScanPartition)):
                arl.append(
                    self.lv.apply(
                        Analyze,
                        self.ScanPartition[i],
                        self.SeqPartition[i],
                        self.GenOrder,
                        self._MODE_,
                        self.UserOutputList,
                        self.mod,
                    )
                )
            print("Submitted tasks:", len(arl))
            print("Preparation completed:", next(self.scanT.PREP))
            print("\nPARALLEL COMPUTATION\n--------------------")
            flush()
            self.scanT.normal_timer('RUN')
            # poll until the load-balanced queue has assigned every task
            while self.lv.queue_status()['unassigned'] > 0:
                sleep(5)
                print('Tasks to go... ', self.lv.queue_status()['unassigned'])
            # wait until all tasks are completed
            self.lv.wait()
            flush()
            print("\nGATHER RESULT\n-------------")
            flush()
            for ar in arl:
                result = ar.get()
                # result tuple:
                #   0 - state_species
                #   1 - state_flux
                #   2 - user_output_results
                #   3 - invalid_state_list
                #   4 - invalid_state_list_idx
                self.StoreData(result)
        print("Parallel calculation completed:", next(self.scanT.RUN))
        self.GatherScanResult()

    def RunScatter(self, ReRun=False):
        """
        Run the parameter scan by using scatter and gather for the ScanSpace.
        Not load balanced, equal number of scan runs per node.

        - *ReRun* rerun the scan, clearing previous results
        """
        if self.engine != 'ipcluster':
            print("RunScatter() only supported with ipcluster!")
            return
        self.Prepare(ReRun)
        # this is where the parallel magic fun starts....
        # push details into client namespace
        self.dv.push({
            'GenOrder': self.GenOrder,
            'mode': self._MODE_,
            'mod': self.mod,
            'UserOutputList': self.UserOutputList,
        })
        # scatter ScanSpace and SeqArray
        self.dv.scatter('partition', self.ScanSpace)
        self.dv.scatter('seqarray', self.SeqArray)
        print("Scattered ScanSpace on number of engines:", len(self.dv))
        print("Preparation completed:", next(self.scanT.PREP))
        print("\nPARALLEL COMPUTATION\n--------------------")
        flush()
        self.scanT.normal_timer('RUN')
        # executes scan on partitioned ScanSpace on every node
        self.dv.execute(
            'y=Analyze(partition,seqarray,GenOrder,mode,UserOutputList,mod)',
            block=True,
        )
        print("Parallel calculation completed:", next(self.scanT.RUN))
        flush()
        ## this is analysis stuff
        print("\nGATHER RESULT\n-------------")
        flush()
        ar = self.dv.gather('y')
        results = ar.get()
        # each engine returns a flat 5-tuple; regroup the gathered list
        results = [results[i:i + 5] for i in range(0, len(results), 5)]
        for result in results:
            # result tuple:
            #   0 - state_species
            #   1 - state_flux
            #   2 - user_output_results
            #   3 - invalid_state_list
            #   4 - invalid_state_list_idx
            self.StoreData(result)
        print("Parallel calculation completed:", next(self.scanT.RUN))
        self.GatherScanResult()

    def StoreData(self, result):
        """
        Internal function which concatenates and stores a single result
        generated by Analyze.

        - *result* IPython client result object (5-tuple, see Run)
        """
        self.SteadyStateResults.append(
            np.hstack((np.array(result[0]), np.array(result[1])))
        )
        if self.HAS_USER_OUTPUT:
            self.UserOutputResults.append(np.array(result[2]))
        self.invalid_state_list += result[3]
        self.invalid_state_list_idx += result[4]

    def GatherScanResult(self):
        """
        Concatenates and combines output result fragments from the
        parallel scan.
        """
        # from here on we have the complete results
        self.SteadyStateResults = np.vstack(self.SteadyStateResults)
        if self.HAS_USER_OUTPUT:
            self.UserOutputResults = np.vstack(self.UserOutputResults)
        self.resetInputParameters()
        print("\nPARSCANNER: %s states analysed" % len(self.SteadyStateResults))
        print("Total time taken: ", next(self.scanT.PREP))
        self.scanT.PREP.close()  # close timer
        self.scanT.RUN.close()
        if len(self.invalid_state_list) > 0:
            print('\nBad steady states encountered at:\n')
            print("Sequence: ", self.invalid_state_list_idx)
            print("Parameters: ", self.invalid_state_list)
class ParScanner(Scanner):
    """
    Arbitrary dimension generic distributed scanner.

    Subclassed from pysces.PyscesScan.Scanner. This class is initiated with
    a loaded PySCeS model and then allows the user to define scan
    parameters, see self.addScanParameter() and user output, see
    self.addUserOutput(). Steady-state results are always stored in
    self.SteadyStateResults while user output can be found in
    self.UserOutputResults.

    Distributed (parallel) execution is achieved with the clustering
    capability of IPython. See "ipcluster --help".
    """

    # --johann 20101206
    genOn = True
    _MODE_ = 'state'
    HAS_USER_OUTPUT = False
    nan_on_bad_state = True
    MSG_PRINT_INTERVAL = 500
    __AnalysisModes__ = ('state', 'elasticity', 'mca', 'stability')
    # populated (as lists) by Prepare(); class-level default is None
    invalid_state_list = None
    invalid_state_list_idx = None
    # number of scan points handed to each worker task
    scans_per_run = 100

    def __init__(self, mod, engine='multiproc'):
        """
        Instantiate the parallel scanner class with a PySCeS model instance
        and an optional 'engine' argument specifying the parallel engine:
        'multiproc' -- multiprocessing (default)
        'ipcluster' -- IPython cluster
        """
        self.engine = engine
        if engine == 'multiproc':
            print('parallel engine: multiproc')
        elif engine == 'ipcluster':
            print('parallel engine: ipcluster')
            try:
                from ipyparallel import Client
            except ImportError as ex:
                print('\n', ex)
                raise ImportError(
                    'PARSCANNER: Requires IPython and ipyparallel version >=4.0 (http://ipython.org) and 0MQ (http://zero.mq).'
                )
            try:
                rc = Client()
                self.rc = rc
            except OSError as ex:
                raise OSError(
                    str(ex)
                    + '\nPARSCANNER: Requires a running IPython cluster. See "ipcluster --help".\n'
                )
            dv = rc[:]  # direct view
            lv = rc.load_balanced_view()
            self.dv = dv
            self.lv = lv
            dv.execute('from pysces.PyscesParScan import Analyze, setModValue')
        else:
            raise UserWarning(engine + " is not a valid parallel engine!")

        self.GenDict = {}
        self.GenOrder = []
        self.ScanSpace = []
        self.mod = mod
        self.SteadyStateResults = []
        self.UserOutputList = []
        self.UserOutputResults = []
        self.scanT = TimerBox()

    def genScanSpace(self):
        """
        Generates the parameter scan space, partitioned according to
        self.scans_per_run.
        """
        spr = self.scans_per_run
        Tsteps = 1
        for gen in self.GenOrder:
            if self.GenDict[gen][4] == False:  # don't increase Tsteps for slaves
                Tsteps *= self.GenDict[gen][2]
        for step in range(Tsteps):
            pars = self.__nextValue__()
            self.ScanSpace.append(pars)
        self.ScanSpace = np.array(self.ScanSpace)
        self.SeqArray = np.arange(1, Tsteps + 1)
        # number of partitions = ceil(Tsteps / spr)
        if Tsteps % spr == 0:
            numparts = Tsteps // spr
        else:
            numparts = Tsteps // spr + 1
        self.ScanPartition = [
            self.ScanSpace[n * spr:(n + 1) * spr] for n in range(numparts)
        ]
        self.SeqPartition = [
            self.SeqArray[n * spr:(n + 1) * spr] for n in range(numparts)
        ]
        self.Tsteps = Tsteps

    def Prepare(self, ReRun=False):
        """
        Internal method to prepare the parameters and generate ScanSpace.

        - *ReRun* clear previous results before regenerating the scan space
        """
        print("\nPREPARATION\n-----------")
        self.scanT.normal_timer('PREP')
        self._MODE_ = self._MODE_.lower()
        assert self._MODE_ in self.__AnalysisModes__, (
            '\nSCANNER: "%s" is not a valid analysis mode!' % self._MODE_
        )
        if ReRun:
            self.ScanSpace = []
            self.UserOutputResults = []
            self.SteadyStateResults = []
        # always (re)initialise: class-level defaults are None and
        # StoreData() appends to these lists
        self.invalid_state_list = []
        self.invalid_state_list_idx = []
        # generate the scan space
        self.genScanSpace()
        print("Generated ScanSpace:", next(self.scanT.PREP))
        print('PARSCANNER: Tsteps', self.Tsteps)
        flush()

    def Run(self, ReRun=False):
        """
        Run the parameter scan with a load balancing task client.

        - *ReRun* rerun the scan, clearing previous results
        """
        self.Prepare(ReRun)
        # this is where the parallel magic fun starts....
        arl = []  # asynchronous results list
        if self.engine == 'multiproc':
            fN = str(time()).split('.')[0]
            # 'with' guarantees the handle is closed even if pickling fails
            # (the previous open()/close() pair leaked it on exception)
            with open(fN, 'wb') as F:
                pickle.dump(
                    (
                        self.mod,
                        self.ScanPartition,
                        self.SeqPartition,
                        self.GenOrder,
                        self.UserOutputList,
                    ),
                    F,
                    protocol=-1,
                )
            fN = os.path.abspath(fN)
            print("Preparation completed:", next(self.scanT.PREP))
            self.scanT.normal_timer('RUN')
            subprocess.call([sys.executable, MULTISCANFILE, self._MODE_, fN])
            with open(fN, 'rb') as F:
                res_list = pickle.load(F)
            os.remove(fN)
            for result in res_list:
                self.StoreData(result)
        elif self.engine == 'ipcluster':
            for i in range(len(self.ScanPartition)):
                arl.append(
                    self.lv.apply(
                        Analyze,
                        self.ScanPartition[i],
                        self.SeqPartition[i],
                        self.GenOrder,
                        self._MODE_,
                        self.UserOutputList,
                        self.mod,
                    )
                )
            print("Submitted tasks:", len(arl))
            print("Preparation completed:", next(self.scanT.PREP))
            print("\nPARALLEL COMPUTATION\n--------------------")
            flush()
            self.scanT.normal_timer('RUN')
            while self.lv.queue_status()['unassigned'] > 0:
                sleep(5)
                print('Tasks to go... ', self.lv.queue_status()['unassigned'])
            # wait until all tasks are completed
            self.lv.wait()
            flush()
            print("\nGATHER RESULT\n-------------")
            flush()
            for ar in arl:
                result = ar.get()
                # result tuple:
                #   0 - state_species
                #   1 - state_flux
                #   2 - user_output_results
                #   3 - invalid_state_list
                #   4 - invalid_state_list_idx
                self.StoreData(result)
        print("Parallel calculation completed:", next(self.scanT.RUN))
        self.GatherScanResult()

    def RunScatter(self, ReRun=False):
        """
        Run the parameter scan by using scatter and gather for the ScanSpace.
        Not load balanced, equal number of scan runs per node.

        - *ReRun* rerun the scan, clearing previous results
        """
        if self.engine != 'ipcluster':
            print("RunScatter() only supported with ipcluster!")
            return
        self.Prepare(ReRun)
        # this is where the parallel magic fun starts....
        # push details into client namespace
        self.dv.push({
            'GenOrder': self.GenOrder,
            'mode': self._MODE_,
            'mod': self.mod,
            'UserOutputList': self.UserOutputList,
        })
        # scatter ScanSpace and SeqArray
        self.dv.scatter('partition', self.ScanSpace)
        self.dv.scatter('seqarray', self.SeqArray)
        print("Scattered ScanSpace on number of engines:", len(self.dv))
        print("Preparation completed:", next(self.scanT.PREP))
        print("\nPARALLEL COMPUTATION\n--------------------")
        flush()
        self.scanT.normal_timer('RUN')
        # executes scan on partitioned ScanSpace on every node
        self.dv.execute(
            'y=Analyze(partition,seqarray,GenOrder,mode,UserOutputList,mod)',
            block=True,
        )
        print("Parallel calculation completed:", next(self.scanT.RUN))
        flush()
        ## this is analysis stuff
        print("\nGATHER RESULT\n-------------")
        flush()
        ar = self.dv.gather('y')
        results = ar.get()
        # each engine returns a flat 5-tuple; regroup the gathered list
        results = [results[i:i + 5] for i in range(0, len(results), 5)]
        for result in results:
            # result tuple:
            #   0 - state_species
            #   1 - state_flux
            #   2 - user_output_results
            #   3 - invalid_state_list
            #   4 - invalid_state_list_idx
            self.StoreData(result)
        print("Parallel calculation completed:", next(self.scanT.RUN))
        self.GatherScanResult()

    def StoreData(self, result):
        """
        Internal function which concatenates and stores a single result
        generated by Analyze.

        - *result* IPython client result object (5-tuple, see Run)
        """
        self.SteadyStateResults.append(
            np.hstack((np.array(result[0]), np.array(result[1])))
        )
        if self.HAS_USER_OUTPUT:
            self.UserOutputResults.append(np.array(result[2]))
        self.invalid_state_list += result[3]
        self.invalid_state_list_idx += result[4]

    def GatherScanResult(self):
        """
        Concatenates and combines output result fragments from the
        parallel scan.
        """
        # from here on we have the complete results
        self.SteadyStateResults = np.vstack(self.SteadyStateResults)
        if self.HAS_USER_OUTPUT:
            self.UserOutputResults = np.vstack(self.UserOutputResults)
        self.resetInputParameters()
        print("\nPARSCANNER: %s states analysed" % len(self.SteadyStateResults))
        print("Total time taken: ", next(self.scanT.PREP))
        self.scanT.PREP.close()  # close timer
        self.scanT.RUN.close()
        if len(self.invalid_state_list) > 0:
            print('\nBad steady states encountered at:\n')
            print("Sequence: ", self.invalid_state_list_idx)
            print("Parameters: ", self.invalid_state_list)
class Scanner(object):
    """
    Arbitrary dimension generic scanner. This class is initiated with
    a loaded PySCeS model and then allows the user to define scan parameters
    see self.addScanParameter() and user output see self.addUserOutput().
    Steady-state results are always stored in self.SteadyStateResults while
    user output can be found in self.UserOutputResults - brett 2007.
    """

    genOn = True
    quietRun = False
    _MODE_ = 'state'
    HAS_USER_OUTPUT = False
    HAS_STATE_OUTPUT = True
    nan_on_bad_state = True
    MSG_PRINT_INTERVAL = 500
    __AnalysisModes__ = ('state', 'elasticity', 'mca', 'stability', 'null')
    # populated (as lists) by Run(); class-level default is None
    invalid_state_list = None
    invalid_state_list_idx = None

    def __init__(self, mod):
        """
        Instantiate the scanner with a loaded PySCeS model instance.

        - *mod* a loaded PySCeS model
        """
        try:
            # probe an attribute to warn early if no model is loaded;
            # deliberately best-effort, the scanner is still constructed
            mod.nmatrix
        except Exception:
            print('\nSCANNER: Please load a model <i.e. mod. doLoad()> before trying to use it.')
        self.GenDict = {}
        self.GenOrder = []
        self.ScanSpace = []
        self.mod = mod
        self.SteadyStateResults = []
        self.UserOutputList = []
        self.UserOutputResults = []
        self.scanT = TimerBox()

    def testInputParameter(self, name):
        """
        This tests whether a str(name) is an attribute of the model.
        """
        return hasattr(self.mod, name)

    def resetInputParameters(self):
        """
        Resets the input model parameters to their pre-scan values
        (stored as element 6 of each GenDict entry) after a scan run.
        """
        for key in self.GenDict:
            setattr(self.mod, key, self.GenDict[key][6])

    def addUserOutput(self, *kw):
        """
        Add output parameters to the scanner as a collection of one or more
        string arguments ('O1','O2','O3', 'On'). These are evaluated at each
        iteration of the scanner and stored in the self.UserOutputResults
        array. The list of output is stored in self.UserOutputList.
        """
        output = [attr for attr in kw if isinstance(attr, str)]
        self.HAS_USER_OUTPUT = True
        ModeList = list(self.__AnalysisModes__)
        MaxMode = 0
        # bump the analysis mode to the most demanding one implied by the
        # requested outputs (elasticity < mca < stability)
        for attr in output:
            if attr[:2] == 'ec' and self._MODE_ != 'null':
                cM = ModeList.index('elasticity')
                if cM > MaxMode:
                    self._MODE_ = 'elasticity'
                    MaxMode = cM
            elif attr[:2] == 'cc' and self._MODE_ != 'null':
                cM = ModeList.index('mca')
                if cM > MaxMode:
                    self._MODE_ = 'mca'
                    MaxMode = cM
            elif attr[:6] == 'lambda' and self._MODE_ != 'null':
                cM = ModeList.index('stability')
                if cM > MaxMode:
                    self._MODE_ = 'stability'
                    MaxMode = cM
        print('MaxMode', MaxMode)
        self.UserOutputList = output

    def addScanParameter(self, name, start, end, points, log=False, slave=False):
        """
        Add a parameter to scan (an axis if you like) input is:

        - str(name) = model parameter name
        - float(start) = lower bound of scan
        - float(end) = upper bound of scan
        - int(points) = number of points in scan range
        - bool(log) = Use a logarithmic (base10) range
        - bool(slave) = Scan parameters can be masters i.e. an independent
          axis or a "slave" which moves synchronously with the previously
          defined parameter range.

        The first ScanParameter cannot be a slave.
        """
        offset = 1
        assert self.testInputParameter(name), (
            '\nSCANNER: Model does not have an attribute "%s" \n' % name
        )
        assert name not in self.GenOrder, (
            '\nSCANNER: This operation is currently not allowed\n'
        )
        if not len(self.GenOrder) == 0:
            for el in self.GenOrder:
                if self.GenDict[el][4] == False:  # test if not slave
                    offset = offset * self.GenDict[el][2]  # increment offset
            if slave == True:
                prevpar = self.GenOrder[-1]  # previous parameter, i.e. master
                offset = self.GenDict[prevpar][5]  # don't increment for slave
                if points != self.GenDict[prevpar][2]:
                    print('SCANNER: Slave parameter needs to iterate over same number of\npoints as master...resetting points.')
                    points = self.GenDict[prevpar][2]
        else:
            if slave == True:
                slave = False
                print('SCANNER: Inner range cannot be a slave ... resetting to master')
        # GenDict entry: (start, end, points, log, slave, offset, original model value)
        self.GenDict.setdefault(
            name,
            (start, end, points, log, slave, offset, getattr(self.mod, name)),
        )
        setattr(self, name + '_test', self.stepGen(offset))
        setattr(self, name, self.rangeGen(name, start, end, points, log))
        if name not in self.GenOrder:
            self.GenOrder.append(name)

    def makeRange(self, start, end, points, log):
        """
        Should be pretty self evident it defines a range:

        - float(start)
        - float(end)
        - int(points)
        - bool(log)
        """
        # np.* used here: the scipy.linspace/logspace/log10 top-level
        # aliases were deprecated and removed from modern SciPy
        if log:
            rng = np.logspace(np.log10(start), np.log10(end), points)
        else:
            rng = np.linspace(start, end, points)
        return rng

    def rangeGen(self, name, start, end, points, log):
        """
        This is where things get more interesting. This function creates
        a cycling generator which loops over a parameter range.

        - *name* parameter name
        - *start* value
        - *end* value
        - *points*
        - *log* scale
        """
        # renamed from 'range' to avoid shadowing the builtin
        rng = self.makeRange(start, end, points, log)
        cnt = 0
        while self.genOn:
            # advance (and wrap) only when the paired step generator fires
            if next(getattr(self, name + '_test')):
                if cnt >= len(rng) - 1:
                    cnt = 0
                else:
                    cnt += 1
            yield rng[cnt]

    def stepGen(self, offset):
        """
        Another looping generator function. The idea here is to create a
        set of generators for the scan parameters. These generators then
        all fire together and determine whether the range generators should
        advance or not. Believe it or not this dynamically creates the
        matrix of parameter values to be evaluated.
        """
        val = 0
        while self.genOn:
            if val >= offset:
                val = 0
                yield True
            else:
                yield False
                val += 1

    def __nextStep__(self):
        """
        Fire all step generators.
        """
        return [next(getattr(self, genN + '_test')) for genN in self.GenOrder]

    def __nextValue__(self):
        """
        Fire all range generators.
        """
        return [next(getattr(self, genN)) for genN in self.GenOrder]

    def setModValue(self, name, value):
        """
        An easy one, assign value to name of the instantiated PySCeS
        model attribute.
        """
        assert hasattr(self.mod, name), (
            '\nModel does not have an attribute: %s \n' % name
        )
        setattr(self.mod, name, float(value))

    def RunAgain(self):
        """
        While it is impossible to change the generator/range structure of
        a scanner (just build another one) you can 'in principle' change
        the User Output and run it again.
        """
        self.Run(ReRun=True)

    def Run(self, ReRun=False):
        """
        Run the parameter scan.

        - *ReRun* rerun the scan, clearing previous results
        """
        self.scanT.normal_timer('RUN')
        self._MODE_ = self._MODE_.lower()
        assert self._MODE_ in self.__AnalysisModes__, (
            '\nSCANNER: "%s" is not a valid analysis mode!' % self._MODE_
        )
        if self.quietRun:
            self.mod.SetQuiet()
        else:
            self.mod.SetLoud()
        if ReRun:
            self.ScanSpace = []
            self.UserOutputResults = []
            self.SteadyStateResults = []
        # always (re)initialise: class-level defaults are None and the
        # scan loop appends to these lists
        self.invalid_state_list = []
        self.invalid_state_list_idx = []
        if self.nan_on_bad_state:
            self.mod.__settings__["mode_state_nan_on_fail"] = True
        Tsteps = 1
        for gen in self.GenOrder:
            if self.GenDict[gen][4] == False:  # don't increase Tsteps for slaves
                Tsteps *= self.GenDict[gen][2]
        print(next(self.scanT.RUN))
        analysis_counter = 0
        print('SCANNER: Tsteps', Tsteps)
        pcntr = 0
        for step in range(Tsteps):
            pars = self.__nextValue__()
            self.ScanSpace.append(pars)
            for par in range(len(self.GenOrder)):
                self.setModValue(self.GenOrder[par], pars[par])
            analysis_counter += 1
            self.Analyze()
            if not self.mod.__StateOK__:
                self.invalid_state_list.append(pars)
                self.invalid_state_list_idx.append(analysis_counter)
            self.StoreData()
            pcntr += 1
            # periodic progress report
            if pcntr >= self.MSG_PRINT_INTERVAL:
                print('\t', analysis_counter, next(self.scanT.RUN))
                pcntr = 0
        self.ScanSpace = np.array(self.ScanSpace)
        self.SteadyStateResults = np.array(self.SteadyStateResults)
        self.UserOutputResults = np.array(self.UserOutputResults)
        self.resetInputParameters()
        if self.nan_on_bad_state:
            self.mod.mode_state_nan_on_fail = False
        print("\nSCANNER: %s states analysed\n" % analysis_counter)
        if len(self.invalid_state_list) > 0:
            print('Bad steady states encountered at:\n')
            print(self.invalid_state_list)
            print(self.invalid_state_list_idx)

    def Analyze(self):
        """
        The analysis method, the mode is automatically set by the
        self.addUserOutput() method but can be reset by the user.
        """
        if self._MODE_ == 'state':
            self.mod.doState()
        elif self._MODE_ == 'elasticity':
            self.mod.doElas()
        elif self._MODE_ == 'mca':
            self.mod.doMca()
        elif self._MODE_ == 'stability':
            self.mod.doEigenMca()
        elif self._MODE_ == 'null':
            self.HAS_STATE_OUTPUT = False
        self.mod.User_Function()

    def StoreData(self):
        """
        Internal function which concatenates and stores the data
        generated by Analyze.
        """
        if self.HAS_STATE_OUTPUT and self._MODE_ != 'null':
            self.SteadyStateResults.append(
                np.hstack((self.mod.state_species, self.mod.state_flux))
            )
        if self.HAS_USER_OUTPUT and self._MODE_ != 'null':
            self.UserOutputResults.append(
                np.array([getattr(self.mod, res) for res in self.UserOutputList])
            )

    def getOutput(self):
        """
        Will be the new output function.
        """
        pass

    def getResultMatrix(self, stst=False, lbls=False):
        """
        Returns an array of result data. I'm keepin this for backwards
        compatibility but it will be replaced by a getOutput() method when
        this scanner is updated to use the new data_scan object.

        - *stst* add steady-state data to output array
        - *lbls* return a tuple of (array, column_header_list)

        If *stst* is True output has dimensions
        [scan_parameters]+[state_species+state_flux]+[Useroutput]
        otherwise [scan_parameters]+[Useroutput].
        """
        output_array = None
        labels = []
        if stst:
            if self.HAS_USER_OUTPUT:
                output_array = np.hstack([
                    self.ScanSpace,
                    self.SteadyStateResults,
                    self.UserOutputResults,
                ])
                labels = (
                    self.GenOrder
                    + list(self.mod.species)
                    + list(self.mod.reactions)
                    + self.UserOutputList
                )
            else:
                output_array = np.hstack([self.ScanSpace, self.SteadyStateResults])
                labels = (
                    self.GenOrder
                    + list(self.mod.species)
                    + list(self.mod.reactions)
                )
        else:
            output_array = np.hstack([self.ScanSpace, self.UserOutputResults])
            labels = self.GenOrder + self.UserOutputList
        if lbls:
            return output_array, labels
        else:
            return output_array