def uncertainty_plot(self, total_variance, t=None, filename=''):
    """
    Plot the top uncertainty contributions resulting from uncertainties in the
    kinetic parameters. The total_variance must be specified. Optionally, the
    reaction time `t` in seconds can be specified for plotting the uncertainties.
    The number of reaction uncertainties to plot is determined by self.num_reactions.
    """
    filename = filename if filename else "kinetics_uncertainty.png"

    self.load()

    if t:
        idx = find_nearest(self.x_var.data, t)
    else:
        idx = -1

    reaction_uncertainty = []
    total_uncertainty = total_variance

    for reaction_sens in self.y_var:
        if isinstance(reaction_sens.uncertainty, np.ndarray):
            # The parameter uncertainties are an array which should have the same length as the sensitivity data
            uncertainty_data = reaction_sens.data * reaction_sens.uncertainty
            uncertainty_contribution = uncertainty_data[idx] ** 2
        else:
            # The parameter uncertainty is a scalar
            uncertainty_contribution = (reaction_sens.data[idx] * reaction_sens.uncertainty) ** 2

        reaction_uncertainty.append([reaction_sens.label, reaction_sens.reaction, uncertainty_contribution])

    # Normalize and create new list of GenericData
    new_y_var = []
    for label, reaction, uncertainty in reaction_uncertainty:
        data = GenericData(label=label, reaction=reaction, data=[uncertainty / total_uncertainty * 100])
        new_y_var.append(data)

    new_y_var.sort(key=lambda x: abs(x.data[0]), reverse=True)
    new_y_var = new_y_var[:self.num_reactions]

    GenericPlot(x_var=None, y_var=new_y_var, xlabel="Uncertainty Contribution (%)").barplot(filename=filename)

    return reaction_uncertainty

def uncertaintyPlot(self, totalVariance, t=None, filename=''):
    """
    Plot the top uncertainty contributions resulting from uncertainties in the
    thermo parameters. The totalVariance must be specified. Optionally, the
    reaction time `t` in seconds can be specified for plotting the uncertainties.
    The number of thermo uncertainties to plot is determined by self.numSpecies.
    """
    filename = filename if filename else "thermo_uncertainty.png"

    self.load()

    if t:
        idx = findNearest(self.xVar.data, t)
    else:
        idx = -1

    thermoUncertainty = []
    totalUncertainty = totalVariance

    for thermoSens in self.yVar:
        if isinstance(thermoSens.uncertainty, numpy.ndarray):
            # The parameter uncertainties are an array which should have the same length as the sensitivity data
            uncertaintyData = thermoSens.data * thermoSens.uncertainty
            uncertaintyContribution = uncertaintyData[idx] ** 2
        else:
            # The parameter uncertainty is a scalar
            uncertaintyContribution = (thermoSens.data[idx] * thermoSens.uncertainty) ** 2

        thermoUncertainty.append([thermoSens.label, thermoSens.species, uncertaintyContribution])

    # Normalize and create new list of GenericData
    newYVar = []
    for label, species, uncertainty in thermoUncertainty:
        data = GenericData(label=label, species=species, data=[uncertainty / totalUncertainty * 100])
        newYVar.append(data)

    newYVar.sort(key=lambda x: abs(x.data[0]), reverse=True)
    newYVar = newYVar[:self.numSpecies]

    GenericPlot(xVar=None, yVar=newYVar, xlabel="Uncertainty Contribution (%)").barplot(filename=filename)

    return thermoUncertainty

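
# Illustrative standalone sketch (not part of either plotting method above; all numbers are
# made up): how a single parameter's variance contribution is formed and then normalized to the
# percentage that appears on the bar plots.
import numpy as np

sens = np.array([0.05, -0.30, 0.10])    # hypothetical dln[c]/dln[k] sensitivities at the chosen time index
d_ln_k = np.array([1.0, 0.5, 2.0])      # hypothetical parameter uncertainties
contributions = (sens * d_ln_k) ** 2    # variance contribution of each parameter
total_variance = contributions.sum()
percent = contributions / total_variance * 100.0   # the values placed on the bar plot
print(percent)                          # approximately [3.8, 34.6, 61.5]
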
def parseCSVData(csvFile):
    """
    This function parses a typical csv file outputted from a simulation or
    sensitivity analysis in the form of

        Time (s)  Header1  Header2  Header...
        t0        val1_0   val2_0   val...
        t1        ..       ..

    It returns the data in the form

        Time, DataList

    where Time is returned as a GenericData object and DataList is a list of
    GenericData objects.
    """
    import csv
    import numpy
    import re

    # Patterns for matching indices or units
    indexPattern = re.compile(r'^\S+\(\d+\)$')
    unitsPattern = re.compile(r'\s\(.+\)$')
    rxnSensPattern = re.compile(r'^dln\[\S+\]\/dln\[k\d+\]:\s\S+$')
    thermoSensPattern = re.compile(r'^dln\[\S+\]\/dG\[\S+\]$')

    f = csv.reader(open(csvFile, 'r'))
    columns = zip(*f)

    time = GenericData(
        label=columns[0][0],
        data=numpy.array(columns[0][1:], dtype=numpy.float64),
    )

    # Parse the units from the Time header
    if unitsPattern.search(time.label):
        label, sep, units = time.label[:-1].rpartition('(')
        time.label = label
        time.units = units

    dataList = []
    for col in columns[1:]:
        header = col[0]
        values = numpy.array(col[1:], dtype=numpy.float64)
        data = GenericData(label=header, data=values)

        # Parse the index or the label from the header
        if indexPattern.search(data.label):
            species, sep, index = data.label[:-1].rpartition('(')
            # Save the species attribute if an index was found
            data.species = species
            data.index = int(index)
        elif unitsPattern.search(data.label):
            label, sep, units = data.label[:-1].rpartition('(')
            data.label = label
            data.units = units
        elif rxnSensPattern.search(data.label):
            rxn = data.label.split()[1]
            index = data.label.split()[0][:-2].rpartition('dln[k')[2]
            data.reaction = rxn
            data.index = index
        elif thermoSensPattern.search(data.label):
            species = data.label[:-1].rpartition('dG[')[2]
            data.species = species
            if indexPattern.search(species):
                data.index = species[:-1].rpartition('(')[2]

        dataList.append(data)

    return time, dataList

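
# Quick standalone illustration (example headers are assumed, not taken from a real CSV) of how
# the header patterns above classify columns:
import re

index_pattern = re.compile(r'^\S+\(\d+\)$')
units_pattern = re.compile(r'\s\(.+\)$')
rxn_sens_pattern = re.compile(r'^dln\[\S+\]\/dln\[k\d+\]:\s\S+$')
thermo_sens_pattern = re.compile(r'^dln\[\S+\]\/dG\[\S+\]$')

for header in ['CH4(3)', 'Temperature (K)', 'dln[CH4(3)]/dln[k2]: H+O2=OH+O', 'dln[CH4(3)]/dG[OH(5)]']:
    if index_pattern.search(header):
        kind = 'species label with index'
    elif units_pattern.search(header):
        kind = 'label with units'
    elif rxn_sens_pattern.search(header):
        kind = 'reaction sensitivity'
    elif thermo_sens_pattern.search(header):
        kind = 'thermo sensitivity'
    else:
        kind = 'plain label'
    print(header, '->', kind)
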
def localAnalysis(self, sensitiveSpecies, correlated=False, number=10, fileformat='.png'):
    """
    Conduct local uncertainty analysis on the reaction model.
    sensitiveSpecies is a list of sensitive Species objects
    number is the number of highest contributing uncertain parameters desired to be plotted
    fileformat can be either .png, .pdf, or .svg
    """
    for sensSpecies in sensitiveSpecies:
        csvfilePath = os.path.join(self.outputDirectory, 'solver',
                                   'sensitivity_{0}_SPC_{1}.csv'.format(1, sensSpecies.index))
        time, dataList = parseCSVData(csvfilePath)

        # Assign uncertainties
        thermoDataList = []
        reactionDataList = []
        for data in dataList:
            if data.species:
                for species in self.speciesList:
                    if species.toChemkin() == data.species:
                        index = self.speciesList.index(species)
                        break
                else:
                    raise Exception('Chemkin name {} of species in the CSV file does not match anything in the '
                                    'species list.'.format(data.species))
                data.uncertainty = self.thermoInputUncertainties[index]
                thermoDataList.append(data)
            if data.reaction:
                rxnIndex = int(data.index) - 1
                data.uncertainty = self.kineticInputUncertainties[rxnIndex]
                reactionDataList.append(data)

        if correlated:
            correlatedThermoData = {}
            correlatedReactionData = {}
            for data in thermoDataList:
                # Unpack the labels and partial uncertainties
                for label, dpG in data.uncertainty.iteritems():
                    if label in correlatedThermoData:
                        # Multiply the sensitivity with the partial uncertainty
                        correlatedThermoData[label].data[-1] += data.data[-1] * dpG
                    else:
                        correlatedThermoData[label] = GenericData(data=[data.data[-1] * dpG],
                                                                  uncertainty=1, label=label, species='dummy')
            for data in reactionDataList:
                for label, dplnk in data.uncertainty.iteritems():
                    if label in correlatedReactionData:
                        correlatedReactionData[label].data[-1] += data.data[-1] * dplnk
                    else:
                        correlatedReactionData[label] = GenericData(data=[data.data[-1] * dplnk],
                                                                    uncertainty=1, label=label, reaction='dummy')
            thermoDataList = correlatedThermoData.values()
            reactionDataList = correlatedReactionData.values()

        # Compute total variance
        totalVariance = 0.0
        for data in thermoDataList:
            totalVariance += (data.data[-1] * data.uncertainty) ** 2
        for data in reactionDataList:
            totalVariance += (data.data[-1] * data.uncertainty) ** 2

        if not correlated:
            # Add the reaction index to the data label of the reaction uncertainties
            for data in reactionDataList:
                data.label = 'k' + str(data.index) + ': ' + data.label.split()[-1]

        thermoUncertaintyPlotPath = os.path.join(
            self.outputDirectory, 'thermoLocalUncertainty_{0}'.format(sensSpecies.toChemkin()) + fileformat)
        reactionUncertaintyPlotPath = os.path.join(
            self.outputDirectory, 'kineticsLocalUncertainty_{0}'.format(sensSpecies.toChemkin()) + fileformat)

        ReactionSensitivityPlot(xVar=time, yVar=reactionDataList,
                                numReactions=number).uncertaintyPlot(totalVariance,
                                                                     filename=reactionUncertaintyPlotPath)
        ThermoSensitivityPlot(xVar=time, yVar=thermoDataList,
                              numSpecies=number).uncertaintyPlot(totalVariance,
                                                                 filename=thermoUncertaintyPlotPath)

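
# For reference, a restatement of the uncorrelated total variance assembled above (this is a
# paraphrase of the two loops over thermoDataList and reactionDataList, not additional source code):
#
#     Var(ln c_i) = sum_j (dln c_i / dln k_j)^2 * (d ln k_j)^2
#                 + sum_m (dln c_i / dG_m)^2    * (dG_m)^2
#
# where each GenericData.uncertainty holds d ln k_j (kinetics) or dG_m (thermo) and the
# sensitivities are evaluated at the final time point, data.data[-1].
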
def simulate(self):
    """
    Run all the conditions as a cantera simulation.
    Returns the data as a list of tuples containing: (time, [list of temperature, pressure, and species data])
    for each reactor condition
    """
    # Get all the cantera names for the species
    speciesNamesList = [getSpeciesIdentifier(species) for species in self.speciesList]
    inertIndexList = [self.speciesList.index(species) for species in self.speciesList if species.index == -1]

    allData = []
    for condition in self.conditions:

        # First translate the molFrac from species objects to species names
        newMolFrac = {}
        for key, value in condition.molFrac.iteritems():
            newkey = getSpeciesIdentifier(key)
            newMolFrac[newkey] = value

        # Set Cantera simulation conditions
        if condition.V0 is None:
            self.model.TPX = condition.T0.value_si, condition.P0.value_si, newMolFrac
        elif condition.P0 is None:
            self.model.TDX = condition.T0.value_si, 1.0 / condition.V0.value_si, newMolFrac
        else:
            raise Exception("Cantera conditions in which T0 and P0 or T0 and V0 are not the specified state "
                            "variables are not yet implemented.")

        # Choose reactor
        if condition.reactorType == 'IdealGasReactor':
            canteraReactor = ct.IdealGasReactor(self.model)
        elif condition.reactorType == 'IdealGasConstPressureReactor':
            canteraReactor = ct.IdealGasConstPressureReactor(contents=self.model)
        elif condition.reactorType == 'IdealGasConstPressureTemperatureReactor':
            canteraReactor = ct.IdealGasConstPressureReactor(contents=self.model, energy='off')
        else:
            raise Exception('Other types of reactor conditions are currently not supported')

        # Run this individual condition as a simulation
        canteraSimulation = ct.ReactorNet([canteraReactor])

        numCtReactions = len(self.model.reactions())
        if self.sensitiveSpecies:
            if ct.__version__ == '2.2.1':
                print 'Warning: Cantera version 2.2.1 may not support sensitivity analysis unless SUNDIALS was used during compilation.'
                print 'Warning: Upgrade to a newer version of Cantera in anaconda using the command "conda update -c rmg cantera"'
            # Add all the reactions as part of the analysis
            for i in range(numCtReactions):
                canteraReactor.add_sensitivity_reaction(i)
            # Set the tolerances for the sensitivity coefficients
            canteraSimulation.rtol_sensitivity = 1e-4
            canteraSimulation.atol_sensitivity = 1e-6

        # Initialize the variables to be saved
        times = []
        temperature = []
        pressure = []
        speciesData = []
        sensitivityData = []

        # Begin integration
        time = 0.0
        # Step the solver until the specified reaction time is reached, saving data at every internal timestep
        while canteraSimulation.time < condition.reactionTime.value_si:
            # Advance the state of the reactor network in time from the current time,
            # taking as many integrator timesteps as necessary.
            canteraSimulation.step(condition.reactionTime.value_si)
            times.append(canteraSimulation.time)
            temperature.append(canteraReactor.T)
            pressure.append(canteraReactor.thermo.P)
            speciesData.append(canteraReactor.thermo[speciesNamesList].X)

            if self.sensitiveSpecies:
                # Cantera returns mass-based sensitivities rather than molar concentration or mole fraction
                # based sensitivities. The equation for converting between them is:
                #
                #    d ln xi = d ln wi - sum_(species i) (dln wi) (xi)
                #
                # where xi is the mole fraction of species i and wi is the mass fraction of species i
                massFracSensitivityArray = canteraSimulation.sensitivities()
                if condition.reactorType == 'IdealGasReactor':
                    # Row 0: mass, Row 1: volume, Row 2: internal energy or temperature, Row 3+: mass fractions of species
                    massFracSensitivityArray = massFracSensitivityArray[3:, :]
                elif condition.reactorType == 'IdealGasConstPressureReactor' or \
                        condition.reactorType == 'IdealGasConstPressureTemperatureReactor':
                    # Row 0: mass, Row 1: enthalpy or temperature, Row 2+: mass fractions of the species
                    massFracSensitivityArray = massFracSensitivityArray[2:, :]
                else:
                    raise Exception('Other types of reactor conditions are currently not supported')

                for i in range(len(massFracSensitivityArray)):
                    massFracSensitivityArray[i] *= speciesData[-1][i]

                sensitivityArray = np.zeros(len(self.sensitiveSpecies) * len(self.model.reactions()))
                for index, species in enumerate(self.sensitiveSpecies):
                    for j in range(numCtReactions):
                        sensitivityArray[numCtReactions * index + j] = canteraSimulation.sensitivity(
                            species.toChemkin(), j)

                        for i in range(len(massFracSensitivityArray)):
                            if i not in inertIndexList:
                                # massFracSensitivity for inerts are returned as nan in Cantera,
                                # so we must not include them here
                                sensitivityArray[numCtReactions * index + j] -= massFracSensitivityArray[i][j]

                sensitivityData.append(sensitivityArray)

        # Convert speciesData and sensitivityData to numpy arrays
        speciesData = np.array(speciesData)
        sensitivityData = np.array(sensitivityData)

        # Resave data into generic data objects
        time = GenericData(label='Time', data=times, units='s')
        temperature = GenericData(label='Temperature', data=temperature, units='K')
        pressure = GenericData(label='Pressure', data=pressure, units='Pa')

        conditionData = []
        conditionData.append(temperature)
        conditionData.append(pressure)

        for index, species in enumerate(self.speciesList):
            # Create generic data object that saves the species object into the species object,
            # to allow easier manipulation later.
            speciesGenericData = GenericData(label=speciesNamesList[index],
                                             species=species,
                                             data=speciesData[:, index],
                                             index=species.index)
            conditionData.append(speciesGenericData)

        reactionSensitivityData = []
        for index, species in enumerate(self.sensitiveSpecies):
            for j in range(numCtReactions):
                reactionSensitivityGenericData = GenericData(
                    label='dln[{0}]/dln[k{1}]: {2}'.format(species.toChemkin(), j + 1, self.model.reactions()[j]),
                    species=species,
                    reaction=self.model.reactions()[j],
                    data=sensitivityData[:, numCtReactions * index + j],
                    index=j + 1,
                )
                reactionSensitivityData.append(reactionSensitivityGenericData)

        allData.append((time, conditionData, reactionSensitivityData))

    return allData

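
# Standalone numeric illustration (made-up numbers, not part of the method above) of the
# mass- to mole-fraction sensitivity conversion quoted in the comment:
#     d ln xi = d ln wi - sum_k (xk) (d ln wk)
import numpy as np

x = np.array([0.6, 0.3, 0.1])            # assumed mole fractions of three species
dln_w = np.array([0.02, -0.05, 0.01])    # assumed d ln w_i for a single perturbed parameter
dln_x = dln_w - np.dot(x, dln_w)         # subtract the mole-fraction-weighted sum
print(dln_x)
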
def local_analysis(self, sensitive_species, reaction_system_index=0, correlated=False, number=10, fileformat='.png'):
    """
    Conduct local uncertainty analysis on the reaction model.
    sensitive_species is a list of sensitive Species objects
    number is the number of highest contributing uncertain parameters desired to be plotted
    fileformat can be either .png, .pdf, or .svg
    """
    output = {}
    for sens_species in sensitive_species:
        csvfile_path = os.path.join(self.output_directory, 'solver',
                                    'sensitivity_{0}_SPC_{1}.csv'.format(reaction_system_index + 1,
                                                                         sens_species.index))
        time, data_list = parse_csv_data(csvfile_path)

        # Assign uncertainties
        thermo_data_list = []
        reaction_data_list = []
        for data in data_list:
            if data.species:
                for species in self.species_list:
                    if species.to_chemkin() == data.species:
                        index = self.species_list.index(species)
                        break
                else:
                    raise Exception('Chemkin name {} of species in the CSV file does not match anything in the '
                                    'species list.'.format(data.species))
                data.uncertainty = self.thermo_input_uncertainties[index]
                thermo_data_list.append(data)
            if data.reaction:
                rxn_index = int(data.index) - 1
                data.uncertainty = self.kinetic_input_uncertainties[rxn_index]
                reaction_data_list.append(data)

        if correlated:
            correlated_thermo_data = {}
            correlated_reaction_data = {}
            for data in thermo_data_list:
                # Unpack the labels and partial uncertainties
                for label, dpG in data.uncertainty.items():
                    if label in correlated_thermo_data:
                        # Multiply the sensitivity with the partial uncertainty
                        correlated_thermo_data[label].data[-1] += data.data[-1] * dpG
                    else:
                        correlated_thermo_data[label] = GenericData(data=[data.data[-1] * dpG],
                                                                    uncertainty=1, label=label, species='dummy')
            for data in reaction_data_list:
                for label, dplnk in data.uncertainty.items():
                    if label in correlated_reaction_data:
                        correlated_reaction_data[label].data[-1] += data.data[-1] * dplnk
                    else:
                        correlated_reaction_data[label] = GenericData(data=[data.data[-1] * dplnk],
                                                                      uncertainty=1, label=label, reaction='dummy')
            thermo_data_list = list(correlated_thermo_data.values())
            reaction_data_list = list(correlated_reaction_data.values())

        # Compute total variance
        total_variance = 0.0
        for data in thermo_data_list:
            total_variance += (data.data[-1] * data.uncertainty) ** 2
        for data in reaction_data_list:
            total_variance += (data.data[-1] * data.uncertainty) ** 2

        if not correlated:
            # Add the reaction index to the data label of the reaction uncertainties
            # data.index stores the physical index of the reaction + 1, so we convert it to the RMG index here
            for data in reaction_data_list:
                data.label = 'k' + str(self.reaction_list[data.index - 1].index) + ': ' + data.label.split()[-1]

        if correlated:
            folder = os.path.join(self.output_directory, 'correlated')
        else:
            folder = os.path.join(self.output_directory, 'uncorrelated')
        if not os.path.exists(folder):
            try:
                os.makedirs(folder)
            except OSError as e:
                raise OSError('Uncertainty output directory could not be created: {0!s}'.format(e))

        r_path = os.path.join(folder, 'kineticsLocalUncertainty_{0}'.format(sens_species.to_chemkin()) + fileformat)
        t_path = os.path.join(folder, 'thermoLocalUncertainty_{0}'.format(sens_species.to_chemkin()) + fileformat)

        reaction_uncertainty = ReactionSensitivityPlot(x_var=time, y_var=reaction_data_list,
                                                       num_reactions=number).uncertainty_plot(total_variance,
                                                                                              filename=r_path)
        thermo_uncertainty = ThermoSensitivityPlot(x_var=time, y_var=thermo_data_list,
                                                   num_species=number).uncertainty_plot(total_variance,
                                                                                        filename=t_path)

        output[sens_species] = (total_variance, reaction_uncertainty, thermo_uncertainty)

    return output

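
# Hedged usage sketch (not from this file): assuming `uncertainty` is an already-configured
# analysis object whose class defines local_analysis as above, with species_list, reaction_list,
# thermo_input_uncertainties, kinetic_input_uncertainties and output_directory populated, and the
# solver sensitivity CSVs already written, a typical call and unpacking of its return value is:
#
#     results = uncertainty.local_analysis(sensitive_species=[ethane, h_atom],  # hypothetical Species objects
#                                          reaction_system_index=0,
#                                          correlated=False,
#                                          number=10,
#                                          fileformat='.png')
#     for species, (total_variance, kinetics_contrib, thermo_contrib) in results.items():
#         print(species, total_variance)
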
def simulate(self):
    """
    Run all the conditions as a cantera simulation.
    Returns the data as a list of tuples containing: (time, [list of temperature, pressure, and species data])
    for each reactor condition
    """
    # Get all the cantera names for the species
    speciesNamesList = [getSpeciesIdentifier(species) for species in self.speciesList]

    allData = []
    for condition in self.conditions:

        # First translate the molFrac from species objects to species names
        newMolFrac = {}
        for key, value in condition.molFrac.iteritems():
            newkey = getSpeciesIdentifier(key)
            newMolFrac[newkey] = value

        # Set Cantera simulation conditions
        if condition.V0 is None:
            self.model.TPX = condition.T0.value_si, condition.P0.value_si, newMolFrac
        elif condition.P0 is None:
            self.model.TDX = condition.T0.value_si, 1.0 / condition.V0.value_si, newMolFrac
        else:
            raise Exception("Cantera conditions in which T0 and P0 or T0 and V0 are not the specified state "
                            "variables are not yet implemented.")

        # Choose reactor
        if condition.reactorType == 'IdealGasReactor':
            canteraReactor = ct.IdealGasReactor(self.model)
        elif condition.reactorType == 'IdealGasConstPressureReactor':
            canteraReactor = ct.IdealGasConstPressureReactor(self.model)
        else:
            raise Exception('Other types of reactor conditions are currently not supported')

        # Run this individual condition as a simulation
        canteraSimulation = ct.ReactorNet([canteraReactor])

        # Initialize the variables to be saved
        times = []
        temperature = []
        pressure = []
        speciesData = []

        # Begin integration
        time = 0.0
        # Step the solver until the specified reaction time is reached, saving data at every internal timestep
        while canteraSimulation.time < condition.reactionTime.value_si:
            # Advance the state of the reactor network in time from the current time,
            # taking as many integrator timesteps as necessary.
            canteraSimulation.step(condition.reactionTime.value_si)
            times.append(canteraSimulation.time)
            temperature.append(canteraReactor.T)
            pressure.append(canteraReactor.thermo.P)
            speciesData.append(canteraReactor.thermo[speciesNamesList].X)

        # Convert speciesData to a numpy array
        speciesData = np.array(speciesData)

        # Resave data into generic data objects
        time = GenericData(label='Time', data=times, units='s')
        temperature = GenericData(label='Temperature', data=temperature, units='K')
        pressure = GenericData(label='Pressure', data=pressure, units='Pa')

        conditionData = []
        conditionData.append(temperature)
        conditionData.append(pressure)

        for index, species in enumerate(self.speciesList):
            # Create generic data object that saves the species object into the species object,
            # to allow easier manipulation later.
            speciesGenericData = GenericData(label=speciesNamesList[index],
                                             species=species,
                                             data=speciesData[:, index],
                                             index=species.index)
            conditionData.append(speciesGenericData)

        allData.append((time, conditionData))

    return allData

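
# Minimal standalone sketch of the same integration pattern, assuming Cantera 2.5+ and its
# bundled gri30.yaml mechanism rather than the RMG-generated model used above; states are
# recorded at every internal integrator step.
import cantera as ct

gas = ct.Solution('gri30.yaml')
gas.TPX = 1500.0, ct.one_atm, 'CH4:1, O2:2, N2:7.52'
reactor = ct.IdealGasReactor(gas)
net = ct.ReactorNet([reactor])

times, temperatures = [], []
while net.time < 1.0e-3:     # integrate to 1 ms
    net.step()               # take one internal solver step
    times.append(net.time)
    temperatures.append(reactor.T)
print(len(times), temperatures[-1])
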
def simulate(self):
    """
    Simulate the mechanism and store all results to the all_data attribute.
    """
    if self.sensitive_species:
        self.logger.info('Running a simulation with SA using CanteraConstantTP...')
    else:
        self.logger.info('Running a simulation using CanteraConstantTP...')

    species_names_list = [species.name for species in self.model.species()]
    self.all_data = list()

    for condition in self.conditions:
        # Set Cantera simulation conditions
        T0 = condition.T0.value_si
        try:
            V0 = self.conditions[0].V0.value_si
            P0 = None
        except AttributeError:
            P0 = condition.P0.value_si
            V0 = None
        self.reinitialize_simulation(T0=T0,
                                     P0=P0,
                                     X0=condition.mol_frac,
                                     V0=V0,
                                     )

        # Initialize the variables to be saved
        times = []
        temperature = []
        pressure = []
        species_data = []
        kinetic_sensitivity_data = []
        thermo_sensitivity_data = []

        # Begin integration
        while self.cantera_simulation.time < condition.reaction_time.value_si:
            # Advance the state of the reactor network in time from the current time,
            # taking as many integrator timesteps as necessary.
            self.cantera_simulation.step()
            times.append(self.cantera_simulation.time)
            temperature.append(self.cantera_reactor.T)
            pressure.append(self.cantera_reactor.thermo.P)
            species_data.append(self.cantera_reactor.thermo[species_names_list].X)

            if self.sensitive_species:
                # Cantera returns mass-based sensitivities rather than molar concentration or mole fraction
                # based sensitivities. The equation for converting between them is:
                #
                #    d ln xi = d ln wi - sum_(species i) (dln wi) (xi)
                #
                # where xi is the mole fraction of species i and wi is the mass fraction of species i
                mass_frac_sensitivity_array = self.cantera_simulation.sensitivities()
                if condition.reactor_type == 'IdealGasReactor':
                    # Row 0: mass, Row 1: volume, Row 2: internal energy or temperature, Row 3+: mass fractions of species
                    mass_frac_sensitivity_array = mass_frac_sensitivity_array[3:, :]
                elif condition.reactor_type == 'IdealGasConstPressureReactor' \
                        or condition.reactor_type == 'IdealGasConstPressureTemperatureReactor':
                    # Row 0: mass, Row 1: enthalpy or temperature, Row 2+: mass fractions of the species
                    mass_frac_sensitivity_array = mass_frac_sensitivity_array[2:, :]
                else:
                    raise Exception('Other types of reactor conditions are currently not supported')

                for i in range(len(mass_frac_sensitivity_array)):
                    mass_frac_sensitivity_array[i] *= species_data[-1][i]

                # extract kinetics SA
                kinetics_mass_frac_sa = mass_frac_sensitivity_array[:, 0:self.num_ct_reactions]
                sensitivity_array = np.zeros(len(self.sensitive_species) * len(self.model.reactions()))
                for index, species in enumerate(self.sensitive_species):
                    for j in range(self.num_ct_reactions):
                        sensitivity_array[self.num_ct_reactions * index + j] = self.cantera_simulation.sensitivity(
                            species, j)
                        for i in range(len(kinetics_mass_frac_sa)):
                            if i not in self.inert_index_list:
                                # massFracSensitivity for inerts are returned as 0.0 in Cantera,
                                # so we do not include them here
                                sensitivity_array[self.num_ct_reactions * index + j] -= kinetics_mass_frac_sa[i][j]
                kinetic_sensitivity_data.append(sensitivity_array)

                # extract thermo SA
                thermo_mass_frac_sa = mass_frac_sensitivity_array[:, self.num_ct_reactions:]
                sensitivity_array = np.zeros(len(self.sensitive_species) * self.num_ct_species)
                for index, species in enumerate(self.sensitive_species):
                    for j in range(self.num_ct_species):
                        sensitivity_array[self.num_ct_species * index + j] = self.cantera_simulation.sensitivity(
                            species, j + self.num_ct_reactions)
                        for i in range(len(mass_frac_sensitivity_array)):
                            if i not in self.inert_index_list:
                                # massFracSensitivity for inerts are returned as 0.0 in Cantera,
                                # so we do not include them here
                                sensitivity_array[self.num_ct_species * index + j] -= thermo_mass_frac_sa[i][j]
                thermo_sensitivity_data.append(sensitivity_array)

        # Convert species_data and sensitivity data to numpy arrays
        species_data = np.array(species_data)
        kinetic_sensitivity_data = np.array(kinetic_sensitivity_data)
        thermo_sensitivity_data = np.array(thermo_sensitivity_data)

        # Resave data into generic data objects
        time = GenericData(label='Time', data=times, units='s')
        temperature = GenericData(label='Temperature', data=temperature, units='K')
        pressure = GenericData(label='Pressure', data=pressure, units='Pa')

        condition_data = []
        condition_data.append(temperature)
        condition_data.append(pressure)

        for index, species in enumerate(self.model.species()):
            # Create generic data object that saves the species object into the species object,
            # to allow easier manipulation later.
            species_generic_data = GenericData(label=species.name,
                                               species=species,
                                               data=species_data[:, index],
                                               index=index,
                                               )
            condition_data.append(species_generic_data)

        # save kinetic data as generic data objects
        reaction_sensitivity_data = []
        for index, species in enumerate(self.sensitive_species):
            for j in range(self.num_ct_reactions):
                reaction_sensitivity_generic_data = GenericData(
                    label='dln[{0}]/dln[k{1}]: {2}'.format(species, j + 1, self.model.reactions()[j]),
                    species=species,
                    reaction=self.model.reactions()[j],
                    data=kinetic_sensitivity_data[:, self.num_ct_reactions * index + j],
                    index=j + 1,
                )
                reaction_sensitivity_data.append(reaction_sensitivity_generic_data)

        # save thermo data as generic data objects
        thermodynamic_sensitivity_data = []
        for index, species in enumerate(self.sensitive_species):
            for j in range(self.num_ct_species):
                thermo_sensitivity_generic_data = GenericData(
                    label='dln[{0}]/dH[{1}]'.format(species, self.model.species()[j].name),
                    species=species,
                    data=thermo_sensitivity_data[:, self.num_ct_species * index + j],
                    index=j + 1,
                )
                thermodynamic_sensitivity_data.append(thermo_sensitivity_generic_data)

        self.all_data.append((time, condition_data, reaction_sensitivity_data, thermodynamic_sensitivity_data))

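
# Note added for clarity (inferred from the indexing in simulate() above, not from external
# documentation): the sensitivity parameter vector is assumed to be laid out with the
# num_ct_reactions rate-coefficient multipliers first, followed by the num_ct_species enthalpy
# perturbations, which is why the thermo block queries
# self.cantera_simulation.sensitivity(species, j + self.num_ct_reactions).
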
            # This is a corrector that reduces dt if the oxygen concentration varied
            # too much between resets
            if abs((r.thermo[O2_name].X - O2_0) / O2_0) > 0.1:
                dt = 0.1 * dt

            # Here we can increase dt if the O2 content is relatively constant,
            # but only do it at the same modulo of output so we are sure that it
            # occurs on a mod=0 timestep and we don't lose output
            if (abs((r.thermo[O2_name].X - O2_0) / O2_0) < 0.01 and dt < d_out
                    and np.mod(np.around(time, decimals=5), np.around(d_out, decimals=5)) == 0):
                dt = 10 * dt

    # Convert data
    out_species_data = np.array(out_species_data)
    time = GenericData(label='Time', data=out_times, units='s')
    out_temperature = GenericData(label='Temperature', data=out_temperature, units='K')
    out_density = GenericData(label='Density', data=out_density, units='kg/m3')
    TDX_data = [out_temperature, out_density]

    for i, species_name in enumerate(species_names):
        match = re.search(r'\([0-9]*\)', species_name)
        if match:
            index = int(match.group()[1:-1])
        else:
            index = -1  # don't plot these species (inerts and non-reactive solvent)
        species_generic_data = GenericData(
            label=out_dat_SMILES[i],