Example #1
def write_xml(family_names = None):
	"""Writes the library to xml files
	"""
	import os
	import logging
	import xml.dom.minidom
	
	# Create document
	dom = xml.dom.minidom.Document()

	# Process root element
	root = dom.createElement('rmgoutput')
	dom.appendChild(root)
	
	
	if not family_names: 
		family_names = reaction.kineticsDatabase.families.keys()
		
	for family_name in family_names:
		family = reaction.kineticsDatabase.families[family_name]
		print 
		if not family.library:
			logging.debug("Family '%s' has no data in the library."%family_name)
			if family.reverse.library:
				logging.debug("(but its reverse '%s' does)"%family.reverse.label)
			continue
		
		logging.info("Writing xml for reaction family: %s (%s)"%(family_name,
			os.path.basename(os.path.abspath(family._path))) )
			
		family.library.toXML(dom,root)
		print dom.toprettyxml()
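
A minimal usage sketch, assuming the kinetics database has already been loaded into the module-level `reaction` object as the function expects ('H_Abstraction' is a placeholder family name):

write_xml()                  # write XML for every loaded family
write_xml(['H_Abstraction']) # or restrict to the named families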
Example #2
def loadFrequencyDatabase(frequenciesDatabasePath):
	"""
	Create and load the frequencies databases from the given path. 
	
	The path should be the folder containing Dictionary.txt, Tree.txt, Library.txt
	"""
	import logging
	import os.path
	
	# Create and load the frequencies database
	database = FrequencyDatabase()
	logging.debug('\tFrequencies database from '+frequenciesDatabasePath)
	database.load(
		dictstr=os.path.join(frequenciesDatabasePath, 'Dictionary.txt'),
		treestr=os.path.join(frequenciesDatabasePath, 'Tree.txt'),
		libstr=os.path.join(frequenciesDatabasePath, 'Library.txt'))

	return database
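
A usage sketch (the path is a placeholder; any folder containing the three database files will do):

database = loadFrequencyDatabase('/path/to/frequencies')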
Example #3
def loadFrequencyDatabase(databasePath):
	"""
	Create and load the frequencies database from the 'frequencies' subfolder of databasePath.
	"""
	import logging
	import os.path
	
	databasePath = os.path.join(databasePath, 'frequencies')

	# Create and load the frequencies database
	database = FrequencyDatabase()
	logging.debug('\tFrequencies database')
	database.load(
		dictstr=os.path.join(databasePath, 'Dictionary.txt'),
		treestr=os.path.join(databasePath, 'Tree.txt'),
		libstr=os.path.join(databasePath, 'Library.txt'))

	return database
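
This variant differs from Example #2 only in appending the 'frequencies' subfolder itself, so the two are called with different paths (placeholders shown):

database = loadFrequencyDatabase('/path/to/database')   # this variant
# vs. Example #2: loadFrequencyDatabase('/path/to/database/frequencies')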
Example #4
				reactionModel,
				reactionSystems),
				f)
			f.close()

			# Update RMG execution statistics
			logging.info('Updating RMG execution statistics...')
			coreSpeciesCount.append(len(reactionModel.core.species))
			coreReactionCount.append(len(reactionModel.core.reactions))
			edgeSpeciesCount.append(len(reactionModel.edge.species))
			edgeReactionCount.append(len(reactionModel.edge.reactions))
			execTime.append(time.time() - settings.initializationTime)
			from guppy import hpy
			hp = hpy()
			memoryUse.append(hp.heap().size / 1.0e6)
			logging.debug('Execution time: %s s' % (execTime[-1]))
			logging.debug('Memory used: %s MB' % (memoryUse[-1]))
			restartSize.append(os.path.getsize(os.path.join(settings.outputDirectory,'restart.pkl')) / 1.0e6)
			saveExecutionStatistics(execTime, coreSpeciesCount, coreReactionCount, edgeSpeciesCount, edgeReactionCount, memoryUse, restartSize)
			generateExecutionPlots(execTime, coreSpeciesCount, coreReactionCount, edgeSpeciesCount, edgeReactionCount, memoryUse, restartSize)

		logging.info('')
		
		# Consider stopping gracefully if the next iteration might take us
		# past the wall time
		if settings.wallTime > 0 and len(execTime) > 1:
			t = execTime[-1]
			dt = execTime[-1] - execTime[-2]
			if t + 2 * dt > settings.wallTime:
				logging.info('MODEL GENERATION TERMINATED')
				logging.info('')
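
The wall-time check above extrapolates from the last iteration: if it took dt seconds, the next one is assumed to take up to about twice that. A minimal standalone sketch of the same heuristic (the function name and signature are illustrative, not from the source):

def should_stop(execTime, wallTime):
	"""Return True if the next iteration would likely exceed wallTime.

	execTime is the list of cumulative execution times in seconds, one
	entry per completed iteration; wallTime <= 0 disables the check.
	"""
	if wallTime <= 0 or len(execTime) < 2:
		return False
	t = execTime[-1]
	dt = execTime[-1] - execTime[-2]  # duration of the last iteration
	return t + 2 * dt > wallTime      # assume the next one takes ~2*dt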
Example #5
def fit_groups(family_names = None):
	"""Decouples a nested tree and fits values to groups for each seperate tree.
	   If given a list of family names, only does those families.
	"""
	import os
	import math
	import logging
	import numpy
	import numpy.linalg
	import pylab
	
	if not family_names: 
		family_names = reaction.kineticsDatabase.families.keys()
		
	for family_name in family_names:
		family = reaction.kineticsDatabase.families[family_name]
		print 
		if not family.library:
			logging.debug("Family '%s' has no data in the library."%family_name)
			if family.reverse.library:
				logging.debug("(but its reverse '%s' does)"%family.reverse.label)
			continue
		
		logging.info("Fitting groups for reaction family: %s (%s)"%(family_name,
			os.path.basename(os.path.abspath(family._path))) )
		
		# Get set of all nodes
		node_set = family.tree.parent.keys()
		non_top_nodes = [ node for node in node_set if family.tree.parent[node] ]
		top_nodes = [ node for node in node_set if not family.tree.parent[node] ]
		group_names = list(non_top_nodes) # copy, so appending does not alter non_top_nodes
		group_names.append("Constant")
		family.tree.children['Constant']=[]
		
		#: a dictionary of lists. key = node, value = list of kinetics items which contributed to that node
		kinetics_used_in={'Constant':[]}
		for node in node_set: # must initialise in loop so each has a separate list instance!
			kinetics_used_in[node] = list() 
		Ts = [300, 500, 1000, 1500]
		
		def rates_string(k):
			"""Return a string representing the rates of :class:`kinetics` object k:
			log10 of k evaluated at each temperature in Ts (using the Hrxn from
			the enclosing scope)."""
			string = "%5.2f "*len(Ts)
			return string%tuple([ math.log10(k.getRateConstant(T,Hrxn)) for T in Ts ])

		
		A_list = []
		b_list = []
		# Get available data
		
		to_delete=[]
		for key, kinetics in family.library.iteritems():
			if kinetics.alpha:
				logging.warning("Warning: %s %s has EP alpha = %g"%(kinetics.index, kinetics.label, kinetics.alpha))
				to_delete.append(key)
				
			#if re.search('O2b',kinetics.label): 
			#	logging.warning("Removing %s %s because I don't like O2b"%(kinetics.index, kinetics.label))
			#	to_delete.append(key)
			#	
		for key in to_delete:
			del family.library[key]
			logging.warning("Deleting %s from kinetics library!"%key)
				
				
		for key, kinetics in family.library.iteritems():
			nodes = key.split(';')
			# example:
			#  nodes = ['A11', 'B11']
			#  kinetics = <rmg.reaction.ArrheniusEPKinetics instance>
			#b_row = [ math.log(kinetics.A),
			#		  kinetics.n,
			#		  kinetics.alpha,
			#		  kinetics.E0 ]
			if kinetics.alpha:
				logging.warning("Warning: %s has EP alpha = %g"%(nodes,kinetics.alpha))
			
			Hrxn=0
			
			b_row = [ math.log10(kinetics.getRateConstant(T,Hrxn)) for T in Ts ]
				
			all_ancestors=list()
			kinetics.used_in_groups = list()
			kinetics.used_in_combinations = list()
			for node in nodes:
				# start with the most specific - the node itself
				# then add the ancestors
				ancestors = [node]
				ancestors.extend( family.tree.ancestors(node) )
				# append to the list of lists
				all_ancestors.append(ancestors)
				# add to the list 
				kinetics.used_in_groups.extend(ancestors)
				
				for ancestor in ancestors:
					kinetics_used_in[ancestor].append(kinetics)
			kinetics_used_in['Constant'].append(kinetics)
			
			# example
			#  all_ancestors = [['A11','A1','A'], ['B11','B1','B']]
			#  kinetics.used_in_groups = [ 'A11','A1','A','B11','B1','B' ]
			#  kinetics_used_in['A11'] = kinetics_used_in['A1'] ... = [... <kinetics>]
			
			all_combinations = data.getAllCombinations(all_ancestors)
			
			# example:
			#  all_combinations = 
			#  [['A11', 'B11'], ['A1', 'B11'], ['A', 'B11'],  ['A11', 'B1'],
			#   ['A1', 'B1'], ['A', 'B1'],  ['A11', 'B'], ['A1', 'B'], ['A', 'B']]
			
			for combination in all_combinations:
				# Create a row of the A matrix. Each column is for a non_top_node
				# It contains 1 if that node exists in combination, else 0
				A_row = [int(node in combination) for node in non_top_nodes]
				# Add on a column at the end for constant C which is always there
				A_row.append(1)
				
				kinetics.used_in_combinations.append(len(A_list))
				A_list.append(A_row)
				b_list.append(b_row)
				
		A = numpy.array(A_list)
		b = numpy.array(b_list)
		
		logging.info("Library contained %d rates"%len(family.library))
		logging.info("Matrix for inversion is %d x %d"%A.shape)
		
		x, residues, rank, s = numpy.linalg.lstsq(A,b)
		
		fitted_b = numpy.dot(A,x)
		errors = fitted_b - b
		#: squared and SUMMED over temperatures, not averaged
		errors_sum_squared = numpy.sum(errors*errors, axis=1)
		
		group_values=dict()
		group_error=dict()
		group_count=dict()
		group_error_MAD_by_T=dict()
		
		for node in top_nodes:
			group_values[node] = tuple([0 for i in Ts]) # e.g. (0, 0, 0, 0) - one zero per temperature
			group_error[node] = 0
			group_count[node] = 0
			group_error_MAD_by_T[node] = tuple([0 for i in Ts]) # e.g. (0, 0, 0, 0) - one zero per temperature
			
		for i in range(len(x)):
			group_values[group_names[i]] = tuple(x[i,:])
			
		for i in range(len(x)): # for each group
			#: vector of 1s and 0s, one for each rate-group
			rates_in_group = A[:,i]  
			#: number of data points training this group (each measured rate may be counted many times)
			group_count[group_names[i]] = sum(rates_in_group)
			#: RMS error for this group (where M = mean over temperatures and rates training the group) 
			group_error[group_names[i]] = numpy.sqrt(
				sum(rates_in_group * errors_sum_squared)  /
					 sum(rates_in_group) / len(Ts)   )
			#: Mean Absolute Deviation, reported by Temperature (as tuple)
			group_error_MAD_by_T[group_names[i]] = tuple( 
				numpy.dot(rates_in_group, abs(errors)) /
				 sum(rates_in_group)  )
				
		for key, kinetics in family.library.iteritems():
			rows = kinetics.used_in_combinations
			#: RMS error for this rate (where M = mean over temperatures and group combinations it's estimated by)
			kinetics.RMS_error = numpy.sqrt( 
				sum([errors_sum_squared[i] for i in rows])
				 / len(rows) / len(Ts) 
				)
			kinetics.key = key
		rates = family.library.values()
		rates.sort(key=lambda k: k.RMS_error)
		print "Rate expressions sorted by how well they are predicted by their group combinations"
		
		rates_1000 = []
		rates_err = []
		for k in rates:
			print "%-5s %-30s\tRMS error: %.2f  Rates: %s  %.30s"%(k.index, k.key, k.RMS_error, rates_string(k), k.comment )
			rates_1000.append( math.log10(k.getRateConstant(1000,Hrxn)) )
			rates_err.append( k.RMS_error )  # [Ts.index(T)]
		rates_1000 = numpy.array(rates_1000)
		rates_err = numpy.array(rates_err)
		
		fig_number = family_names.index(family_name)
		fig1 = pylab.figure( fig_number )
		pylab.plot(rates_1000, rates_err, 'o')
		pylab.xlabel('log10(k) at 1000K')
		pylab.ylabel('RMSE')
		pylab.show()
		
		def print_node_tree(node,indent=0):
			print (' '*indent +
					node.ljust(17-indent) + 
					("\t%7.2g"*len(group_values[node])) % group_values[node]  +
					"\t%6.2g\t%d"%(group_error[node],group_count[node]) + 
					("\t%7.3g"*len(group_error_MAD_by_T[node])) % group_error_MAD_by_T[node] 
				)
			children = family.tree.children[node]
			if children:
				children.sort()
				for child in children:
					# recurse!
					print_node_tree(child,indent+1)
					
		print ("Log10(k) at T=   " + ("\t%7g"*len(Ts)) % tuple(Ts) + 
				'\t RMS\tcount' + 
				("\tMAD @ %d"*len(Ts)) % tuple(Ts) 
			)
			
		print_node_tree('Constant')
		for node in top_nodes:
			print_node_tree(node)
		print
		
		
		fig = pylab.figure( 100 + fig_number )
		
		xvals = numpy.array([ group_count[group] for group in group_names ])
		yvals = numpy.array([ group_error[group] for group in group_names ])
		pylab.semilogx(xvals,yvals,'o',picker=5) # 5 points tolerance
		pylab.title(family_name)
		
		def onpick(event):
			thisline = event.artist
			xdata = thisline.get_xdata()
			ydata = thisline.get_ydata()
			for ind in event.ind:
				group_name = group_names[ind]
				print "#%d Name: %s \tRates:%d \tNode-Rates:%d \tRMS error: %g"%(ind, group_name, len(kinetics_used_in[group_name]) , xvals[ind], yvals[ind])
				print "MAD errors:"+("  %.2f"*len(Ts))%group_error_MAD_by_T[group_name]
				print "Kinetics taken from:"
				rates = kinetics_used_in[group_name]
				rates.sort(key=lambda k: k.RMS_error)
				for k in rates:
					print "%s\tIndex:%s \t%s "%(k.key,k.index,repr(k))
					print "RMS error: %.2f"%(k.RMS_error), 
					print "Rates: ",rates_string(k)
					for combo in k.used_in_combinations:
						#print "A[%d,%d] ="%(combo,ind),A[combo,ind]
						if not A[combo,ind]:
							#print "Rate didn't use the node in question (presumably used an ancestor)"
							continue
						print "Using",
						used_nodes = [ group_names[i] for i in A[combo,:].nonzero()[0] ]
						used_nodes.remove(group_name)
						print group_name + ' with ' + ' + '.join(used_nodes) + '\t',
						rms = numpy.sqrt( errors_sum_squared[combo] / len(Ts) )
						print "RMSE: %.2f  Err(T):"%(rms), errors[combo]
					print 
				#print 'check %g:'%ind, zip(xdata[ind], ydata[ind])
				
		connection_id = fig.canvas.mpl_connect('pick_event', onpick)
		# disconnect with: fig.canvas.mpl_disconnect(connection_id) 
		pylab.show()
		#http://matplotlib.sourceforge.net/users/event_handling.html
		
		import pdb; pdb.set_trace() # drop into the debugger to explore the fit interactively
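
At its core the fit solves an overdetermined linear system: each row of A flags which groups (plus the constant) participate in one combination, and the matching row of b holds log10(k) at each temperature. A tiny standalone sketch of that least-squares step, with made-up numbers:

import numpy
import numpy.linalg

# Two groups plus the constant column; one row per (rate, combination).
A = numpy.array([[1, 0, 1],    # rate 1 trained group 1
                 [0, 1, 1],    # rate 2 trained group 2
                 [1, 1, 1]])   # rate 3 trained both
# log10(k) at each of two temperatures, one row per row of A.
b = numpy.array([[ 8.0,  9.0],
                 [ 7.5,  8.2],
                 [10.0, 11.0]])

x, residues, rank, s = numpy.linalg.lstsq(A, b)
fitted_b = numpy.dot(A, x)   # group-additivity estimate of each rate
errors = fitted_b - b        # residuals feeding the RMS/MAD statistics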
Example #6
	def applyApproximateMethod(self, T, P, Elist, method, errorCheck=True):
		"""
		Apply the approximate method specified in `method` to estimate the
		phenomenological rate coefficients for the network. This function
		expects that all preparations have already been made, as in the
		:meth:`calculateRateCoefficients` method.
		"""

		logging.debug('Applying %s method at %g K, %g bar...' % (method, T, P*1e-5))

		# Matrix and vector size indicators
		nIsom = self.numUniIsomers()
		nProd = self.numMultiIsomers()
		nGrains = len(Elist)

		dE = Elist[1] - Elist[0]

		# Density of states per partition function (i.e. normalized density of
		# states with respect to Boltzmann weighting factor) for each isomer
		densStates = numpy.zeros([nIsom,nGrains], numpy.float64)
		for i in range(nIsom): densStates[i,:] = self.isomers[i].densStates * dE / self.isomers[i].Q
		
		# If there are no product channels, we must temporarily create a fake
		# one; this is because f2py can't handle matrices with a dimension of zero
		if nProd == 0: nProd = 1

		# Active-state energy of each isomer
		Eres = numpy.zeros([nIsom+nProd], numpy.float64)
		for i, isomer in enumerate(self.isomers):
			Eres[i] = isomer.getActiveSpaceEnergy(self.pathReactions)
		
		# Isomerization, dissociation, and association microcanonical rate
		# coefficients, respectively
		Kij = numpy.zeros([nIsom,nIsom,nGrains], numpy.float64)
		Gnj = numpy.zeros([nProd,nIsom,nGrains], numpy.float64)
		Fim = numpy.zeros([nIsom,nProd,nGrains], numpy.float64)
		for reaction in self.pathReactions:
			i = self.indexOf(reaction.reactant)
			j = self.indexOf(reaction.product)
			if reaction.isIsomerization():
				Kij[j,i,:] = reaction.kf
				Kij[i,j,:] = reaction.kb
			elif reaction.isDissociation():
				Gnj[j-nIsom,i,:] = reaction.kf
				Fim[i,j-nIsom,:] = reaction.kb
			elif reaction.isAssociation():
				Fim[j,i-nIsom,:] = reaction.kf
				Gnj[i-nIsom,j,:] = reaction.kb

		if method.lower() == 'modifiedstrongcollision':

			# Modified collision frequency of each isomer
			collFreq = numpy.zeros([nIsom], numpy.float64)
			for i in range(nIsom): collFreq[i] = self.isomers[i].collFreq * \
				self.isomers[i].calculateCollisionEfficiency(T, self.pathReactions, self.bathGas.expDownParam, Elist)

			# Apply modified strong collision method
			import msc
			K, msg = msc.estimateratecoefficients_msc(T, P, Elist, collFreq, densStates, Eres,
				Kij, Fim, Gnj, nIsom, nProd, nGrains)
			msg = msg.strip()
			if msg != '':
				raise UnirxnNetworkException('Unable to apply modified strong collision method: %s' % msg)

		elif method.lower() == 'reservoirstate' or method.lower() == 'chemicaleigenvalues':

			# Average energy transferred in a deactivating collision
			dEdown = self.bathGas.expDownParam

			# Ground-state energy for each isomer
			E0 = numpy.zeros([nIsom], numpy.float64)
			for i in range(nIsom): E0[i] = self.isomers[i].E0

			# The full collision matrix for each isomer
			import mastereqn
			Mcoll = numpy.zeros([nIsom,nGrains,nGrains], numpy.float64)
			for i in range(nIsom):
				collFreq = self.isomers[i].collFreq
				densStates0 = self.isomers[i].densStates
				Mcoll[i,:,:], msg = mastereqn.collisionmatrix(T, P, Elist, collFreq, densStates0, E0[i], dEdown)
				msg = msg.strip()
				if msg != '':
					raise UnirxnNetworkException('Unable to determine collision matrix for isomer %i: %s' % (i, msg))

			if method.lower() == 'reservoirstate':

				# Apply reservoir state method
				import rs
				K, msg = rs.estimateratecoefficients_rs(T, P, Elist, Mcoll, densStates, E0, Eres,
					Kij, Fim, Gnj, dEdown, nIsom, nProd, nGrains)
				msg = msg.strip()
			
			elif method.lower() == 'chemicaleigenvalues':

				# Ground-state energy for each isomer
				E0 = numpy.zeros([nIsom+nProd], numpy.float64)
				for i in range(nIsom+nProd): E0[i] = self.isomers[i].E0

				# Use free energy to determine equilibrium ratios of each isomer and product channel
				eqRatios = numpy.zeros(nIsom+nProd, numpy.float64)
				for i, isom in enumerate(self.isomers):
					G = sum([spec.getFreeEnergy(T) for spec in isom.species])
					eqRatios[i] = math.exp(-G / constants.R / T)
				eqRatios /= numpy.sum(eqRatios)
				
				# Apply chemically-significant eigenvalue method
				import cse
				K, msg = cse.estimateratecoefficients_cse(T, P, Elist, Mcoll, E0,
					densStates, eqRatios, Kij, Fim, Gnj, nIsom, nProd, nGrains)
				msg = msg.strip()

		else:
			raise UnirxnNetworkException('Unknown approximate method "%s".' % method)

		if errorCheck:
			if msg == '':
				if not numpy.isfinite(K).all():
					print K
					msg = 'Non-finite rate constant returned at %s K, %s Pa.' % (T, P)

			if msg != '':
				raise UnirxnNetworkException('Unable to apply method %s: %s' % (method, msg))

		# If we had to create a temporary (fake) product channel, then don't
		# return the last row and column of the rate coefficient matrix
		if self.numMultiIsomers() == 0:
			return K[:-1,:-1]
		else:
			return K
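
The chemically-significant-eigenvalues branch weights each well and product channel by a Boltzmann factor of its free energy before normalising. A standalone sketch of that step (the free energies are made up; R is the gas constant):

import math
import numpy

R = 8.314472                 # gas constant, J/(mol*K)
T = 1000.0                   # temperature, K
G = [0.0, 5000.0, 20000.0]   # free energy of each isomer/channel, J/mol

eqRatios = numpy.array([math.exp(-Gi / R / T) for Gi in G])
eqRatios /= numpy.sum(eqRatios)  # normalise so the ratios sum to one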
Example #7
	def simulate(self, model):
		"""
		Conduct a simulation of the current reaction system using the core-edge
		reaction model `model`.

		Edge species fluxes are tracked, relative to the characteristic core
		flux at that time, throughout the simulation.
		If one exceeds `model.fluxToleranceInterrupt` the simulation
		is interrupted, and that species is returned.
		The highest relative flux reached by each species during the simulation
		is stored for later analysis.
		If one or more of these exceed `model.fluxToleranceMoveToCore`, then the
		species with the highest relative flux is returned.

		If the simulation completes without interruption, then any species whose
		relative flux stayed below `model.fluxToleranceKeepInEdge` are removed
		from the edge, along with the reactions that involve them.

		Returns:
		(tlist, ylist, dydtlist, valid?, Edge_species_with_highest_flux)
		"""

		# try writing cantera file
		sim,gas = self.runCantera(model)

		# Assemble stoichiometry matrix for all core and edge species
		# Rows are species (core, then edge); columns are reactions (core, then edge)
		stoichiometry = model.getStoichiometryMatrix()

		tlist = []; ylist = []; dydtlist = []
		maxRelativeSpeciesFluxes = numpy.zeros(len(model.core.species) + len(model.edge.species), float)

		maxRelativeNetworkLeakFluxes = numpy.zeros(len(model.unirxnNetworks), float)

		endtime = 10.0 # default. check for user value:
		for target in model.termination:
			if target.__class__ == modelmodule.TerminationTime:
				endtime = target.time

		# Set up initial conditions
		P = gas.pressure()
		V = sim.reactors()[0].volume()
		T = gas.temperature()
		# Note that, for molar density, Cantera thinks in kmol/m^3, while
		# RMG thinks in mol/m^3
		Ni = gas.molarDensity()*1000.0 * gas.moleFractions() * V
		y = [P, V, T]; y.extend(Ni)
		Ni0 = Ni
		y0 = y

		# Output information about simulation at current time
		header = 'Time          '
		for target in model.termination:
			if target.__class__ == modelmodule.TerminationConversion: header += 'Conv        '
		header += 'Char. flux     Max. rel. flux to edge'
		logging.debug(header)
#		self.printSimulationStatus(model, 0, y, y0, criticalFlux, maxSpeciesFlux, maxSpecies)
#		tlist.append(0.0); ylist.append(y0)
#		dydtlist.append(self.getResidual(0.0, y0, model, stoichiometry))

		done = False
		time = 0.0
		while not done:

			# Conduct integration
			# Uses a fixed time step on a log scale
			# (1.2589254... = 10**0.1, i.e. ten steps per decade)
			nexttime = min(endtime,time*1.2589254117941673)
			# advance cantera one step
			if sim.step(endtime) < endtime:
				# didn't reach endtime, so take another step
				if sim.step(endtime) < nexttime:
					# still haven't reached nexttime, so advance directly to it
					sim.advance(nexttime)
#			# Uses the same time steps that the Cantera solver used
#			sim.step(endtime)

			# Get state at current time
			time = sim.time()
			P = gas.pressure()
			V = sim.reactors()[0].volume()
			T = gas.temperature()
			# Note that, for molar density, Cantera thinks in kmol/m^3, while
			# RMG thinks in mol/m^3
			Ni = gas.molarDensity()*1000.0 * gas.moleFractions() * V
			y = [P, V, T]; y.extend(Ni)
			
			# Calculate species fluxes of all core and edge species at the
			# current time
			dNidt = self.getSpeciesFluxes(model, P, V, T, Ni, stoichiometry)
			
			# Determine characteristic species flux
			charFlux = math.sqrt(sum([x*x for x in dNidt[0:len(model.core.species)]]))

			# Store the highest relative flux for each species
			for i in range(len(dNidt)):
				if maxRelativeSpeciesFluxes[i] < abs(dNidt[i])/charFlux:
					maxRelativeSpeciesFluxes[i] = abs(dNidt[i])/charFlux

			# Test for model validity
			criticalFlux = charFlux * model.fluxToleranceInterrupt
			edgeValid, maxSpecies, maxSpeciesFlux = self.isModelValid(model, dNidt, criticalFlux)

			# Test leak fluxes of unimolecular networks
			if settings.unimolecularReactionNetworks:
				maxNetwork = None; maxNetworkFlux = 0.0
				# Get current leak fluxes of all unimolecular reaction networks
				networkLeakFluxes = self.getNetworkLeakFluxes(model, P, V, T, Ni, criticalFlux)
				for i in range(len(networkLeakFluxes)):
					if maxRelativeNetworkLeakFluxes[i] < abs(networkLeakFluxes[i]) / criticalFlux:
						maxRelativeNetworkLeakFluxes[i] = abs(networkLeakFluxes[i]) / criticalFlux
					if networkLeakFluxes[i] > maxNetworkFlux or maxNetwork is None:
						maxNetwork = model.unirxnNetworks[i]
						maxNetworkFlux = networkLeakFluxes[i]
				networksValid = (maxNetworkFlux <= criticalFlux)

			else:
				networksValid = True
				maxNetwork = None
				maxNetworkFlux = 0.0

			# Output information about simulation at current time
			self.printSimulationStatus(model, time, y, y0, charFlux, maxSpeciesFlux/charFlux, maxSpecies)
			tlist.append(time); ylist.append(y)
			#dydtlist.append(self.getResidual(time, solver.y, model, stoichiometry))

			# Exit simulation if model is not valid (exceeds interruption criterion)
			if not edgeValid or not networksValid:
				print gas
				logging.info('')
				# Choose the item with the maximum flux and act on it
				if maxSpeciesFlux >= maxNetworkFlux:
					logging.info('At t = %s, an edge species flux exceeds the critical flux for simulation interruption' % (time))
					logging.info('\tCharacteristic flux: %s' % (charFlux))
					logging.info('\tCritical flux: %s (%s times charFlux)' % (criticalFlux, model.fluxToleranceInterrupt))
					logging.info('\tSpecies flux for %s: %s (%.2g times charFlux)' % (maxSpecies, maxSpeciesFlux, maxSpeciesFlux/charFlux))
					return tlist, ylist, dydtlist, False, maxSpecies
				else:
					logging.info('At t = %s, a network leak flux exceeds the critical flux for simulation interruption' % (time))
					logging.info('\tCharacteristic flux: %s' % (charFlux))
					logging.info('\tCritical flux: %s (%s times charFlux)' % (criticalFlux, model.fluxToleranceInterrupt))
					logging.info('\tNetwork leak flux for %s: %s (%.2g times charFlux)' % (maxNetwork, maxNetworkFlux, maxNetworkFlux/charFlux))
					return tlist, ylist, dydtlist, False, maxNetwork

			# Test for simulation completion
			for target in model.termination:
				if target.__class__ == modelmodule.TerminationConversion:
					index = model.core.species.index(target.species) + 3
					conversion = 1.0 - y[index] / y0[index]
					if conversion > target.conversion: done = True
				elif target.__class__ == modelmodule.TerminationTime:
					if time > target.time: done = True

		logging.info(str(gas))

		# Compare maximum species fluxes
		maxRelativeSpeciesFlux = 0.0; maxSpecies = None
		speciesToRemove = []; maxRelativeFluxes_dict = {}
		for i in range(len(model.core.species), len(maxRelativeSpeciesFluxes)):
			spec = model.edge.species[i - len(model.core.species)]
			# pick out the single highest-flux edge species
			if maxRelativeSpeciesFluxes[i] > maxRelativeSpeciesFlux:
				maxRelativeSpeciesFlux = maxRelativeSpeciesFluxes[i]
				maxSpecies = spec
			# mark for removal those species whose flux is always too low
			if maxRelativeSpeciesFluxes[i] < model.fluxToleranceKeepInEdge:
				speciesToRemove.append(spec)
			# put max relative flux in dictionary
			maxRelativeFluxes_dict[spec] = maxRelativeSpeciesFluxes[i]
		edgeValid = maxRelativeSpeciesFlux <= model.fluxToleranceMoveToCore

		# Compare maximum network leak fluxes
		maxRelativeNetworkLeakFlux = 0.0; maxNetwork = None
		if settings.unimolecularReactionNetworks:
			# Compare maximum species fluxes
			for i in range(len(model.unirxnNetworks)):
				# pick out the single highest-flux edge species
				if maxRelativeNetworkLeakFluxes[i] > maxRelativeNetworkLeakFlux or maxNetwork is None:
					maxRelativeNetworkLeakFlux = maxRelativeNetworkLeakFluxes[i]
					maxNetwork = model.unirxnNetworks[i]
			networksValid = maxRelativeNetworkLeakFlux < model.fluxToleranceMoveToCore
		else:
			networksValid = True

		def removalSortKey(sp):
			return maxRelativeFluxes_dict[sp]
		speciesToRemove.sort(key=removalSortKey)

		# If model is not valid at these criteria, then return
		if not edgeValid or not networksValid:

			# trim the edge according to fluxToleranceKeepInEdge
			logging.info("Removing from edge %d/%d species whose relative flux never exceeded %s"%(
				len(speciesToRemove),len(model.edge.species),model.fluxToleranceKeepInEdge ) )
			logging.info("Max. rel. flux.\tSpecies")
			for sp in speciesToRemove:
				logging.info("%-10.3g    \t%s"%(maxRelativeFluxes_dict[sp], sp))
				model.removeSpeciesFromEdge(sp)

			# trim the edge according to maximumEdgeSpecies
			if len(model.edge.species)> model.maximumEdgeSpecies:
				logging.info("Removing from edge %d/%d species to reach maximum edge size of %s species"%(
					len(model.edge.species)-model.maximumEdgeSpecies,
					len(model.edge.species),
					model.maximumEdgeSpecies ) )
				edgeSpeciesCopy = model.edge.species[:]
				edgeSpeciesCopy.sort(key=removalSortKey)
				logging.info("Max. rel. flux.\tSpecies")
				while len(model.edge.species)>model.maximumEdgeSpecies:
					sp = edgeSpeciesCopy.pop(0)
					logging.info("%-10.3g    \t%s"%(maxRelativeFluxes_dict[sp], sp))
					model.removeSpeciesFromEdge(sp)

			criticalFlux = charFlux * model.fluxToleranceMoveToCore
			print gas
			logging.info('')
			# Choose the item with the maximum flux and act on it
			if maxSpeciesFlux >= maxNetworkFlux:
				logging.info('At some time the species flux for %s exceeded the critical flux\nrelative to the characteristic core flux at that time' % (maxSpecies))
				logging.info('\tCharacteristic flux: %s' % (charFlux))
				logging.info('\tCritical flux: %s (%s times charFlux)' % (criticalFlux, model.fluxToleranceMoveToCore))
				logging.info('\tSpecies flux for %s: %s (%.2g times charFlux)' % (maxSpecies, maxSpeciesFlux, maxSpeciesFlux/charFlux))
				return tlist, ylist, dydtlist, False, maxSpecies
			else:
				logging.info('At some time the network leak flux for %s exceeded the critical flux\nrelative to the characteristic core flux at that time' % (maxNetwork))
				logging.info('\tCharacteristic flux: %s' % (charFlux))
				logging.info('\tCritical flux: %s (%s times charFlux)' % (criticalFlux, model.fluxToleranceMoveToCore))
				logging.info('\tNetwork leak flux for %s: %s (%.2g times charFlux)' % (maxNetwork, maxNetworkFlux, maxNetworkFlux/charFlux))
				return tlist, ylist, dydtlist, False, maxNetwork

		return tlist, ylist, dydtlist, True, None
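
The validity test reduces to comparing each edge species flux against a characteristic core flux (the 2-norm of the core fluxes) scaled by a tolerance. A minimal sketch of that test (names and signature are illustrative, not from the source):

import math

def edge_flux_check(dNidt, nCore, tolerance):
	"""Return (valid, index_of_worst_edge_species) for flux vector dNidt.

	dNidt holds d(moles)/dt for core species (first nCore entries) then
	edge species; tolerance plays the role of model.fluxToleranceInterrupt.
	"""
	if len(dNidt) <= nCore:
		return True, None  # no edge species to test
	charFlux = math.sqrt(sum([x*x for x in dNidt[:nCore]]))
	criticalFlux = charFlux * tolerance
	worst = max(range(nCore, len(dNidt)), key=lambda i: abs(dNidt[i]))
	return abs(dNidt[worst]) <= criticalFlux, worst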
Example #8
	def runCantera(self, model):
		"""
		Execute a simulation of the reaction system in Cantera. The procedure:
		(1) write a CTML (Cantera) file, (2) read it into Cantera, (3) create
		the reactor in Cantera, and (4) return the simulation results.
		"""

		# Create a folder in the scratch directory for Cantera files if needed
		cantera_folder = os.path.join(settings.scratchDirectory,'cantera')
		if not os.path.exists(cantera_folder):
			os.mkdir(cantera_folder)
		
		# Write the CTML file to scratch/cantera/ folder
		cti_file = os.path.join(cantera_folder, 'cantera_input_%03d' % len(model.core.species))
		logging.debug("Writing CTML file %s" % cti_file)
		ctml_writer.dataset(cti_file) # set the base name of the output files
		ctml_writer.write()

		import Cantera
		import Cantera.Reactor

		# Load the CTML file into Cantera
		logging.info("Preparing Cantera simulation %d" % len(model.core.species))
		Cantera.reset()
		gas = Cantera.importPhase('%s.xml' % cti_file, 'chem', loglevel=1)

		# Set initial concentrations
		moleFractions = numpy.zeros(len(model.core.species))
		for spec, conc in self.initialMoleFraction.iteritems():
			moleFractions[gas.speciesIndex(str(spec))] = conc
		gas.setMoleFractions(moleFractions) # Cantera normalises these to sum to 1

		# Set initial temperature and pressure
		gas.set(T=self.initialTemperature, P=self.initialPressure)

		# create a batch reactor; an "infinite" heat transfer coefficient is
		# treated as isothermal, so the energy equation is switched off
		if self.heatTransferCoeff == 1.0e100:
			reactor = Cantera.Reactor.Reactor(gas, volume=self.volume, energy='off')
		else:
			reactor = Cantera.Reactor.Reactor(gas, volume=self.volume)

		# set the initial environment conditions
		gasAir = Cantera.Air()
		gasAir.set(T=self.reservoirTemperature, P=self.reservoirPressure)

		# create a reservoir for the environment
		environment = Cantera.Reactor.Reservoir(gasAir)

		# Define a wall between the reactor and the environment; make it
		# flexible, so that the pressure in the reactor tracks the environment
		# pressure, and conductive, so that the temperature does likewise
		wall = Cantera.Reactor.Wall(reactor, environment)
		wall.set(K=self.expansionCoeff)
		wall.set(A=self.area)
		wall.setHeatTransferCoeff(self.heatTransferCoeff) # W/m2/K

		# put reactor in a reactor network so it can be integrated
		sim = Cantera.Reactor.ReactorNet([reactor])
		sim.setTolerances(atol=model.absoluteTolerance, rtol=model.relativeTolerance)

		return sim, gas
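
A hedged usage sketch, assuming we are inside the same reaction-system class with a core-edge `model` in hand (this is how `simulate` in Example #7 consumes the returned pair):

sim, gas = self.runCantera(model)
sim.advance(1.0e-3)                      # integrate the reactor network to t = 1 ms
print gas.temperature(), gas.pressure()  # state of the gas at that time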