def fugacity_coefficient(self, P, rho, xi, T, dy=1e-5, log_method=True):
    """
    Compute fugacity coefficient.

    Parameters
    ----------
    P : float
        Pressure of the system [Pa]
    rho : float
        Molar density of system [:math:`mol/m^3`]
    xi : list[float]
        Mole fraction of each component, sum(xi) should equal 1.0
    T : float
        Temperature of the system [K]
    dy : float, Optional, default=1e-5
        Step size used in the central difference method
    log_method : bool, Optional, default=True
        Choose to use a log transform in central difference method. This allows easier calculations for very small numbers.

    Returns
    -------
    fugacity_coefficient : numpy.ndarray
        Array of fugacity coefficient values for each component
    """
    if len(xi) != self.number_of_components:
        raise ValueError(
            "Number of components in mole fraction list, {}, doesn't match self.number_of_components, {}".format(
                len(xi), self.number_of_components
            )
        )

    if gtb.isiterable(T):
        if len(T) == 1:
            T = T[0]
        else:
            raise ValueError("Temperature must be given as a scalar.")
    if gtb.isiterable(rho):
        if len(rho) == 1:
            rho = rho[0]
        else:
            raise ValueError("Density must be given as a scalar.")
    if gtb.isiterable(P):
        if len(P) == 1:
            P = P[0]
        else:
            raise ValueError("Pressure must be given as a scalar.")

    rho = self._check_density(rho)
    logZ = np.log(P / (rho * T * constants.R))
    Ares = self.residual_helmholtz_energy(rho, T, xi)
    dAresdrho = tb.partial_density_central_difference(
        xi,
        rho,
        T,
        self.residual_helmholtz_energy,
        step_size=dy,
        log_method=log_method,
    )

    phi = np.exp(Ares + rho * dAresdrho - logZ)

    return phi
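The derivative used above, dAresdrho, comes from tb.partial_density_central_difference, whose implementation is not shown here. A minimal sketch of a central difference taken over the partial densities rho_i = x_i * rho, assuming the callable has the func(rho, T, xi) signature used above (the helper below is illustrative, not the library routine, and omits the log-transform option):

import numpy as np

def _partial_density_central_difference(xi, rho, T, func, step_size=1e-5):
    # Perturb each partial density rho_i = x_i * rho in turn, holding the others
    # fixed, and evaluate func(rho_total, T, x) on either side to form the
    # central difference d(func)/d(rho_i).
    rhoi = np.asarray(xi, dtype=float) * rho
    dfunc = np.zeros(len(rhoi))
    for i in range(len(rhoi)):
        step = step_size * max(rhoi[i], 1.0)
        for sign in (1.0, -1.0):
            perturbed = rhoi.copy()
            perturbed[i] += sign * step
            rho_total = perturbed.sum()
            dfunc[i] += sign * func(rho_total, T, perturbed / rho_total)
        dfunc[i] /= 2.0 * step
    return dfunc

# Toy check: for func(rho, T, x) = rho, the derivative with respect to each
# partial density is 1.
print(_partial_density_central_difference([0.4, 0.6], 2000.0, 300.0, lambda rho, T, x: rho))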
def pressure(self, rho, T, xi):
    """
    Compute pressure given system information.

    Parameters
    ----------
    rho : numpy.ndarray
        Molar density of system [:math:`mol/m^3`]
    T : float
        Temperature of the system [K]
    xi : list[float]
        Mole fraction of each component

    Returns
    -------
    P : numpy.ndarray
        Array of pressure values [Pa], one for each density given
    """
    if T != self.T:
        self.T = T
        self._calc_temp_dependent_parameters(T)

    self._calc_mixed_parameters(xi, T)

    if not gtb.isiterable(rho):
        rho = np.array([rho])
    elif not isinstance(rho, np.ndarray):
        rho = np.array(rho)

    P = constants.R * self.T * rho / (1 - self.eos_dict["bij"] * rho) - rho**2 * self.eos_dict["aij"] / (
        (1 + self.eos_dict["bij"] * rho)
        + rho * self.eos_dict["bij"] * (1 - self.eos_dict["bij"] * rho)
    )

    return P
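The pressure expression above is the Peng-Robinson equation of state written in molar density; the attractive-term denominator (1 + b*rho) + b*rho*(1 - b*rho) simplifies to 1 + 2*b*rho - (b*rho)**2. A self-contained sketch of that simplified form, with made-up a and b values rather than parameters from this package:

R = 8.31446261815324  # gas constant [J/(mol K)]

def peng_robinson_pressure(rho, T, a, b):
    # Same attractive-term denominator as above, written out:
    # (1 + b*rho) + b*rho*(1 - b*rho) = 1 + 2*b*rho - (b*rho)**2
    return R * T * rho / (1.0 - b * rho) - a * rho**2 / (1.0 + 2.0 * b * rho - (b * rho) ** 2)

# Illustrative (not fitted) parameters: a in Pa m^6/mol^2, b in m^3/mol
print(peng_robinson_pressure(rho=1000.0, T=300.0, a=0.45, b=2.7e-5))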
def reformat_output(cluster):
    r"""
    Takes a list of lists that contain thermo output of lists and floats and reformats it into a 2D numpy array.

    Parameters
    ----------
    cluster : list[list[list/floats]]
        A list of lists, where the inner list is made up of lists and floats

    Returns
    -------
    matrix : numpy.ndarray
        A 2D matrix
    len_cluster : list
        A list of lengths for each of the columns (1 for a float, len(list) for a list)
    """
    # If input is a list or array
    if len(cluster) == 1:
        matrix = np.transpose(np.array(cluster[0]))
        if not gtb.isiterable(cluster[0]):
            len_cluster = [1]
        else:
            len_cluster = [len(cluster[0])]
    # If list of lists or arrays
    else:
        # Obtain dimensions of final matrix
        len_cluster = []
        for i, tmp_cluster in enumerate(cluster):
            if gtb.isiterable(tmp_cluster[0]):
                len_cluster.append(len(tmp_cluster[0]))
            else:
                len_cluster.append(1)
        matrix_tmp = np.zeros([len(cluster[0]), sum(len_cluster)])

        # Transfer information to final matrix
        ind = 0
        for i, val in enumerate(cluster):
            try:
                matrix = np.zeros([len(val[0]), len(val)])
            except Exception:
                matrix = np.zeros([1, len(val)])
            # Simple transpose, copied column by column because a numpy array of
            # numpy arrays does not transpose reliably
            for j, tmp in enumerate(val):
                matrix[:, j] = tmp

            l = len_cluster[i]
            if l == 1:
                matrix_tmp[:, ind] = np.array(matrix)
                ind += 1
            else:
                if len(matrix) == 1:
                    matrix = matrix[0]
                for j in range(l):
                    matrix_tmp[:, ind] = matrix[j]
                    ind += 1

        matrix = np.array(matrix_tmp)

    return matrix, len_cluster
def __init__(self, data_dict):

    super().__init__(data_dict)

    # If required items weren't defined, set defaults
    self.name = "saturation_properties"
    if self.thermodict["calculation_type"] is None:
        self.thermodict["calculation_type"] = "saturation_properties"

    tmp = {
        "min_density_fraction": (1.0 / 80000.0),
        "density_increment": 10.0,
        "max_volume_increment": 1.0e-4,
    }
    if "density_opts" in self.thermodict:
        tmp.update(self.thermodict["density_opts"])
    self.thermodict["density_opts"] = tmp

    # Extract system data
    if "xi" in data_dict:
        self.thermodict["xilist"] = data_dict["xi"]
        del data_dict["xi"]
    if "yi" in data_dict:
        self.thermodict["xilist"] = data_dict["yi"]
        logger.info("Vapor mole fraction recorded as 'xi'")
        del data_dict["yi"]
    if "T" in data_dict:
        self.thermodict["Tlist"] = data_dict["T"]
        del data_dict["T"]
    if "P" in data_dict:
        self.thermodict["Psat"] = data_dict["P"]
        del data_dict["P"]
        if "P" in self.weights:
            if gtb.isiterable(self.weights["P"]) and len(self.weights["P"]) != len(
                self.thermodict["Psat"]
            ):
                raise ValueError(
                    "Array of weights for '{}' values not equal to number of experimental values given.".format(
                        "P"
                    )
                )
            else:
                self.weights["Psat"] = self.weights.pop("P")

    self.thermodict.update(data_dict)

    thermo_keys = ["xilist", "Tlist"]
    self.result_keys = ["rhol", "rhov", "Psat"]

    key_list = list(set(thermo_keys + self.result_keys))
    self.thermodict.update(gtb.check_length_dict(self.thermodict, key_list))
    for key in self.result_keys:
        if key in self.thermodict:
            self.npoints = np.size(self.thermodict[key])
            break

    if "xilist" not in self.thermodict and self.Eos.number_of_components > 1:
        raise ValueError(
            "Ambiguous instructions. Include xi to define the intended component for saturation properties."
        )
    thermo_defaults = [
        np.array([[1.0] for x in range(self.npoints)]),
        constants.standard_temperature,
    ]
    self.thermodict.update(
        gtb.set_defaults(self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints)
    )

    self.weights.update(
        gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints)
    )
    self.weights.update(gtb.set_defaults(self.weights, self.result_keys, 1.0))

    if "Tlist" not in self.thermodict:
        raise ImportError(
            "Given saturation data, value(s) for T should have been provided."
        )

    tmp = ["Psat", "rhol", "rhov"]
    if not any([x in self.thermodict for x in tmp]):
        raise ImportError(
            "Given saturation data, values for Psat, rhol, and/or rhov should have been provided."
        )

    logger.info(
        "Data type 'saturation_properties' initiated with calculation_type, {}, and data types: {}.\nWeight data by: {}".format(
            self.thermodict["calculation_type"],
            ", ".join(self.result_keys),
            self.weights,
        )
    )
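For reference, a hypothetical data_dict that would exercise the branches above; the key names ("T", "P", "xi") follow the parsing logic shown, the values are illustrative only, and whether a nested "weights" entry is accepted depends on the parent class, which is not shown here:

# Hypothetical input for the saturation-properties data class above.
data_dict = {
    "calculation_type": "saturation_properties",
    "T": [320.0, 340.0, 360.0],       # temperatures [K]
    "P": [1.2e5, 2.7e5, 5.5e5],       # experimental saturation pressures [Pa]
    "xi": [[1.0], [1.0], [1.0]],      # mole fractions, single component
    "weights": {"P": 1.0},            # assumes the parent class accepts nested weights
}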
def fugacity_coefficient(self, P, rho, xi, T):
    r"""
    Compute fugacity coefficient.

    Parameters
    ----------
    P : float
        Pressure of the system [Pa]
    rho : float
        Molar density of system [:math:`mol/m^3`]
    xi : list[float]
        Mole fraction of each component
    T : float
        Temperature of the system [K]

    Returns
    -------
    fugacity_coefficient : numpy.ndarray
        :math:`\phi_i`, Array of fugacity coefficient values for each component
    """
    if gtb.isiterable(T):
        if len(T) == 1:
            T = T[0]
        else:
            raise ValueError("Temperature must be given as a scalar.")
    if gtb.isiterable(rho):
        if len(rho) == 1:
            rho = rho[0]
        else:
            raise ValueError("Density must be given as a scalar.")
    if gtb.isiterable(P):
        if len(P) == 1:
            P = P[0]
        else:
            raise ValueError("Pressure must be given as a scalar.")

    if T != self.T:
        self.T = T
        self._calc_temp_dependent_parameters(T)

    self._calc_mixed_parameters(xi, T)

    tmp_RT = constants.R * T
    Z = P / (tmp_RT * rho)
    Ai = self.eos_dict["ai"] * self.eos_dict["alpha"] * P / tmp_RT**2
    Bi = self.eos_dict["bi"] * P / tmp_RT
    B = self.eos_dict["bij"] * P / tmp_RT
    A = self.eos_dict["aij"] * P / tmp_RT**2

    sqrt2 = np.sqrt(2.0)
    tmp1 = A / (2.0 * sqrt2 * B) * np.log(
        (Z + (1 + sqrt2) * B) / (Z + (1 - sqrt2) * B)
    )
    tmp3 = Bi * (Z - 1) / B - np.log(Z - B)
    tmp2 = np.zeros(len(xi))
    index = range(len(xi))
    for i in index:
        Aij = np.zeros(len(xi))
        for j in index:
            Aij[j] = np.sqrt(Ai[i] * Ai[j]) * (1.0 - self.eos_dict["kij"][i][j])
        tmp2[i] = Bi[i] / B - 2 * np.sum(xi * Aij) / A
    phi = np.exp(tmp1 * tmp2 + tmp3)

    return phi
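Written out, tmp1 * tmp2 + tmp3 is the standard Peng-Robinson fugacity-coefficient expression, ln(phi_i) = (B_i/B)(Z - 1) - ln(Z - B) - A/(2*sqrt(2)*B) * (2*sum_j(x_j*A_ij)/A - B_i/B) * ln[(Z + (1 + sqrt(2))*B)/(Z + (1 - sqrt(2))*B)]. A small sketch of that regrouped form, taking the dimensionless A, B, Ai, Bi, and the full Aij matrix as inputs (pr_log_phi is an illustrative name, not part of the package):

import numpy as np

def pr_log_phi(Z, A, B, Ai, Bi, Aij, x):
    # Regrouped Peng-Robinson expression; equals tmp1 * tmp2 + tmp3 above.
    # Aij is the full matrix of cross terms sqrt(Ai[i]*Ai[j]) * (1 - kij[i][j]).
    sqrt2 = np.sqrt(2.0)
    log_term = np.log((Z + (1 + sqrt2) * B) / (Z + (1 - sqrt2) * B))
    sum_term = np.array([np.sum(x * Aij[i]) for i in range(len(x))])
    return (
        Bi / B * (Z - 1.0)
        - np.log(Z - B)
        - A / (2.0 * sqrt2 * B) * (2.0 * sum_term / A - Bi / B) * log_term
    )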
def obj_function_form(
    data_test,
    data0,
    weights=1.0,
    method="average-squared-deviation",
    nan_number=1000,
    nan_ratio=0.1,
):
    r"""
    Sets objective functional form.

    Note that if a result is np.nan, that point is removed from the list for the purposes of averaging.

    Parameters
    ----------
    data_test : numpy.ndarray
        Data that is being assessed. Array of the same length as ``data0``
    data0 : numpy.ndarray
        Reference data for comparison
    weights : (numpy.ndarray or float), Optional, default=1.0
        Can be a float or array of data of the same length as ``data_test``. Allows the user to tune the importance of various data points.
    method : str, Optional, default="average-squared-deviation"
        Keyword used to choose the functional form. Can be:

        - average-squared-deviation: :math:`\sum{(\frac{data\_test-data0}{data0})^2}/N`
        - sum-squared-deviation: :math:`\sum{(\frac{data\_test-data0}{data0})^2}`
        - sum-squared-deviation-boltz: :math:`\sum{(\frac{data\_test-data0}{data0})^2 \exp(\frac{data\_test\_min-data\_test}{|data\_test\_min|})}` [DOI: 10.1063/1.2181979]
        - sum-deviation-boltz: :math:`\sum{\frac{data\_test-data0}{data0} \exp(\frac{data\_test\_min-data\_test}{|data\_test\_min|})}` [DOI: 10.1063/1.2181979]
        - percent-absolute-average-deviation: :math:`\sum{|\frac{data\_test-data0}{data0}|}/N \times 100`
    nan_number : float, Optional, default=1000
        If a thermodynamic calculation produces NaN, add this quantity to the objective value. (See nan_ratio)
    nan_ratio : float, Optional, default=0.1
        If more than ``nan_ratio*100`` percent of the calculated data are NaN, increase the objective value by ``nan_number`` for each entry where data_test is NaN.

    Returns
    -------
    obj_value : float
        Objective value given the calculated and reference information
    """
    if np.size(data0) != np.size(data_test):
        raise ValueError(
            "Input data of length, {}, must be the same length as reference data of length {}".format(
                len(data_test), len(data0)
            )
        )

    if np.size(weights) > 1 and np.size(weights) != np.size(data_test):
        raise ValueError(
            "Weight for data is provided as an array of length, {}, but must be length, {}.".format(
                len(weights), len(data_test)
            )
        )

    data_tmp = np.array([
        (data_test[i] - data0[i]) / data0[i]
        for i in range(len(data_test))
        if not np.isnan((data_test[i] - data0[i]) / data0[i])
    ])

    if gtb.isiterable(weights):
        weight_tmp = np.array([
            weights[i]
            for i in range(len(data_test))
            if not np.isnan((data_test[i] - data0[i]) / data0[i])
        ])
    else:
        weight_tmp = weights

    if len(data_tmp) == 0:
        # Every point produced NaN, so the objective value is undefined
        obj_value = np.nan
    elif method == "average-squared-deviation":
        obj_value = np.mean(data_tmp**2 * weight_tmp)
    elif method == "sum-squared-deviation":
        obj_value = np.sum(data_tmp**2 * weight_tmp)
    elif method == "sum-squared-deviation-boltz":
        data_min = np.min(data_tmp)
        obj_value = np.sum(
            data_tmp**2 * weight_tmp * np.exp((data_min - data_tmp) / np.abs(data_min))
        )
    elif method == "sum-deviation-boltz":
        data_min = np.min(data_tmp)
        obj_value = np.sum(
            data_tmp * weight_tmp * np.exp((data_min - data_tmp) / np.abs(data_min))
        )
    elif method == "percent-absolute-average-deviation":
        obj_value = np.mean(np.abs(data_tmp) * weight_tmp) * 100
    else:
        raise ValueError("Objective function method, {}, is not supported.".format(method))

    if len(data_test) != len(data_tmp):
        tmp = 1 - len(data_tmp) / len(data_test)
        if tmp > nan_ratio:
            obj_value += (len(data_test) - len(data_tmp)) * nan_number
            logger.debug(
                "Values of NaN were removed from objective value calculation, nan_ratio {} > {}, augment obj. value".format(
                    tmp, nan_ratio
                )
            )
        else:
            logger.debug(
                "Values of NaN were removed from objective value calculation, nan_ratio {} < {}".format(
                    tmp, nan_ratio
                )
            )

    return obj_value