def run_init(self): """ """ ## Setup loop based on self.loopstyle self.loopstyle = toList(self.input.v('loopstyle')) # change self.loopstyle if 'permutations' or 'simultaneous' + check if self.loopstyle has the correct format if len(self.loopstyle) == 1 and self.loopstyle[ 0] == 'permutations': # all possible combinations self.loopstyle = range(0, len(self.variables)) elif len(self.loopstyle) == 1 and self.loopstyle[ 0] == 'simultaneous': # everything simultaneous self.loopstyle = [0] * len(self.variables) elif len(self.loopstyle) == len(self.variables): pass else: raise KnownError( 'self.loopstyle "%s" in input of module %s unknown ' % (str(self.loopstyle), self.__module__)) # make list of unique elements of self.loopstyle self.ls_list = [i for i in sorted(list(set(toList(self.loopstyle))))] # verify that number of values is the same in covarying variables num_items = [] for ls in self.ls_list: num_items.append( len(self.values[self.variables[self.loopstyle.index(ls)]])) for i, var in enumerate(self.variables): if ls == self.loopstyle[i] and len( self.values[var]) != num_items[-1]: if self.input.v('loopstyle') == 'simultaneous': raise KnownError( 'Problem in input of module %s for loopstyle "simultaneous". Number of values in "%s" is unequal to number of values in "%s" ' % (self.__module__, self.variables[i], self.variables[0])) else: raise KnownError( 'Problem in input of module %s for loopstyle "%s". Number of values in "%s" is unequal to number of values in "%s" ' % (self.__module__, ls, self.variables[i], self.variables[self.loopstyle.index(ls)])) # determine number of loops per element in permutation self.numLoops = num_items # run first iteration self.iteration = 0 d = self.run() return d
def __checkInputRequirements(self):
    """Private method checkInputRequirements. See buildCallStack step 2 for information.

    For every module that will run, collect its input requirements and strip
    everything that can be provided by another running module, by the input
    file/config, or (for output modules) by the module's own output
    requirements. Raise if anything remains unsatisfied.
    """
    activeModules = [m for m in self.moduleList if m.runModule]
    for mod in activeModules:
        # input variables needed by module 'mod' (both run and init requirements)
        required = list(set(mod.getInputRequirements() + mod.getInputRequirements(init=True)))

        # remove variables calculated by any running module
        produced = []
        for other in activeModules:
            produced += other.getOutputVariables()
        required = [v for v in required if v not in produced]

        # remove variables available from input and config file
        available = [entry[0] for entry in mod.getAvailableVariableNames()]
        required = [v for v in required if v not in available]

        # for output modules, missing output variables are tolerated here;
        # a warning is provided later when building the call stack
        if mod.isOutputModule():
            required = [v for v in required if v not in mod.getOutputRequirements()]

        # anything left cannot be provided by any source: abort
        if required:
            message = ("Not all required variables are given. Missing '%s' in module '%s'.\nVariables can be calculated in other modules, given on input or specified in the iFlow config file" % (', '.join(required), str(mod.getName())))
            raise KnownError(message)
    return
def run(self):
    """Load one pickled result file and return its contents.

    Reads 'folder' and 'file' from input; 'file' may be 'all', in which case
    exactly one '.p' file must be present in the folder. Loads either all
    variables or the requested ones plus 'grid'.

    Raises:
        KnownError - if more than one file matches
    """
    folder = os.path.join(cfm.CWD, self.input.v('folder'))
    requested = self.input.v('file')

    # 'all' means: pick up every pickle (.p) file in the folder
    if requested == 'all':
        requested = [f for f in os.listdir(folder)
                     if os.path.isfile(os.path.join(folder, f)) and f.endswith('.p')]

    # this module can handle exactly one file
    candidates = ny.toList(requested)
    if len(candidates) > 1:
        raise KnownError('Found multiple files to load in module %s. This module can only load one file' % (self.__module__))
    file = candidates[0]

    # None means "load everything"; otherwise always include the grid
    if self.input.v('variables') == 'all':
        varlist = None
    else:
        varlist = ny.toList(self.input.v('variables')) + ['grid']

    d = ny.pickleload(os.path.join(folder, file), varlist)
    return d
def run(self, init=False):
    """Invoke the module's run() method, or run_init() if the module is iterative and init=True.

    Parameters:
        init (bool, optional) - if True and the module is iterative, invoke run_init

    Exception:
        KnownError - if the output of the underlying module cannot be wrapped
            in a DataContainer (i.e. is not a dictionary or None)

    Returns:
        DataContainer with the results of the calculated module
    """
    self.timer.tic()
    # 21-07-2017 YMD correction: check if iterative, not if run_init exists
    entry = self.module.run_init if (init and self.isIterative()) else self.module.run
    result = entry()

    # wrap the result in a DataContainer; failure means invalid module output
    try:
        self.result = DataContainer.DataContainer(result)
    except:
        raise KnownError('Output of module %s is invalid. Please make sure to return a dictionary.' % self.getName())
    self.timer.toc()
    return self.result
def slice(self, key, *args, **kwargs):
    """Return a new DataContainer instance containing the key (+subkeys in args).

    NB. does not make a copy of the data.

    Parameters:
        key & args (str) - keys and subkeys such as meant in .v()
        in kwargs:
            excludeKey (bool, optional) - if True, the key (and/or subkeys)
                will not be part of the new DataContainer. Only works if the
                value obtained from the keys is a dict; else an exception is
                raised. Use with caution. Default: False

    Returns:
        New DataContainer instance keyed by the final element of args (or by
        'key' if args is empty)
    """
    allKeys = (key, ) + args
    lastKey = allKeys[-1]
    # let unwrapdictionaries know not to add data in subdicts while slicing
    kwargs['dontadd'] = True
    value, args, kwargs, _ = self.__unwrapDictionaries(self.data, key, *args, **kwargs)

    if kwargs.get('excludeKey'):
        # value itself must be a dict to become the new container's data
        try:
            return DataContainer(value)
        except Exception as e:
            raise KnownError('Problem in slicing DataContainer. Result is not a dictionary.', e)
    return DataContainer({lastKey: value})
def interpretValues(self, values):
    """Interpret values on input as a space-separated list or as pure python input.

    Parameters:
        values - value(s) to evaluate

    Returns:
        values - evaluated values: the evaluated python object when the input
            looks like a python expression, else the original list

    Raises:
        KnownError - if the input looks like python code but cannot be evaluated
    """
    values = toList(values)

    # case 1: pure python: check for (, [, and operators; merge list to a string
    valString = ' '.join([str(f) for f in values])
    if any([i in valString for i in ['(', '[', ',', '/', '*', '+', '-']]):
        try:
            # BUGFIX: the original used exec('valuespy = ' + valString) and then
            # returned the local 'valuespy'. In Python 3, exec cannot rebind a
            # local variable, so this branch always returned None. eval returns
            # the evaluated expression directly.
            # NOTE(security): this evaluates input as python; never feed this
            # module untrusted input.
            return eval(valString)
        except Exception as e:
            # str(e) replaces the removed Python-2 attribute e.message
            errorString = ': ' + str(e) if str(e) else ''
            raise KnownError('Failed to interpret input as python command %s in input: %s' % (errorString, valString), e)
    # case 2: else interpret as space-separated list
    else:
        return values
def __init__(self, input):
    """Read the loop variables and their values from input.

    For each variable listed under 'variables', values may be supplied
    directly or in a sub-dict; sub-dict values are rewritten from a dict of
    lists to a list of dicts.

    Raises:
        KnownError - if a variable is missing or sub-dict entries have
            unequal numbers of values
    """
    self.input = input
    self.variables = toList(self.input.v('variables'))

    # check if values of variables are provided
    for var in self.variables:
        if self.input.v(var) is None:
            raise KnownError("Not all required variables are given. Missing '%s' in module '%s'" % (var, self.__module__))

    # load values to loop over from input
    self.values = {}
    for var in self.variables:
        # check if values are given directly or in a separate substructure
        subkeys = self.input.getKeysOf(var)
        if not subkeys:
            # case 1: values supplied directly
            self.values[var] = self.interpretValues(self.input.v(var))
        else:
            # case 2: values in sub-dict; load values per key
            perKey = {}
            for k in subkeys:
                perKey[k] = self.interpretValues(self.input.v(var, k))

            # check that all keys have the same number of arguments
            refLength = len(perKey[subkeys[0]])
            for k in subkeys:
                if len(perKey[k]) != refLength:
                    raise KnownError('Problem with values of "%s" in input of module %s. Number of values in "%s" is unequal to number of values in "%s" ' % (var, self.__module__, k, subkeys[0]))

            # rewrite dict with lists to list with dicts
            self.values[var] = [{k: perKey[k][i] for k in subkeys} for i in range(refLength)]
    return
def axisSecondDerivative(u, dim, dimNo, grid, *args):
    """Second derivative of u along axis number dimNo on grid axis 'dim'.

    Parameters:
        u - data to differentiate (array-like)
        dim (str) - name of the grid axis of derivation
        dimNo (int) - axis number of derivation in u
        grid - grid object providing 'grid','axis'/'high'/'low' via .v()
        args - optional per-axis index arrays selecting a subset of points

    Returns:
        array with the second derivative; zeros when the axis has length 1 or
        dimNo is out of range (constant first derivative)
    """
    u = np.asarray(u)
    inds = [np.arange(0, n) for n in u.shape]
    # replace indices by requested indices wherever available
    for n in range(0, min(len(args), len(inds))):
        inds[n] = np.asarray(args[n])

    # determine the maximum index along the axis of derivation;
    # if it does not exist or is zero, return a zero array
    try:
        maxIndex = u.shape[dimNo] - 1
        if maxIndex == 0:
            raise Exception
    except:
        # dimNo out of range or a length-1 dimension: data has a constant derivative
        ux = np.zeros([len(inds[n]) for n in range(0, len(inds))])
        return ux

    # central derivative
    # NB SHOULD BE IMPROVED ON THE EDGES OF THE DOMAIN
    if SECONDDERMETHOD == 'CENTRAL':
        # BUGFIX: copy instead of alias; the original 'upInds = inds' mutated inds
        upInds = inds[:]
        try:
            upInd = np.maximum(np.minimum(np.asarray(args[dimNo])+1, maxIndex), 2)
        except:
            # BUGFIX: list(range(...)); 'list + range' raises TypeError in Python 3
            upInd = np.asarray([2] + list(range(2, maxIndex+1)) + [maxIndex])
        upInds[dimNo] = upInd

        midInds = inds[:]
        try:
            midInd = np.maximum(np.minimum(np.asarray(args[dimNo]), maxIndex-1), 1)
        except:
            midInd = np.asarray([1] + list(range(1, maxIndex)) + [maxIndex-1])
        midInds[dimNo] = midInd

        downInds = inds[:]
        try:
            downInd = np.maximum(np.minimum(np.asarray(args[dimNo])-1, maxIndex-2), 0)
        except:
            downInd = np.asarray([0] + list(range(0, maxIndex-1)) + [maxIndex-2])
        downInds[dimNo] = downInd

        # dimensional coordinates: axis*(high-low)+low at up/mid/down stencil points
        upaxis = np.multiply(grid.v('grid', 'axis', dim, *upInds, copy='all'), (grid.v('grid', 'high', dim, *upInds, copy='all')-grid.v('grid', 'low', dim, *upInds, copy='all')))+grid.v('grid', 'low', dim, *upInds, copy='all')
        midaxis = np.multiply(grid.v('grid', 'axis', dim, *midInds, copy='all'), (grid.v('grid', 'high', dim, *midInds, copy='all')-grid.v('grid', 'low', dim, *midInds, copy='all')))+grid.v('grid', 'low', dim, *midInds, copy='all')
        downaxis = np.multiply(grid.v('grid', 'axis', dim, *downInds, copy='all'), (grid.v('grid', 'high', dim, *downInds, copy='all')-grid.v('grid', 'low', dim, *downInds, copy='all')))+grid.v('grid', 'low', dim, *downInds, copy='all')

        dxup = upaxis-midaxis
        dxdown = midaxis-downaxis
        dxav = .5*(dxup+dxdown)

        # non-uniform central second difference
        umid = u[np.ix_(*midInds)]
        ux = (u[np.ix_(*upInds)]-umid)/(dxup*dxav) - (umid - u[np.ix_(*downInds)])/(dxdown*dxav)
    else:
        raise KnownError("Numerical derivative scheme '%s' is not implemented" % (SECONDDERMETHOD))
    return ux
def __refactorUtil(self, inputList, data, **kwargs):
    """Replace tags @{}, +{} and if{}. Do this recursively by removing an item
    from the inputList, replacing one occurrence of @{} and +{} and putting
    the result at the back of inputList.

    Tag meanings (as implemented below):
        @name or @dict.key  - replace the whole item by the value(s) of 'name'
        @{name}             - substitute each value of 'name' into the item
        +{expr}             - expand the item over range(*eval(expr))
        if{item,condition}  - keep 'item' only if 'condition' evaluates truthy

    Parameters:
        inputList (list of str) - items possibly containing tags
        data - DataContainer-like object queried via .v()
        kwargs - extra DataContainer-like sources, addressed as 'dict.key'

    Returns:
        list of unique, fully expanded items
    """
    i = 0
    while i<len(inputList):
        item = inputList[i]
        if item.find('@')>=0 or item.find('+{')>=0 or item.find('if{')>=0:
            # item contains a tag: remove it and append its expansion(s) to the list,
            # so they are revisited in a later pass (recursion by queueing)
            inputList.pop(i)
            if item.find('@')>=0 and item.find('@{')<0:
                # plain @-tag: the whole item is replaced by the referenced value(s)
                if '.' in item:
                    # 'dict.key' form: look up in the named kwargs container
                    dict = item[1:].split('.')[0]
                    key = item[1:].split('.')[-1]
                    item = toList(kwargs[dict].v(key))
                else:
                    item = toList(data.v(item[1:]))
                inputList = inputList + item
            elif item.find('@{')>=0:
                # @{...} form: substitute each referenced value into the surrounding text
                start = item.find('@{')
                end = item.find('}')
                if '.' in item[start+2:end]:
                    dict = item[start+2:end].split('.')[0]
                    key = item[start+2:end].split('.')[-1]
                    item = [item[:start]+j+item[end+1:] for j in toList(kwargs[dict].v(key))]
                else:
                    item = [item[:start]+str(j)+item[end+1:] for j in toList(data.v(item[start+2:end]), forceNone=True)]   #20-01-2017 added forceNone=True
                inputList = inputList + item
            elif item.find('+{')>=0:
                # +{...} form: expand over a numeric range given as range() arguments
                start = item.find('+{')
                end = item.find('}')
                setrange = range(*eval(item[start+2:end]))
                item = item[:start]+'%s'+item[end+1:]
                for k in setrange:
                    inputList = inputList + toList(item % k)
            elif item.find('if{')>=0:    # added YMD 02-11-2016
                # if{item,condition} form: keep 'item' only when 'condition' holds
                start = item.find('if{')
                end = item.find('}')
                item = item[start+3:end]
                item = item.split(',')
                if len(item)!=2:
                    raise KnownError('error in registry in if{}. Does not have two comma-separated arguments. Make sure there are no spaces in the if-statement')
                try:
                    if eval(item[1]):
                        inputList = inputList + toList(item[0])
                except NameError:
                    # condition contains an unquoted name: retry as string comparison 'lhs==rhs'
                    item1 = item[1].split('==')[0]
                    item2 = item[1].split('==')[1]
                    if item1 == eval(item2):
                        inputList = inputList + toList(item[0])
        else:
            # no tag in this item: keep it and move on
            i += 1
    return list(set(inputList))
def instantiateModule(self):
    """Make and instantiate a module and check whether the input criteria are satisfied.

    Instantiates with one argument (v2.4 interface) when the module's
    __init__ takes only (self, input); otherwise falls back to the v2.3
    two-argument interface and warns. Verifies the presence of run() and,
    for iterative modules, stopping_criterion().

    Raises:
        KnownError - if instantiation fails or required methods are missing
    """
    # find the module & run method
    moduleMain_ = dynamicImport(self.__register.v('packagePath'), self.__register.v('module'))
    try:
        # BUGFIX: inspect.getargspec was removed in Python 3.11; getfullargspec
        # is the drop-in replacement (index [0] is still the positional arg list)
        if len(inspect.getfullargspec(moduleMain_.__init__)[0]) == 2:
            self.module = moduleMain_(
                self.__input
            )  # VERSION 2.4. ONLY ONE ARGUMENT, SUBMODULES NOW IN DATACONTAINER 'INPUT' (SUPPORTS DYNAMIC SUBMODULE LIST IN ITERATIONS)
        else:
            self.module = moduleMain_(
                self.__input, self.submodulesToRun
            )  # VERSION 2.3. SUBMODULES AS EXTRA ARGUMENT. WILL BECOME OBSOLETE [dep02]
            self.logger = logging.getLogger(__name__)
            self.logger.warning(
                'Module ' + self.getName() +
                ' still takes 3 arguments upon initialisation.\nAs of v2.4 it should take 2 arguments. SubmodulesToRun is now found in the DataContainer keyword "submodules".'
            )
    except Exception as e:
        # Reraise a KnownError message received from the module
        if isinstance(e, KnownError):
            raise
        # Else raise a new KnownError
        else:
            raise KnownError(
                'Could not instantiate module %s. Please check if the init method is correct and if all variables used in the init method are also available to the module.\nDo this by checking whether all variables are provided on input and by checking the call stack.'
                % self.__register.v('module'), e)

    # verify the module interface
    if not (hasattr(self.module, 'run')
            and isinstance(self.module.run, types.MethodType)):
        raise KnownError('Module ' + self.__register.v('module') +
                         ' has no working run() method')
    if self.isIterative():
        # BUGFIX: the original tested self.module.run here instead of
        # self.module.stopping_criterion (copy-paste error)
        if not (hasattr(self.module, 'stopping_criterion')
                and isinstance(self.module.stopping_criterion, types.MethodType)):
            raise KnownError('Module ' + self.__register.v('module') +
                             ' has no working stopping_criterion() method')
    return
def derfunction(self, **kwargs):
    """Same as function, but then provides a reference to its derivative.

    Still requires the axis of the derivative in a named argument 'dim'.

    Raises:
        KnownError - if 'dim' is missing or contains an unknown axis name
    """
    # reset operations: negation of a derivative function becomes 'dn';
    # otherwise both .d() and .v() of a derfunction return the same ('d')
    kwargs['operation'] = 'dn' if kwargs.get('operation') == 'n' else 'd'

    # check derivative axis
    dim = kwargs.get('dim')
    if dim is None:
        raise KnownError('Called a derivative function without dim argument')
    if not all(axis in self.dimNames for axis in dim):
        raise KnownError('Called a derivative function with an incorrect dim argument')

    # evaluate; fall back to a function reference when evaluation is impossible
    try:
        return self.__evaluateFunction(**kwargs)
    except FunctionEvaluationError:
        return self.__setReturnReference(kwargs.get('operation'))
def __setReturnReference(self, operation=None):
    '''No difference with FunctionBase, but required here to refer to its own functions.

    Maps an operation code to the corresponding bound function reference:
    falsy -> function, 'n' -> negfunction, 'd' -> derfunction, 'dn' -> dnfunction.
    '''
    if not operation:
        return self.function
    if operation == 'n':
        return self.negfunction
    if operation == 'd':
        return self.derfunction
    if operation == 'dn':
        return self.dnfunction
    raise KnownError('Function called with unknown operation. (This error indicates an incorrectly defined function)')
def checkVariables(self, *args):
    """Check if variables are set.

    For each (name, value) pair this checks that the value differs from None.
    An error is raised for the first None value encountered.

    Parameters:
        args (tuples of two elements: (str, any)) - the first element is the
            variable name, the second its value of any type

    Raises:
        KnownError - if any value is None
    """
    for name, value in args:
        if value is None:
            raise KnownError("Not all required variables are given. Missing '%s' in module '%s'" % (name, self.__class__.__name__))
    return
def __setReturnReference(self, operation=None):
    '''Determine what function reference to return if no value can be returned.

    Falsy operation -> function; 'n' -> negfunction; 'd' -> derfunction;
    'dn' -> dnfunction; anything else is a programming error.
    '''
    if not operation:
        return self.function
    if operation == 'n':
        return self.negfunction
    if operation == 'd':
        return self.derfunction
    if operation == 'dn':
        return self.dnfunction
    raise KnownError('Function called with unknown operation. (This error indicates an incorrectly defined function)')
def d(self, key, *args, **kwargs):
    """Returns the derivative of the value accessed by .v(). See documentation .v().

    Parameters:
        same as in .v(). Additionally:
        kwargs['dim'] (str, int) - NB not optional! Axis along which to take
            the derivative. Provide multiple characters to get higher
            derivatives. Mixed derivatives also allowed, but order of
            derivation should not matter.

    Raises:
        KnownError - if 'dim' is not supplied
    """
    kwargs['operation'] = 'd'
    if kwargs.get('dim') is None:
        raise KnownError('Called .d() method without supplying the axis of derivation.'
                         '\n Please add a dim=(str or int) argument.')
    return self.v(key, *args, **kwargs)
def dd(self, key, *args, **kwargs):
    """Returns the second derivative of the value accessed by .v(). See documentation .v().

    METHOD DEPRECIATED FROM v2.2 [dep01]

    Parameters:
        same as in .v(). Additionally:
        kwargs['dim'] (str, int) - NB not optional! Axis along which to take
            the derivative.

    Raises:
        KnownError - if 'dim' is not supplied
    """
    kwargs['operation'] = 'dd'
    if kwargs.get('dim') is None:
        raise KnownError('Called .dd() method without supplying the axis of derivation.'
                         '\n Please add a dim=(str or int) argument.')
    return self.v(key, *args, **kwargs)
def stopping_criterion(self, iteration):
    """Invoke method 'stopping_criterion' of the underlying module.

    Passes the iteration number down to the module.

    Parameters:
        iteration (int) - number of the current iteration

    Exception:
        KnownError - if the underlying method does not return a boolean

    Returns:
        bool - whether to stop (True) or continue (False)
    """
    decision = self.module.stopping_criterion(iteration)
    if not isinstance(decision, bool):
        raise KnownError('Stopping critertion of module %s is invalid. Please make sure to return a boolean.' % self.getName())
    return decision
def open(self, filePath):
    """Open an ASCII file and store its file pointer.

    Parameters:
        filePath - path of the input file

    Raises:
        KnownError - if the input file was not found
    """
    # throw a KnownError (wrapping the IOError) if the file is not found
    try:
        fp = open(filePath, 'r')
    except IOError as err:
        raise KnownError(('No file found at ' + filePath), err)
    self.filePointer = fp
    return
def run(self, init=False):
    """Invoke the module's run() method, or run_init() if the module is iterative and init=True.

    Parameters:
        init (bool, optional) - if True and the module is iterative, invoke run_init

    Exception:
        KnownError - if the output of the underlying module is not a dictionary

    Returns:
        dict with the results of the calculated module
    """
    self.timer.tic()
    try:
        # 21-07-2017 YMD correction: check if iterative, not if run_init exists
        if init and self.isIterative():
            result = self.module.run_init()
        else:
            result = self.module.run()
    except Exception:
        # best-effort mode: flag the error instead of aborting the whole run
        if cf.IGNOREEXCEPTIONS:
            result = {'ERROR': True}
        else:
            raise

    # YMD 10-12-2019: result stays a plain dict; no DataContainer conversion
    if not isinstance(result, dict):
        raise KnownError('Output of module %s is invalid. Please make sure to return a dictionary.' % self.getName())
    self.timer.toc()
    return result
def transportplot_mechanisms(self, **kwargs):
    """Plots the advective transport based on the physical mechanisms that force it.

    kwargs:
        sublevel: string or bool
            displays underlying levels of the associated mechanisms:
            'sublevel', 'subsublevel' or False
        plotno: integer
            plot number
        display: integer or list of strings
            an integer plots the largest contributions up to that integer;
            a list of strings plots the mechanisms in that list
        scale: boolean
            scales the transport contributions to the maximum value of all contributions
        concentration: boolean
            plots the depth-mean, sub-tidal concentration in the background
        legend: 'in' or 'out'
            legend placement; anything other than 'in' is treated as 'out'
    """
    ################################################################################################################
    # Extract args and/or kwargs
    ################################################################################################################
    sublevel = kwargs.get('sublevel') or kwargs.get('subsublevel') or False     # show sub-level data: True/False
    plotno = kwargs.get('plotno') or 1          # set plot number (default 1)
    display = kwargs.get('display') or 5        # display number of mechanisms (sorted in descending order) or specific mechanisms
    scale = kwargs.get('scale') or False        # scale the transport contributions to the maximum: True/False
    concentration = kwargs.get('concentration') or False
    legend = kwargs.get('legend')
    if legend != 'in':
        legend = 'out'

    ################################################################################################################
    # Construct list of mechanisms to display and calculate these mechanisms
    ################################################################################################################
    # get keys of the transport mechanisms to display entered by the user or all mechanisms
    if isinstance(display, list):
        if set(display).issubset(self.input.getKeysOf('T')):
            keyList = display
        else:
            raise KnownError('Not all transport mechanisms passed with display are available.')
    else:
        keyList = self.input.getKeysOf('T')

    # get availability and its derivative w.r.t. x
    x = self.input.v('grid', 'axis', 'x')
    if self.input.v('f') is not None:
        a = self.input.v('f', x=x).reshape(len(x),)
        a_x = self.input.d('f', x=x, dim='x').reshape(len(x),)
    else:
        a = self.input.v('a').reshape(len(x),)
        a_x = -self.input.v('T') * a / self.input.v('F')

    # construct list with values to plot
    loopvalues = [[]]
    if sublevel:
        tmp_max = []
        for key in keyList:
            if not 'diffusion' in key:
                T = self.input.v('T', key)
                if key in self.input.getKeysOf('F'):
                    trans = T * a + self.input.v('F', key) * a_x
                    loopvalues[0].append([trans, np.sqrt(np.mean(np.square(trans))), key])
                else:
                    trans = T * a
                    loopvalues[0].append([trans, np.sqrt(np.mean(np.square(trans))), key])
                tmp_max.append(abs(trans).max())
                if sublevel == 'subsublevel' and len(self.input.slice('T', key).getAllKeys()[0]) > 2:
                    loopvalues.append([])
                    tmpkeys = sorted(self.input.slice('T', key).getAllKeys(), key=itemgetter(2))
                    # BUGFIX: use floor division; len(tmpkeys)/3 is a float in Python 3 and range() rejects it
                    subkeys = [tmpkeys[i*3:3+i*3] for i in range(len(tmpkeys) // 3)]
                    for subkey in subkeys:
                        tmp = np.zeros(a.shape)
                        for subsubkey in subkey:
                            tmp += self.input.v('T', *subsubkey) * a
                        loopvalues[len(loopvalues)-1].append([tmp, np.sqrt(np.mean(np.square(trans))), subsubkey[-1]])
                    loopvalues[len(loopvalues)-1].append([trans, np.sqrt(np.mean(np.square(trans))), key])
        maxT = max(tmp_max)
        # Sort transport mechanisms based on the value of their root-mean-square value
        loopvalues[0] = sorted(loopvalues[0], key=itemgetter(1), reverse=True)
        # Only take the largest transport contributions indicated by the display integer. If the display integer is
        # larger than the length of the keyList, then all contributions are taken into account
        if isinstance(display, int):
            loopvalues[0] = loopvalues[0][:min(display, len(keyList))]
        # Sort alphabetically so that mechanisms receive the same line color for plotting
        loopvalues[0] = sorted(loopvalues[0], key=itemgetter(2))
    else:
        Ttotal = ((self.input.v('T') - self.input.v('T', 'diffusion_tide') - self.input.v('T', 'diffusion_river')) * a +
                  (self.input.v('F') - self.input.v('F', 'diffusion_tide') - self.input.v('F', 'diffusion_river')) * a_x)
        loopvalues[0].append([Ttotal, np.sqrt(np.mean(np.square(Ttotal))), 'total'])
        maxT = abs(Ttotal).max()

    ################################################################################################################
    # determine number and shape of subplots
    ################################################################################################################
    numberOfSubplots = len(loopvalues)
    subplotShape = (numberOfSubplots, 2)

    ################################################################################################################
    # plot
    ################################################################################################################
    ## load grid data
    xdim = ny.dimensionalAxis(self.input.slice('grid'), 'x')[:, 0, 0]
    conv_grid = cf.conversion.get('x') or 1.    # convert size of axis depending on conversion factor in config
    xdim = xdim * conv_grid

    ## plot
    plt.figure(plotno, dpi=cf.dpi, figsize=subplotShape)
    if not sublevel:
        sp = plt.subplot()
        plt.axhline(0, color='k', linewidth=0.5)
        if scale:
            loopvalues[0][0][0] = loopvalues[0][0][0] / maxT
        ln = []
        ln += sp.plot(xdim, loopvalues[0][0][0], label='adv. transport')
        if concentration:
            conv = cf.conversion.get('c') or 1.
            c = np.real(np.mean(self.input.v('c0')[:, :, 0] + self.input.v('c1')[:, :, 0] + self.input.v('c2')[:, :, 0], axis=1))*conv
            if scale:
                c = c / c.max()
                ln += sp.plot(xdim, c, '--', color='grey', label=r'$\langle\bar{c}\rangle$')
                labels = [l.get_label() for l in ln]
                # BUGFIX (here and below): compare legend with ==, not 'is' (string identity is unreliable)
                if legend == 'out':
                    plt.legend(ln, labels, bbox_to_anchor=(1.02, 0), loc=3, borderaxespad=0., fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4)
                elif legend == 'in':
                    plt.legend(ln, labels, loc='upper left', borderaxespad=0.2, fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4, frameon=False)
                plt.title('Advective Transport')
            else:
                sp2 = sp.twinx()
                ln += sp2.plot(xdim, c, '--', color='grey', label=r'$\langle\bar{c}\rangle$')
                labels = [l.get_label() for l in ln]
                if legend == 'out':
                    plt.legend(ln, labels, bbox_to_anchor=(1.3, 0), loc=3, borderaxespad=0., fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4)
                elif legend == 'in':
                    plt.legend(ln, labels, loc='upper left', borderaxespad=0.2, fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4, frameon=False)
                plt.title('Advective Transport', y=1.09)
        else:
            plt.title('Advective Transport')

        ## Axis labels
        try:
            xname = cf.names['x']
            xunit = cf.units['x']
        except KeyError:
            xname = 'x'
            xunit = ''
        plt.xlabel(xname + ' (' + xunit + ')', fontsize=cf.fontsize)
        try:
            yunitT = cf.units['T']
            if concentration:
                if scale:
                    sp.set_ylabel(r'$\mathcal{T}$ / $\mathcal{T}_{max}$, $c$ / $c_{max}$ (-)', fontsize=cf.fontsize)
                    if legend == 'in':
                        sp.set_ylim([-1.1, 1.1])
                    else:
                        sp.set_ylim([-1.1, 1.1])
                else:
                    yunitc = cf.units['c']
                    sp.set_ylabel(r'$\mathcal{T}$ (' + yunitT + ')', fontsize=cf.fontsize)
                    sp2.set_ylabel(r'$c$ (' + yunitc + ')', fontsize=cf.fontsize)
            else:
                if scale:
                    sp.set_ylabel(r'$\mathcal{T}$ / $\mathcal{T}_{max}$ (' + yunitT + ')', fontsize=cf.fontsize)
                    if legend == 'in':
                        sp.set_ylim([-1.1, 1.1])
                    else:
                        sp.set_ylim([-1.1, 1.1])
                else:
                    sp.set_ylabel(r'$\mathcal{T}$ (' + yunitT + ')', fontsize=cf.fontsize)
        except KeyError:
            # BUGFIX: yname was a list ([r'$\mathcal{T}$']), making the concatenation below a TypeError
            yname = r'$\mathcal{T}$'
            yunit = ''
            plt.ylabel(yname + ' (' + yunit + ')', fontsize=cf.fontsize)
    else:
        for subplot_number, subplot_values in enumerate(loopvalues):
            pos = np.unravel_index(subplot_number, subplotShape)
            sp = plt.subplot2grid(subplotShape, (pos[0], pos[1]))
            plt.axhline(0, color='k', linewidth=0.5)
            # loop over all combinations of the data
            ln = []
            for i, value in enumerate(subplot_values):
                try:
                    label = cf.transportlabels[value[2]]
                except KeyError:
                    label = value[2]
                if scale:
                    value[0] = value[0] / maxT
                if i == len(subplot_values)-1 and subplot_number >= 1:
                    ln += sp.plot(xdim, value[0], 'k', label=label)
                else:
                    ln += sp.plot(xdim, value[0], label=label)
            if concentration and subplot_number == 0:
                conv = cf.conversion.get('c') or 1.
                c = np.real(np.mean(self.input.v('c0')[:, :, 0] + self.input.v('c1')[:, :, 0] + self.input.v('c2')[:, :, 0], axis=1)) * conv
                if scale:
                    c = c / c.max()
                    ln += sp.plot(xdim, c, '--', color='grey', label=r'$\langle\bar{c}\rangle$')
                    labels = [l.get_label() for l in ln]
                    if legend == 'out':
                        plt.legend(ln, labels, bbox_to_anchor=(1.02, 0), loc=3, borderaxespad=0., fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4)
                    elif legend == 'in':
                        plt.legend(ln, labels, loc='upper left', borderaxespad=0.2, fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4, frameon=False)
                    if subplot_number == 0:
                        plt.title('Advective Transport')
                    else:
                        title = keyList[subplot_number-1]
                        try:
                            title = cf.names[title]
                        except:
                            pass
                        plt.title(title, fontsize=cf.fontsize)
                else:
                    sp2 = sp.twinx()
                    ln += sp2.plot(xdim, c, '--', color='grey', label=r'$\langle\bar{c}\rangle$')
                    labels = [l.get_label() for l in ln]
                    if legend == 'out':
                        plt.legend(ln, labels, bbox_to_anchor=(1.3, 0), loc=3, borderaxespad=0., fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4)
                    elif legend == 'in':
                        plt.legend(ln, labels, loc='upper left', borderaxespad=0.2, fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4, frameon=False)
                    if subplot_number == 0:
                        plt.title(r'Advective Transport ', y=1.09, fontsize=cf.fontsize)
                    else:
                        title = keyList[subplot_number-1]
                        try:
                            title = cf.names[title]
                        except:
                            pass
                        plt.title(title, y=1.09, fontsize=cf.fontsize)
            else:
                if legend == 'out':
                    plt.legend(bbox_to_anchor=(1.02, 0), loc=3, borderaxespad=0., fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4)
                elif legend == 'in':
                    plt.legend(ln, labels, loc='upper left', borderaxespad=0.2, fontsize=cf.fontsize2, labelspacing=0.1, handlelength=0.1, handletextpad=0.4, frameon=False)
                if subplot_number == 0:
                    plt.title('Advective Transport', fontsize=cf.fontsize)
                else:
                    title = keyList[subplot_number-1]
                    try:
                        title = cf.names[title]
                    except:
                        pass
                    if concentration and subplot_number > 0:
                        plt.title(title, y=1.09, fontsize=cf.fontsize)
                    else:
                        plt.title(title, fontsize=cf.fontsize)

            # axis labels and limits. Try to get from config file, else take plain name
            try:
                xname = cf.names['x']
                xunit = cf.units['x']
            except KeyError:
                xname = 'x'
                xunit = ''
            plt.xlabel(xname + ' (' + xunit + ')', fontsize=cf.fontsize)
            try:
                yunitT = cf.units['T']
                if concentration:
                    if scale:
                        if subplot_number == 0:
                            sp.set_ylabel(r'$\mathcal{T}$ / $\mathcal{T}_{max}$, $c$ / $c_{max}$ (-)', fontsize=cf.fontsize)
                        else:
                            sp.set_ylabel(r'$\mathcal{T}$ / $\mathcal{T}_{max}$ (-)', fontsize=cf.fontsize)
                        if legend == 'in':
                            sp.set_ylim([-1.1, 1.1])
                        else:
                            sp.set_ylim([-1.1, 1.1])
                    else:
                        yunitc = cf.units['c']
                        sp.set_ylabel(r'$\mathcal{T}$ (' + yunitT + ')', fontsize=cf.fontsize)
                        sp2.set_ylabel(r'$c$ (' + yunitc + ')', fontsize=cf.fontsize)
                else:
                    if scale:
                        sp.set_ylabel(r'$\mathcal{T}$ / $\mathcal{T}_{max}$ (' + yunitT + ')', fontsize=cf.fontsize)
                        if legend == 'in':
                            sp.set_ylim([-1.1, 1.1])
                        else:
                            sp.set_ylim([-1.1, 1.1])
                    else:
                        sp.set_ylabel(r'$\mathcal{T}$ (' + yunitT + ')', fontsize=cf.fontsize)
            except KeyError:
                # BUGFIX: plt.label does not exist (AttributeError) -> plt.ylabel;
                # and yname was a list, making the concatenation a TypeError
                yname = r'$\mathcal{T}$'
                yunit = ''
                plt.ylabel(yname + ' (' + yunit + ')', fontsize=cf.fontsize)
            plt.xlim(0, max(xdim))
    plt.draw()
    return
def timestepping(self, T, F, alpha1, alpha2, Told, Fold, alpha1old, alpha2old, Xold, fsea, G):
    """Advance the sediment stock S and erodibility f over one time step self.dt.

    Uses a theta-method in time with first-order upwinding of the advective
    transport in space. The erodibility-stock relation is linearised around the
    previous time level (coefficients hder, hda2, beta below) and the resulting
    banded system (2 subdiagonals, 1 superdiagonal) is solved with
    scipy.linalg.solve_banded.

    Parameters:
        T, F - transport and diffusion coefficients at the new time level (per x grid point)
        alpha1, alpha2 - stock-relation coefficients at the new time level
        Told, Fold, alpha1old, alpha2old - same quantities at the previous time level
        Xold - previous state: concatenation (f, S), each of length jmax+1
        fsea - erodibility imposed at the seaward boundary x=0
        G - source term (per x grid point)

    Returns:
        X - new state: concatenation (f, S), each of length jmax+1

    Raises:
        KnownError - if the banded linear solve fails
    """
    jmax = self.input.v('grid', 'maxIndex', 'x')
    A = np.zeros((4, jmax + 1))     # banded matrix in solve_banded layout for bands (2, 1)
    rhs = np.zeros((jmax + 1))
    Sold = Xold[jmax + 1:]          # stock at the previous time level
    hatSold = Sold / alpha1old
    fold = Xold[:jmax + 1]          # erodibility at the previous time level
    # erodibility-stock relation and its derivatives, evaluated at the previous level
    h = self.erodibility_stock_relation(alpha2old, hatSold)
    hder = self.erodibility_stock_relation_der(alpha2old, hatSold)
    hda2 = self.erodibility_stock_relation_da2(alpha2old, hatSold)
    # constant term of the linearisation f ~= hder/alpha1*S + beta
    beta = h - hder * hatSold + hda2 * (alpha2 - alpha2old)

    # transport divergence coefficients at the new time level (implicit part)
    Tx = np.gradient(T, self.x, edge_order=2)
    Fx = np.gradient(F, self.x, edge_order=2)
    dif = self.B * F
    adv = self.B * T + self.Bx * F + self.B * Fx
    BTx = self.B * Tx + self.Bx * T

    # same coefficients at the previous time level (explicit part of the theta-method)
    Txold = np.gradient(Told, self.x, edge_order=2)
    Fxold = np.gradient(Fold, self.x, edge_order=2)
    dif_old = self.B * Fold
    adv_old = self.B * Told + self.Bx * Fold + self.B * Fxold
    BTx_old = self.B * Txold + self.Bx * Told

    # interior: upwind advection (np.maximum/np.minimum select the upwind side)
    # plus central diffusion on the possibly non-uniform grid spacing self.dx
    A[0, 2:] = +self.theta * np.minimum(adv[1:-1], 0) * hder[2:] / (
        alpha1[2:] * self.dx[1:]) + self.theta * dif[1:-1] / (
        0.5 * (self.dx[1:] + self.dx[:-1])) * hder[2:] / alpha1[2:] / self.dx[1:]
    A[1, 1:-1] = self.B[1:-1]/self.dt + self.theta*BTx[1:-1]*hder[1:-1]/alpha1[1:-1] + \
        self.theta*np.maximum(adv[1:-1], 0)*hder[1:-1]/(alpha1[1:-1]*self.dx[:-1]) - self.theta*np.minimum(adv[1:-1], 0)*hder[1:-1]/(alpha1[1:-1]*self.dx[1:]) \
        - self.theta*dif[1:-1]/(0.5*(self.dx[1:]+self.dx[:-1]))*hder[1:-1]/alpha1[1:-1]*(1./self.dx[1:]+1./self.dx[:-1])
    A[2, :-2] = -self.theta * np.maximum(adv[1:-1], 0) * hder[:-2] / (
        alpha1[:-2] * self.dx[:-1]) + self.theta * dif[1:-1] / (
        0.5 * (self.dx[1:] + self.dx[:-1])) * hder[:-2] / alpha1[:-2] / self.dx[:-1]
    rhs[1:-1] = self.B[1:-1]/self.dt*Sold[1:-1] - self.theta*BTx[1:-1]*beta[1:-1] - self.theta*np.maximum(adv[1:-1], 0)*(beta[1:-1]-beta[:-2])/self.dx[:-1] - self.theta*np.minimum(adv[1:-1], 0)*(beta[2:]-beta[1:-1])/self.dx[1:] \
        -self.theta*dif[1:-1]*((beta[2:]-beta[1:-1])/self.dx[1:] - (beta[1:-1]-beta[:-2])/self.dx[:-1])/(0.5*(self.dx[1:]+self.dx[:-1])) \
        + (1-self.theta) * (-BTx_old[1:-1]*fold[1:-1] - np.maximum(adv_old[1:-1], 0)*(fold[1:-1]-fold[:-2])/self.dx[:-1] - np.minimum(adv_old[1:-1], 0)*(fold[2:]-fold[1:-1])/self.dx[1:] \
        -dif_old[1:-1]*((fold[2:]-fold[1:-1])/self.dx[1:] - (fold[1:-1]-fold[:-2])/self.dx[:-1])/(0.5*(self.dx[1:]+self.dx[:-1])))
    rhs[1:-1] += -self.B[1:-1] * G[1:-1]

    # Quick fix for ensuring positivity (Patankar, 1980); could be neater for greater accuracy
    A[1, 1:-1] += -np.minimum(rhs[1:-1], 0) / (Sold[1:-1] + 1.e-6)
    rhs[1:-1] = np.maximum(rhs[1:-1], 0)

    # Boundaries
    # x=0: impose the erodibility fsea through the linearised stock relation,
    # unless hder==0 (relation insensitive to S), in which case S is kept fixed
    if hder[0] == 0:
        A[1, 0] = 1
        rhs[0] = Sold[0]
    else:
        A[1, 0] = hder[0] / alpha1[0]
        rhs[0] = fsea - h[0] + hder[0] * hatSold[0]

    # x=L: second-order one-sided discretisation of the transport condition
    if hder[-1] == 0:
        A[1, -1] = 1  # TODO - needs correction; VIOLATION OF MASS BALANCE. This line will only apply in case Q1 = 0
        rhs[-1] = Sold[-1]
        self.logger.warning(
            'f=1 at upstream boundary. The code is not correct for this case and mass balance may be violated. Please investigate.'
        )
    else:
        A[1, -1] = self.B[-1] * T[-1] * hder[-1] / alpha1[
            -1] + 3. / 2. * self.B[-1] * F[-1] * hder[-1] / alpha1[
            -1] / self.dx[-1]
        A[2, -2] = -2. * self.B[-1] * F[-1] * hder[-2] / alpha1[
            -2] / self.dx[-1]
        A[3, -3] = 0.5 * self.B[-1] * F[-1] * hder[-3] / alpha1[
            -3] / self.dx[-1]
        rhs[-1] = -self.B[-1] * T[-1] * beta[-1] - self.B[-1] * F[-1] * (
            3. / 2. * beta[-1] - 2. * beta[-2] + 0.5 * beta[-3]) / self.dx[-1]
        rhs[-1] += -self.B[-1] * G[-1]

        # alternative first order
        # A[1, -1] = self.B[-1]*T[-1]*hder[-1]/alpha1[-1] + self.B[-1]*F[-1]*hder[-1]/alpha1[-1]/self.dx[-1]
        # A[2, -2] = -1.*self.B[-1]*F[-1]*hder[-2]/alpha1[-2]/self.dx[-1]
        # rhs[-1] = -self.B[-1]*T[-1]*beta[-1] - self.B[-1]*F[-1]*(1*beta[-1] - 1.*beta[-2])/self.dx[-1]
        # rhs[-1] += self.B[-1]*G[-1]

    # solve the banded system for the new stock; Python 2 print statement below
    try:
        S = scipy.linalg.solve_banded((2, 1), A, rhs, overwrite_ab=False, overwrite_b=False)
    except:
        print Xold[:jmax + 1]
        raise KnownError('Time integration failed.')

    # new erodibility from the (non-linearised) stock relation
    f = self.erodibility_stock_relation(alpha2, S / alpha1)

    X = np.concatenate((f, S), axis=0)
    return X
def v(self, key, *args, **kwargs):
    """Check and retrieve a value with key 'key' and optional subkeys given in args.

    Practical call forms:
        v(key, subkey, ..., i, j, k)        - access by grid indices (ints or 1D int lists/arrays)
        v(key, subkey, ..., x=.., z=.., f=..) - access by dimensionless grid coordinates

    The method consecutively:
    1. Uses the string arguments in args to descend into sub-dictionaries.
    2. If a dictionary remains, tries to sum its underlying values; returns True
       if they exist but cannot be added.
    3. Evaluates the value found, depending on its type:
       a. list: indexed by the numerical args; full (sub)lists returned for
          omitted dimensions. No reshaping or coordinate access for lists.
       b. numpy array: indexed by args, or interpolated on the grid
          ('grid' entry of this DataContainer) at the coordinates in kwargs.
          Data dimensions must follow the grid's 'dimensions' order.
       c. function/method reference: called with coordinates derived from
          args or kwargs; the reference itself is returned if the call
          arguments are incomplete.
       d. scalar/string: returned as-is (possibly copied to the requested shape).
    4. Reshapes the result to the shape of the request (unless reshape=False).

    Parameters:
        key - key of value in the data dictionary
        args - (optional str) sub-keys, in hierarchical order, followed by
               (optional int / 1D int list/array) grid indices in dimension order
        kwargs - (optional int / 1D int list/array) dimensionless grid
                 coordinates named after the grid dimensions
                 reshape - (optional, bool) reshape result to shape of request.
                 Default: True

    Raises:
        KnownError - on out-of-range indices, access to a non-existing
                     dimension, coordinate access without a supported grid,
                     or when a dictionary cannot be evaluated

    Returns:
        scalar, string, list or array belonging to key and optional subkeys on
        the requested indices/coordinates. None if the key was not found.
        True if a dictionary was found whose underlying values could not be added.
    """
    # preparation: remove optional argument 'reshape' from the argument list
    try:
        reshape = kwargs.pop('reshape')
    except:
        reshape = True

    ############################################################################
    # STEPS 1, 2: descend through sub-dictionaries using the string args
    ############################################################################
    value = self.data  # use value as a running variable that is reduced step by step
    value, args, kwargs, done = self.__unwrapDictionaries(
        value, key, *args, **kwargs)
    # if data from sub-keys has been summed up, __unwrapDictionaries has already
    # called .v internally and sets done=True; return that result directly
    if done:
        return value

    # return None if nothing was found
    if value == []:
        return None
    # return True when a dictionary is found
    elif isinstance(value, dict):
        return True

    ############################################################################
    # STEP 3a. Lists: direct indexing only, no reshaping
    ############################################################################
    elif isinstance(value, list):
        reshape = False
        # use the args as indices
        if args:
            for i in args:
                try:
                    #value = (np.asarray(value)[i]).tolist() #removed 08-12-15, replaced by lines below
                    if isinstance(i, int):
                        value = value[i]
                    else:
                        value = (np.asarray(value)[i]).tolist()
                except IndexError as e:
                    raise KnownError('Index out of range', e)
                except TypeError as e:
                    raise KnownError(
                        'Tried to access a dimension that does not exist', e)

    ############################################################################
    # STEP 3b. Numpy arrays: index access (args) or grid interpolation (kwargs)
    ############################################################################
    elif isinstance(value, np.ndarray):
        # use the args as indices
        if args:
            try:
                # rebuild the index tuple per data axis:
                # singleton axes collapse to [0], requested axes take the given
                # indices, trailing axes keep their full original range
                newArgs = ()
                for i, v in enumerate(value.shape):
                    if value.shape[i] == 1:
                        newArgs += ([0], )  # return single dimension for non-existing axes
                    elif i < len(args):
                        newArgs += (nf.toList(args[i]), )  # return data on requested points for existing axes
                    else:
                        newArgs += (range(0, v), )  # return data on original grid if dimension not provided
                value = self.__operationsNumerical(
                    value, *newArgs, **kwargs)  # perform any operation on the data
            except IndexError as e:
                raise KnownError('Index out of range', e)
            except TypeError as e:
                raise KnownError(
                    'Tried to access a dimension that does not exist', e)
        # otherwise use kwargs if not empty
        else:
            value = self.__operationsNumerical(value, **kwargs)
            if kwargs:
                if self.data['grid'].get('gridtype') == 'Regular':
                    # interpolation on a regular grid
                    interpolator = RegularGridInterpolator()
                    value = interpolator.interpolate(value, self, **kwargs)
                else:
                    # give an error message if the grid type has no data interpolation method implemented
                    raise KnownError(
                        'Access to gridded data not implemented on a grid of type %s'
                        % (self.data['grid']['gridtype']))

    ############################################################################
    # STEP 3c. Functions and methods: call with coordinates, or return the
    # reference when the call arguments are incomplete
    ############################################################################
    elif isinstance(value, types.MethodType) or isinstance(
            value, types.FunctionType):
        coordinates = kwargs.copy()
        if args:
            coordinates.update(
                convertIndexToCoordinate(self.data['grid'], args))
        # convert called dimensions to array, i.e. the function will not be
        # called with scalar arguments (comprehension used for its side effect)
        [
            coordinates.update(
                {dim: np.asarray(nf.toList(coordinates[dim]))})
            for dim in value.im_self.dimNames
            if coordinates.get(dim) is not None
        ]
        functiondims = value.im_self.dimNames
        value = value(
            reshape=reshape, **coordinates
        )  # function call with coordinates and operations in **coordinates
        if isinstance(value, types.MethodType) or isinstance(
                value, types.FunctionType):
            reshape = False
        else:
            # before full reshape, first reshape so that shape matches the order of 'dimensions'
            value = self.__reshapeFunction(value, functiondims)

    ############################################################################
    # STEP 3d. Scalars, strings & other items
    ############################################################################
    else:
        value = self.__operationsNumerical(value, **kwargs)

    ############################################################################
    # STEP 4: reshape to the shape of the request
    ############################################################################
    # remove operation keyword, if any, before reshaping
    try:
        kwargs.pop('operation')
    except:
        pass

    if reshape:
        value = self.__reshape(value, *args, **kwargs)

    return value
def read(self, chapter='', name=''):
    """General reader. Reads data in the file between 'chapter' 'name' and the
    next 'chapter' tag (the first 'chapter' line is inclusive).

    Expected file structure (indented lines belong to the preceding key):
        chaptername name (string)
        key value (scalar, string or vector)
            key subkey value
            key " " " "

    Parameters:
        chapter - (str, optional) chapter tag. Default: ''; read full file
        name - (str, optional) name of the 'chapter' block to be read

    Raises:
        KnownError - when a chapter block cannot be converted to a DataContainer
                     (e.g. an unfinished 'module' entry)

    Returns:
        list containing a DataContainer with the read data for each chapter block
    """
    self.filePointer.seek(0)  # restarts reading the file
    startChapter = [chapter, name]
    endChapter = chapter
    inChapter = False       # True while inside the requested chapter block
    inIndent = False        # True while inside an indented sub-block
    containerList = []
    dataStructures = []     # raw per-line token lists of the current chapter

    # start reading
    for line in self.filePointer:
        line = self.__removeComment(line)
        # tokenized copy of the line used only for tag matching; '' is appended
        # so that an empty 'chapter'/'name' matches any line
        linesplit = (
            (line.replace('.', ' ')).replace('\t', ' ')).split(' ') + ['']

        if endChapter in linesplit and inChapter:
            # stop reading. Convert and clean result
            inChapter = not inChapter
            if inIndent:
                # add sublist if this has not been done yet
                dataStructures[-1].append(sublist)

            # convert raw data to a dictionary and load into a DataContainer
            dataStructures = nestedListToDictionary(dataStructures)
            try:
                dataStructures = DataContainer(dataStructures)
            except:
                raise KnownError(
                    'Incomplete entry in the input file. Please check if there is an unfinished entry with keyword "module".'
                )
            containerList.append(dataStructures)

        if startChapter[0] in linesplit and startChapter[
                1] in linesplit and not inChapter:
            # start reading
            inChapter = not inChapter
            inIndent = False
            dataStructures = []

        if inChapter:
            # in the right chapter
            if not line == "":
                # if line starts with a space/tab: this line belongs to a block
                if line[:1] == '\t' or line[:1] == " ":
                    # start a new block if no block was started yet
                    if not inIndent:
                        sublist = dataStructures[-1][1:]
                        inIndent = True
                    # if isinstance(sublist[0], list): sublist.append(line.split())
                    # else: sublist = [sublist]

                    sublist.append(line.split())
                    # keep only the key on the parent line; its values moved to sublist
                    del dataStructures[-1][1:]
                elif (not (line[:1] == '\t'
                           or line[:1] == " ")) and inIndent:
                    # indented block just ended: attach it to its parent key
                    inIndent = False
                    dataStructures[-1].append(sublist)
                    dataStructures.append(line.split())
                else:
                    dataStructures.append(line.split())

    # repeat the block controlling reading stop
    # stop reading. Convert and clean result (handles a chapter that runs to EOF)
    if inChapter:
        if inIndent:
            # add sublist if this has not been done yet
            dataStructures[-1].append(sublist)

        # convert raw data to a dictionary and load into a DataContainer
        dataStructures = nestedListToDictionary(dataStructures)
        dataStructures = DataContainer(dataStructures)
        containerList.append(dataStructures)

    return containerList
def dynamicImport(path, name):
    """Dynamically load a class from its path.

    Parameters:
        path - path to the class to be loaded, including the package but not the
               class name or .py extension. Accepts folder descriptions separated
               by ., \ or /. Also accepts None or an empty path.
        name - name of the class file (without .py)

    Returns:
        pointer to the class' __init__ method

    Raises:
        KnownError if the class file or class name could not be found
    """
    # normalise the package path to dot-separated form, ending in a dot
    if path is None or path == '':
        packagePath = ''
    else:
        packagePath = path.replace('\\', '.').replace('/', '.')
        if packagePath[-1:] != '.':  # add . at the end if not already
            packagePath += '.'

    # import and retrieve the constructor method
    try:
        segments = (packagePath + name).split('.')

        # new in v2.4: walk the package chain explicitly so that modules are
        # only searched for in a controlled set of packages (localpath)
        for depth in range(len(segments) - 1):
            # the first segment is looked up in localpath; deeper segments are
            # looked up inside the package found in the previous step
            searchpath = localpath if depth == 0 else [found[1]]
            try:
                found = imp.find_module(segments[depth], searchpath)
            except ImportError as e:
                raise KnownError('Error while loading module ' + '.'.join(segments)+'.\nPlease check if '+str(segments[-1]+' exists in package '+'.'.join(segments[:-1]))+'.\nError given is: '+e.message)
            imp.load_module('.'.join(segments[:depth+1]), *found)

        # find and load the class module itself, then fetch the class from it
        modlocation = imp.find_module(name, [found[1]])
        loadedModule = imp.load_module(packagePath + name, *modlocation)
        classPointer = getattr(loadedModule, name)
    except ImportError as e:
        # the missing name sits at the end of the exception message; only
        # translate to a KnownError when the failure concerns 'name' itself
        errorlocation = e.message.split(' ')[-1].split('.')[-1]
        if errorlocation == name:
            raise KnownError("Could not find the class file with name '%s.py' in package '%s'." % (name, path), e)
        else:
            raise  # re-raises the original exception
    except AttributeError as e:
        errorlocation = e.message.split(' ')[-1].split('.')[-1]
        errorlocation = errorlocation.replace("'", "").replace('"', '')
        if errorlocation == name:
            raise KnownError("Could not find the class name '%s' in package '%s'" % (name, path), e)
        else:
            raise

    return classPointer
def primitive(u, dimNo, low, high, grid, *args, **kwargs):
    """Compute the integral of a numerical array between grid indices low and high.
    The scheme is specified by kwarg INTMETHOD or, if absent, by src.config.

    NB. 'indices' now only as indices (not as coordinates)
    NB. when requesting a shape that has more dimensions than the data, this
        method fails. Needs fixing (TODO)
    NB. low/high only as indices. Needs coordinates (incl. interpolation) later

    Parameters:
        u (ndarray) - data to take the integral of
        dimNo (int or str) - number or name of the dimension to integrate over
        low (int) - index of lower integration boundary grid point
        high (int) - index of upper integration boundary grid point
        grid (DataContainer) - DataContainer containing grid information
        args (ndarray/list, optional) - indices at which the integral is
            requested. The integration dimension should be included to keep the
            order of dimensions correct, but that entry is not used. When
            omitted, data is requested on the original grid; omitted dimensions
            follow the same rules as the DataContainer .v method.
        kwargs (optional):
            INTMETHOD - integration scheme; taken from the config file if not set

    Raises:
        KnownError - if INTMETHOD names an unimplemented scheme

    Returns:
        Ju (ndarray) - numerical integral of u in dimension dimNo evaluated
        between grid indices 'low' and 'high'
    """
    INTMETHOD = kwargs.get('INTMETHOD') or cf.INTMETHOD
    # find dimension name corresponding to dimNo (or vv)
    if isinstance(dimNo, int):
        dim = grid.v('grid', 'dimensions')[dimNo]
    else:  # else assume dimNo is a string with dimension name
        dim = dimNo
        dimNo = grid.v('grid', 'dimensions').index(dim)

    # preparation: determine the size of u and the maximum index along the axis of derivation
    # if this maximum index does not exist or is zero, then return a zero array
    u = np.asarray(u)
    inds = [np.arange(0, n) for n in u.shape]
    # replace indices by requested indices wherever available
    for n in range(0, min(len(args), len(inds))):
        inds[n] = np.asarray(args[n])

    # trapezoidal integration
    if INTMETHOD == 'TRAPEZOIDAL':
        # indices of upper and lower grid points
        # NOTE(review): upInds aliases inds while downInds is a copy; this works
        # because downInds is copied before upInds[dimNo] is overwritten, but the
        # aliasing is fragile - confirm before reordering these lines
        upInds = inds
        downInds = inds[:]
        # Py2: range() returns a list here, so list concatenation applies.
        # The [low] entry duplicates the boundary so the first interval has
        # zero width and the output keeps the full axis length.
        if high > low:
            upInds[dimNo] = [low] + range(low + 1, high + 1)
            downInds[dimNo] = [low] + range(low, high)
        else:
            upInds[dimNo] = range(high, low) + [low]
            downInds[dimNo] = range(high + 1, low + 1) + [low]

        # take grid axis at the grid points required; axis values are scaled
        # from the dimensionless grid axis using 'low' and 'high' of the grid
        upaxis = np.multiply(
            grid.v('grid', 'axis', dim, *upInds, copy='all'),
            (grid.v('grid', 'high', dim, *upInds, copy='all') -
             grid.v('grid', 'low', dim, *upInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *upInds, copy='all')
        downaxis = np.multiply(
            grid.v('grid', 'axis', dim, *downInds, copy='all'),
            (grid.v('grid', 'high', dim, *downInds, copy='all') -
             grid.v('grid', 'low', dim, *downInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *downInds, copy='all')

        Ju = 0.5 * (upaxis - downaxis) * (u[np.ix_(*upInds)] +
                                          u[np.ix_(*downInds)])
    elif INTMETHOD == 'INTERPOLSIMPSON':
        # Simpson's rule with quadratic interpolation to the interval midpoints
        # indices of upper and lower grid points (same aliasing note as above)
        upInds = inds
        downInds = inds[:]
        axis = grid.v('grid', 'axis', dim)
        axis = axis.reshape(np.product(axis.shape))
        axis_mid = np.zeros(axis.shape)
        if high > low:
            upInds[dimNo] = [low] + range(low + 1, high + 1)
            downInds[dimNo] = [low] + range(low, high)
            axis_mid = 0.5 * (axis[downInds[dimNo]] -
                              axis[upInds[dimNo]]) + axis[upInds[dimNo]]
        else:
            upInds[dimNo] = range(high, low) + [low]
            downInds[dimNo] = range(high + 1, low + 1) + [low]
            axis_mid = 0.5 * (axis[downInds[dimNo]] -
                              axis[upInds[dimNo]]) + axis[upInds[dimNo]]

        # take grid axis at the grid points required
        upaxis = np.multiply(
            grid.v('grid', 'axis', dim, *upInds, copy='all'),
            (grid.v('grid', 'high', dim, *upInds, copy='all') -
             grid.v('grid', 'low', dim, *upInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *upInds, copy='all')
        downaxis = np.multiply(
            grid.v('grid', 'axis', dim, *downInds, copy='all'),
            (grid.v('grid', 'high', dim, *downInds, copy='all') -
             grid.v('grid', 'low', dim, *downInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *downInds, copy='all')

        uquad = scipy.interpolate.interp1d(axis, u, 'quadratic', axis=dimNo)
        Ju = 1. / 6 * (upaxis - downaxis) * (
            u[np.ix_(*upInds)] + 4. * uquad(axis_mid) + u[np.ix_(*downInds)])
    else:
        raise KnownError(
            "Numerical integration scheme '%s' is not implemented" %
            (INTMETHOD))
    return Ju
def buildCallStack(self):
    """Build the call stack of calculation modules.

    Steps:
    1. Update the submodules each module should run (__updateSubmodulesToRun),
       starting from the modules needed for the requested output (runModule=True)
       and propagating their requirements.
    2. Check input variables of all enlisted modules against other modules'
       output, the input file and the config; raises KnownError when invalid.
    3. Reinitialise Iterator modules (__setIterator).
    4. Initialise: per-module input requirements (initial and consecutive runs)
       and the bookkeeping lists used in step 5.
    5. Place modules in the call stack using an unplaced list and the stack
       itself: sort candidates, place the first module whose requirements are
       met, and, for iterative modules, track the requirements for closing
       their iteration loop.
    6. Stop when no progress was made or the unplaced list is depleted. Warn
       (but continue) when output requirements are unmet or modules are unused;
       raise KnownError when contributing modules could not be placed.

    Note: modules themselves know whether to run and which submodules to run;
    this class only queries them, it has no knowledge of the submodules.

    Raises:
        KnownError - if the input is invalid or contributing modules cannot be
        placed; an incomplete call stack alone only triggers a warning.
    """
    # 1.a update the submodules to run based on the input requirements of all modules
    self.__updateSubmodulesToRun()

    # 2. check input requirements; have all variables been provided
    self.__checkInputRequirements()

    # 3 set the registry of Iterator modules
    self.__setIterator()

    # 4. Init
    # Determine which modules are required for the modules to run
    inputInitMods = self.__loadInputRequirements(init=True)
    inputMods = self.__loadInputRequirements()

    # Initialise lists
    unplacedList = [i for i in self.moduleList if i.runModule
                    ]  # list with modules not placed in the call stack
    self.callStack = (
        [], [], []
    )  # sublists with 0: modules in call stack, 1: iteration number, 2: 'start' if this is the place to start an iteration (i.e. run stopping criterion)
    outputList = []  # output of all modules in the call stack
    outputReqList = []  # list of required output variables
    for module in self.moduleList:
        outputReqList = list(
            set(outputReqList + toList(module.getOutputRequirements())))

    # iterative module inits
    iterativeList = [
        i.isIterative() for i in unplacedList
    ]  # is the module in the unplacedList iterative or not (bool)
    iterationReqList = [[]]  # per iteration level: modules still required to close the loop
    iterationNo = 0          # current iteration nesting level; 0 means no iteration

    # 5. place modules in call stack
    while unplacedList:
        listSizeBefore = len(unplacedList)
        iterativeDependence, iterativeList = self.__iterativeDependence(
            unplacedList, inputInitMods, inputMods, iterativeList
        )  # set importance per iterative loop
        # 06-08-2018: in separate module to evaluate everytime the callstack is updated; helps dealing with situation of two independent loops inside another loop

        # a. sort modules depending
        if iterationNo == 0:  # no iteration
            # sort the iterativeList and unplacedList so that non-iterative modules are always placed first
            iterativeList, unplacedList = (list(x) for x in zip(
                *sorted(zip(iterativeList, unplacedList))))
        else:  # in an iteration
            # sort so that modules that can contribute to the iteration requirements come first.
            # if more modules can contribute, take non-iteratives first
            contributionList = [(i in iterationReqList[-1])
                                for i in unplacedList]
            iterativeList = [
                -i for i in iterativeList
            ]  # sorting is reversed for contribution list, therefore also reverse iterativelist. Reverse back later
            contributionList, iterativeList, unplacedList = (
                list(x) for x in zip(*sorted(zip(
                    contributionList, iterativeList, unplacedList),
                    reverse=True)))
            iterativeList = [-i for i in iterativeList]

        # b. place modules that can be placed according to their input requirements
        for i, mod in enumerate(unplacedList):
            if not [
                    j for j in inputInitMods[mod]
                    if j not in self.callStack[0]
            ]:  # if a module does not require initial input that is not already in outList
                if iterativeList[i]:
                    # start a new iteration loop
                    iterationNo += 1
                    iterationReqList.append([])
                    iterationReqList[iterationNo] = [
                        j for j in iterativeDependence[mod]
                        if j not in self.callStack[0]
                    ]  # update and append list of modules required for this iteration loop

                self.callStack[0].append(mod)  # place in call stack
                self.callStack[1].append(
                    iterationNo
                )  # iteration loop this module is in; 0 means no iteration
                self.callStack[2].append(
                    bool(iterativeList[i]) * 'start'
                )  # 'start' if this is the starting point of a loop
                unplacedList.pop(i)  # remove from list of unplaced modules
                iterativeList.pop(i)  # " "
                outputList = list(
                    set(outputList + mod.getOutputVariables())
                )  # add its output to the list of calculated output variables
                # iterationReqList[iterationNo] = list(set(iterationReqList[iterationNo]+ [j for j in inputMods[mod.getName()] if j not in self.callStack[0]]))           # update and append list of modules required for this iteration loop
                for k in range(1, iterationNo + 1):
                    iterationReqList[k] = [
                        j for j in iterationReqList[k]
                        if j not in self.callStack[0]
                    ]  # update list of variables required for all iteration loops
                break  # break loop if a module is placed

        # c. close iteration loop if in iteration but no requirements anymore
        while iterationNo > 0 and not iterationReqList[iterationNo]:
            # add module if iterative output is requested and it belongs to this iteration loop
            iteratingModules = [
                mod for mod in unplacedList if mod.getIteratesWith()
            ]
            outputmodule = [
                mod.isOutputModule() for mod in iteratingModules
            ]  # find output modules ...
            iteratingModules = [
                x for (y, x) in sorted(zip(outputmodule, iteratingModules))
            ]  # .. and sort them to set at the front
            for mod in iteratingModules:
                IterationModule = mod.getIteratesWith(
                )  # find with which module the output iterates or None if non-iterative
                iterStartModuleName_list = [
                    q.getName() for i, q in enumerate(self.callStack[0])
                    if (self.callStack[2][i] == 'start'
                        and self.callStack[1][i] == iterationNo)
                ]  # find the module(s) that started the current iteration loop
                for iterStartModuleName in iterStartModuleName_list:
                    if IterationModule == iterStartModuleName:  # if output iterates with current iterative module, add output module and remove from unplaced list
                        self.callStack[0].append(mod)
                        self.callStack[1].append(iterationNo)
                        self.callStack[2].append('')
                        try:
                            unplacedList.remove(mod)
                        except Exception as e:
                            raise KnownError(
                                'Module %s is supposed to iterate with module %s, which is not used.'
                                % (mod.getName(), IterationModule), e)

            # NOTE(review): a superseded commented-out variant that handled only
            # the single output module was removed here.

            # set iteration level back by 1
            iterationReqList.pop(iterationNo)
            iterationNo -= 1

        listSizeAfter = len(unplacedList)

        # 6. check the progress made in the last iteration
        if listSizeBefore == listSizeAfter or not unplacedList:
            # a. there are vars required on output, but not calculated
            if any([j for j in outputReqList if j not in outputList]):
                missingVars = [
                    j for j in outputReqList if j not in outputList
                ]
                # Mention that not all variables can be calculated, but carry on anyway
                message = (
                    'Not all variables that are required on output can be calculated using the current input settings. '
                    '\nCould not calculate %s.'
                    '\nComputation will continue.\n' %
                    (', '.join(missingVars)))
                self.logger.warning(message)
                unplacedList = []  # give up on the remaining modules; ends the while loop

            # b. output requirements (more than) fulfilled. Stop the building of the call stack.
            unusedlist = [i for i in self.moduleList if not i.runModule]
            if any(unusedlist):
                # Nb. not necessarily all modules will be run, provide a warning if modules have not been placed in the call stack
                self.logger.warning(
                    'Not all modules will be used.'
                    '\nModule(s) ' +
                    (', '.join([mod.getName() for mod in unusedlist])) +
                    ' will not be run'
                    '\nComputation will continue.\n')

            # c. output requirements (more than) fulfilled. Stop the building of the call stack.
            if any(unplacedList):
                raise KnownError(
                    'Modules %s could not be placed in the call stack, but seem to contribute to the requested output. '
                    'Please check the input and output requirements of your modules.'
                    % ', '.join([mod.getName() for mod in unplacedList]))

            # Update submodulesToRun in subsequent iterations of a loop
            self.__updateSubmodulesToRunIteration()

            # Console information
            self.logger.info('Call stack was built successfully')

            # Print call stack to log. Includes information on loops
            self.logger.info('** Call stack **')
            for i, mod in enumerate(self.callStack[0]):
                self.logger.info(
                    str(i + 1) + '\t' + mod.getName() + '\t' +
                    ', '.join(toList(mod.getSubmodulesToRun())) + '\t' +
                    'Loop '.join([''] + [(str)(j)
                                         for j in [self.callStack[1][i]]
                                         if j > 0]) + '\t' +
                    self.callStack[2][i])
            self.logger.info('')
    return
def run(self):
    """Time-integrate the dynamic sediment availability (upwind scheme).

    Steps through a time series of river discharge Q1, updating the
    discharge-dependent transport terms each step, advancing the state
    vector X = (f, S) (erodibility f and stock S) with self.timestepping,
    and saving results at the requested output times.

    Returns:
        d (dict) - transport components 'T'/'F' (river-dependent parts made
            time-dependent), erodibility 'f', stock 'a', concentration
            components 'c0', 'c1', 'c2' and output times 't'
    """
    self.logger.info('Running module DynamicAvailability_upwind')

    ################################################################################################################
    # Init
    ################################################################################################################
    jmax = self.input.v('grid', 'maxIndex', 'x')
    kmax = self.input.v('grid', 'maxIndex', 'z')
    fmax = self.input.v('grid', 'maxIndex', 'f')
    self.x = ny.dimensionalAxis(self.input.slice('grid'), 'x')[:, 0, 0]
    self.dx = (self.x[1:] - self.x[:-1])
    self.zarr = ny.dimensionalAxis(self.input.slice('grid'), 'z')[:, :, 0]
    self.B = self.input.v('B', range(0, jmax + 1))
    self.Bx = self.input.d('B', range(0, jmax + 1), dim='x')
    self.Kh = self.input.v('Kh')
    self.u0tide_bed = self.input.v('u0', 'tide', range(0, jmax + 1), kmax, 1)

    # leading-order concentration: residual component (c00) and second harmonic magnitude (c04)
    c00 = np.real(
        self.input.v('hatc0', range(0, jmax + 1), range(0, kmax + 1), 0))
    c04 = np.abs(
        self.input.v('hatc0', range(0, jmax + 1), range(0, kmax + 1), 2))
    c04_int = np.trapz(c04, x=-self.zarr)
    hatc2 = np.abs(
        self.input.v('hatc2', range(0, jmax + 1), range(0, kmax + 1), 0))

    # alpha1: depth integral of c00 + hatc2; copy the neighbouring value if the
    # landward end is exactly zero to avoid dividing by zero below
    alpha1 = np.trapz(c00 + hatc2, x=-self.zarr, axis=1)
    if alpha1[-1] == 0:
        alpha1[-1] = alpha1[-2]
    alpha2 = c04_int / alpha1 + 1e-3  # small offset keeps alpha2 strictly positive

    ## load time series Q
    t = self.interpretValues(self.input.v('t'))
    toutput = self.interpretValues(self.input.v('toutput'))
    toutput[0] = t[0]  # correct output time; first time level is always equal to initial computation time
    Qarray = self.interpretValues(self.input.v('Q1'))
    if len(Qarray) != len(t):
        from src.util.diagnostics.KnownError import KnownError
        raise KnownError(
            'Length of Q does not correspond to length of time array.')

    ################################################################################################################
    # Compute transport, source and BC
    ################################################################################################################
    ## Transport
    d = self.compute_transport()
    dc = DataContainer(d)

    # change size of those components that depend on the river discharge and put init value in first element
    T_r = copy(dc.v('T', 'river', range(0, jmax + 1)))
    T_rr = copy(dc.v('T', 'river_river', range(0, jmax + 1)))
    T_dr = copy(dc.v('T', 'diffusion_river', range(0, jmax + 1)))
    F_dr = dc.v('F', 'diffusion_river', range(0, jmax + 1))
    d['T']['river'] = np.zeros((jmax + 1, 1, 1, len(toutput)))
    d['T']['river'][:, 0, 0, 0] = T_r
    d['T']['river_river'] = np.zeros((jmax + 1, 1, 1, len(toutput)))
    d['T']['river_river'][:, 0, 0, 0] = T_rr
    d['T']['diffusion_river'] = np.zeros((jmax + 1, 1, 1, len(toutput)))
    d['T']['diffusion_river'][:, 0, 0, 0] = T_dr
    d['F']['diffusion_river'] = np.zeros((jmax + 1, 1, 1, len(toutput)))
    d['F']['diffusion_river'][:, 0, 0, 0] = F_dr
    T = dc.v('T', range(0, jmax + 1), 0, 0, 0)
    F = dc.v('F', range(0, jmax + 1), 0, 0, 0)

    ## Source
    G = self.compute_source()  #NB does not change over long time scale

    ## Seaward boundary condition
    if self.input.v('sedbc') == 'csea':
        csea = self.input.v('csea')
        fsea = csea / alpha1[0] * (
            self.input.v('grid', 'low', 'z', 0) -
            self.input.v('grid', 'high', 'z', 0)
        )  #NB does not change over long time scale
    else:
        from src.util.diagnostics.KnownError import KnownError
        raise KnownError(
            'incorrect seaward boundary type (sedbc) for sediment module')

    ## compute TQ, uQ, hatc2Q: quantities relative to the river discharge
    u1river = np.real(
        self.input.v('u1', 'river', range(0, jmax + 1), range(0, kmax + 1),
                     0))
    Q_init = -np.trapz(u1river[-1, :], x=-self.zarr[-1, :]) * self.B[
        -1]  # initial discharge
    self.TQ = T_r / Q_init  # river transport per discharge unit
    self.uQ = u1river / Q_init  # river velocity per discharge unit

    ################################################################################################################
    # Initialise X = (f, S)
    ################################################################################################################
    if self.input.v('initial') == 'erodibility':
        finit = self.input.v('finit', range(0, jmax + 1))
        Sinit = self.init_stock(finit, alpha1, alpha2)
    elif self.input.v('initial') == 'stock':
        Sinit = self.input.v('Sinit', range(0, jmax + 1))
        finit = self.erodibility_stock_relation(alpha2, Sinit / alpha1)
    elif self.input.v('initial') == 'equilibrium':
        _, finit, _ = self.availability(F, T, G, alpha1, alpha2)
        Sinit = self.init_stock(finit, alpha1, alpha2)
    else:
        from src.util.diagnostics.KnownError import KnownError
        raise KnownError(
            'incorrect initial value for sediment module. Use erodibility, stock or equilibrium'
        )

    # state vector stacks erodibility and stock; allocate output arrays and
    # store the initial condition in the first output slot
    X = np.concatenate((finit, Sinit))
    f = np.zeros((jmax + 1, 1, 1, len(toutput)))
    S = np.zeros((jmax + 1, 1, 1, len(toutput)))
    f[:, 0, 0, 0] = finit
    S[:, 0, 0, 0] = Sinit

    ################################################################################################################
    # Time integrator
    ################################################################################################################
    # discharge-independent parts of the transport terms (river contributions subtracted)
    T_base = dc.v('T', range(0, jmax + 1), 0, 0, 0) - dc.v(
        'T', 'river', range(0, jmax + 1), 0, 0, 0) - dc.v(
            'T', 'river_river', range(0, jmax + 1), 0, 0, 0) - dc.v(
                'T', 'diffusion_river', range(0, jmax + 1), 0, 0, 0)
    F_base = dc.v('F', range(0, jmax + 1), 0, 0, 0) - dc.v(
        'F', 'diffusion_river', range(0, jmax + 1), 0, 0, 0)

    # loop
    self.timer.tic()
    qq = 1  # counter for saving
    for i, Q in enumerate(Qarray[1:]):
        # quantities at old time step
        Told = copy(T)
        Fold = copy(F)
        alpha1old = copy(alpha1)
        alpha2old = copy(alpha2)

        # Update transport terms and hatc2 & load new transport terms
        T_riv, T_rivriv, T_difriv, F_difriv = self.update_transport(Q)
        ur = self.uQ[:, -1] * Q
        hatc2 = self.update_hatc2(ur)
        T = T_base + T_riv + T_rivriv + T_difriv
        F = F_base + F_difriv

        # Make one time step and iterate over non-linearity
        self.dt = t[i + 1] - t[i]
        alpha1 = np.trapz(c00 + hatc2, x=-self.zarr, axis=1)
        if alpha1[-1] == 0:
            alpha1[-1] = alpha1[-2]
        alpha2 = c04_int / alpha1 + 1e-3
        X = self.timestepping(T, F, alpha1, alpha2, Told, Fold, alpha1old,
                              alpha2old, X, fsea, G)

        # save output on output timestep
        if t[i + 1] >= toutput[qq]:
            toutput[qq] = t[
                i + 1]  # correct output time to real time if time step and output time do not correspond
            d['T']['river'][:, 0, 0, qq] = T_riv
            d['T']['river_river'][:, 0, 0, qq] = T_rivriv
            d['T']['diffusion_river'][:, 0, 0, qq] = T_difriv
            d['F']['diffusion_river'][:, 0, 0, qq] = F_difriv
            f[:, 0, 0, qq] = X[:jmax + 1]
            S[:, 0, 0, qq] = X[jmax + 1:]
            qq += 1
            qq = np.minimum(qq, len(toutput) - 1)  # clamp so a final step cannot index past the last output slot

        # display progress
        # NOTE(review): if len(Qarray[1:]) < 100, np.floor(.../100.) is 0 and the
        # modulo yields nan (numpy warning) rather than an error - confirm intended
        if i % np.floor(len(Qarray[1:]) / 100.) == 0:
            percent = float(i) / len(Qarray[1:])
            hashes = '#' * int(round(percent * 10))
            spaces = ' ' * (10 - len(hashes))
            sys.stdout.write("\rProgress: [{0}]{1}%".format(
                hashes + spaces, int(round(percent * 100))))
            sys.stdout.flush()
    sys.stdout.write('\n')
    self.timer.toc()
    self.timer.disp('time integration time')

    ################################################################################################################
    # Prepare output
    ################################################################################################################
    d['f'] = f
    d['a'] = S
    fx = np.gradient(f, self.x, axis=0, edge_order=2)
    hatc0 = self.input.v('hatc0', 'a', range(0, jmax + 1), range(0, kmax + 1),
                         range(0, fmax + 1), [0])
    hatc1 = self.input.v('hatc1', 'a', range(0, jmax + 1), range(0, kmax + 1),
                         range(0, fmax + 1), [0])
    hatc1x = self.input.v('hatc1', 'ax', range(0, jmax + 1),
                          range(0, kmax + 1), range(0, fmax + 1), [0])
    hatc2 = self.input.v('hatc2', 'a', range(0, jmax + 1), range(0, kmax + 1),
                         range(0, fmax + 1), [0])
    # concentration components reconstructed from the computed erodibility
    d['c0'] = hatc0 * f
    d['c1'] = hatc1 * f + hatc1x * fx
    d['c2'] = hatc2 * f
    d['t'] = toutput
    return d
def __operationsNumerical(self, value, *args, **kwargs):
    """Utility of .v(). Deals with operations, such as derivation and integration,
    on numerical data (scalar, array). Returns the value at the specified indices
    if no operation is prescribed.

    Parameters:
        value (scalar or ndarray) - value containing the data
        args (lists, ndarray) - indices for each dimension. NB. these should
            already be tailormade (e.g. excess dimensions cut off). If no args
            are provided, the data is returned on the original grid.
        kwargs['operation'] (string, optional) - specification of operation
            n: negative
            d: derivative
            dd: second derivative (deprecated since v2.2)
        kwargs['dim'] (string, int, optional) - axis (or axes) to take the
            operation over (if applicable). Can be given as dimension number or
            dimension name; a dimension occurring twice requests a second derivative.

    Exception:
        KnownError if the data cannot be accessed using the operation

    Returns:
        value with operation executed or raise an exception
    """
    operation = kwargs.get('operation')
    if operation:
        if operation == 'n':
            # negation; apply the index selection first when indices are given
            if args:
                value = -value[np.ix_(*args)]
            else:
                value = -value
        if operation == 'd':
            dim = kwargs.get('dim')
            import numbers
            # Loop over each distinct dimension once, in a deterministic order.
            # BUGFIX: the previous list(set(sorted(dim))) sorted *before* set()
            # discarded the order, so dimensions were visited in arbitrary set
            # order; sorted(set(dim)) keeps the intended fixed ordering.
            for axis in sorted(set(dim)):
                if isinstance(value, numbers.Number):
                    # derivative of a constant is zero
                    value = 0.
                else:
                    # number of occurrences of this dimension = derivative order
                    order = len([i for i in dim if i == axis])
                    if order == 1:
                        value = nf.derivative(
                            value,
                            axis,
                            self.slice('grid'),
                            *args,
                            DERMETHOD=kwargs.get('DERMETHOD'))
                    elif order == 2:
                        value = nf.secondDerivative(value, axis,
                                                    self.slice('grid'), *args)
                    else:
                        raise KnownError(
                            'Numerical derivatives of order %s are not implemented'
                            % str(order))
        ### Deprecated since v2.2 (02-03-2016) [dep01] ###
        if operation == 'dd':
            dim = kwargs.get('dim')
            value = nf.secondDerivative(value, dim, self.slice('grid'), *args)
        ### End ###
    else:
        if args:
            value = value[np.ix_(*args)]
    return value
def integrate(u, dimNo, low, high, grid, *args, **kwargs):
    """Compute the integral of numerical array between low and high. The method is specified in src.config (INTMETHOD)
    NB. 'indices' now only as indices (not as coordinates)
    NB. when requesting a shape that has more dimensions that the data, this method fails. Needs fixing (TODO)
    NB. low/high only as indices. Needs coordinates (incl interpolation) later

    Parameters:
        u (ndarray) - data to take integral of
        dimNo (int or str) - number or name of dimension to take the integral of
        low (int) - index of lower integration boundary grid point
        high (1d-array or int) - (list of) index/indices of upper integration boundary grid point
        data (DataContainer) - DataContainer containing grid information
        args (ndarray/list, optional) - indices at which the integral is requested.
            Dimension of integration should be included to keep the order of dimensions correct, but this information is not used
            When omitted data is requested at the original grid.
            For omitting dimensions, the same rules hold as for the .v method of the DataContainer
        kwargs (optional):
            INTMETHOD: integration method. If not set, this is taken from the config file

    Returns:
        Ju (ndarray) - Numerical integral of u in dimension dimNo evaluated between grid indices 'low' and 'high'.
            Shape of Ju is equal to u, with the exception that axis 'dimNo' has the same length as 'high'
    """
    INTMETHOD = kwargs.get('INTMETHOD') or cf.INTMETHOD

    # if string of dimension is provided, convert to number of axis
    if isinstance(dimNo, basestring):
        dimNo = grid.v('grid', 'dimensions').index(dimNo)

    if INTMETHOD == 'TRAPEZOIDAL' or INTMETHOD == 'INTERPOLSIMPSON':
        # determine 'maximum' of high, i.e. that value furthest from low
        high = np.asarray(toList(high))
        if max(high) > low:
            maxHigh = max(high)
            incr = 1  # integrating towards increasing index
        else:
            maxHigh = min(high)
            incr = -1  # integrating towards decreasing index

        # Determine primitive function over each cell
        # NOTE(review): 'primitive' is a sibling helper defined elsewhere in this module
        Ju = primitive(u, dimNo, low, maxHigh, grid, INTMETHOD=INTMETHOD, *args)

        # sum the primitive over each cell with respect to low
        # NOTE(review): 'size' is computed but never used below - dead code?
        size = [j for j in Ju.shape]
        size[dimNo] = len(high)
        if incr == -1:
            # shift the cumulative sum by one cell along dimNo, then reference it
            # to its last entry so the integral runs from 'low' downwards
            uInt = np.zeros(Ju.shape, dtype=u.dtype)
            uInt[(slice(None),)*dimNo+(slice(1, None),)+(Ellipsis,)] = np.cumsum(Ju, axis=dimNo)[(slice(None),)*dimNo+(slice(None, -1),)+(Ellipsis,)]
            high = high - min(high)  # re-index 'high' relative to the reversed range
            uInt = (-uInt + uInt[(slice(None), )*dimNo+([-1], )+(Ellipsis,)])[(slice(None),)*dimNo+(high,)+(Ellipsis,)]
        else:
            uInt = np.cumsum(Ju, axis=dimNo)
            high = high - low  # re-index 'high' relative to 'low'
            uInt = uInt[(slice(None),)*dimNo+(high,)+(Ellipsis,)]
    elif INTMETHOD == 'SIMPSON':
        # make z the same shape as u
        # if u has less dimensions or length-1 dimensions, remove them from z
        axisrequest = {}
        for num, j in enumerate(grid.v('grid', 'dimensions')):
            if num >= len(u.shape):
                axisrequest[j] = 0
            elif u.shape[num] == 1:
                axisrequest[j] = [0]
        # NOTE(review): the coordinate is always built from the 'z' dimension here;
        # presumably the SIMPSON scheme is only used for vertical integration - confirm
        axis = ny.dimensionalAxis(grid, 'z', **axisrequest)
        # if u has more dimensions, append them
        axis = axis.reshape(axis.shape+(1,)*(len(u.shape) - len(axis.shape)))*np.ones(u.shape)

        # integrate: one Simpson integration per requested upper boundary index
        uInt = np.zeros(u.shape[:dimNo]+(len(ny.toList(high)),)+u.shape[dimNo+1:], dtype=u.dtype)
        high = np.asarray(toList(high))
        if max(high) > low:
            incr = 1
        else:
            incr = -1
        for i, hi in enumerate(high+incr):
            if hi < 0:
                hi = None  # a negative stop would slice from the end; None means 'to the start'
            slices = (slice(None),)*dimNo+(slice(low, hi, incr),)+(Ellipsis,)
            uInt[(slice(None),)*dimNo+(i,)+(Ellipsis,)] = scipy.integrate.simps(u[slices], axis[slices], axis=dimNo)
    else:
        raise KnownError("Numerical integration scheme '%s' is not implemented" %(INTMETHOD))
    return uInt
def axisDerivative(u, dim, dimNo, grid, *args, **kwargs):
    """Numerical derivative of u along axis 'dimNo' with respect to the grid
    dimension named 'dim'.

    Parameters:
        u (array_like) - data to differentiate
        dim (str) - name of the grid dimension used to construct the coordinate axis
        dimNo (int) - axis number of u over which to differentiate
        grid (DataContainer) - grid information ('grid', 'axis'/'high'/'low')
        args (ndarray/list, optional) - indices at which the derivative is requested,
            one entry per dimension; when omitted, the full original grid is used
        kwargs (optional):
            DERMETHOD: derivative scheme; 'CENTRAL' (first order at the boundaries),
                'FORWARD' or 'CENTRAL2' (second order at the boundaries).
                If not set, this is taken from the config file.

    Exception:
        KnownError if the requested scheme is not implemented

    Returns:
        ux (ndarray) - derivative of u. If axis 'dimNo' does not exist or has
            length 1, the data is constant in that direction and zeros are returned.
    """
    DERMETHOD = kwargs.get('DERMETHOD') or cf.DERMETHOD

    # Preparation
    # determine the size of u and build an index list per dimension;
    # replace indices by requested indices wherever available
    u = np.asarray(u)
    inds = [np.arange(0, n) for n in u.shape]
    for n in range(0, min(len(args), len(inds))):
        inds[n] = np.asarray(args[n])

    # determine the maximum index along the axis of derivation
    # if this maximum index does not exist or is zero, then return a zero array
    try:
        maxIndex = u.shape[dimNo] - 1
        if maxIndex == 0:
            raise Exception
    except:
        # dimNo out of range or a length-1 dimension: constant data, zero derivative
        ux = np.zeros([len(inds[n]) for n in range(0, len(inds))])
        return ux

    if DERMETHOD == 'CENTRAL':
        # central method with first order at the boundaries
        # BUGFIX: copy the index list ('upInds = inds' aliased and mutated the
        # shared 'inds' list, unlike the copies taken in the other branches)
        upInds = inds[:]
        try:
            upInd = np.minimum(np.asarray(args[dimNo]) + 1, maxIndex)
        except:
            # list(range(...)) keeps this Py2-era fallback valid on Python 3
            upInd = np.asarray(list(range(1, maxIndex + 1)) + [maxIndex])
        upInds[dimNo] = upInd

        downInds = inds[:]
        try:
            downInd = np.maximum(np.asarray(args[dimNo]) - 1, 0)
        except:
            downInd = np.asarray([0] + list(range(0, maxIndex)))
        downInds[dimNo] = downInd

        # dimensional coordinates of the up/down stencil points:
        # axis*(high-low)+low maps the normalised grid axis to dimensional values
        upaxis = np.multiply(
            grid.v('grid', 'axis', dim, *upInds, copy='all'),
            (grid.v('grid', 'high', dim, *upInds, copy='all') -
             grid.v('grid', 'low', dim, *upInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *upInds, copy='all')
        downaxis = np.multiply(
            grid.v('grid', 'axis', dim, *downInds, copy='all'),
            (grid.v('grid', 'high', dim, *downInds, copy='all') -
             grid.v('grid', 'low', dim, *downInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *downInds, copy='all')
        ux = (u[np.ix_(*upInds)] - u[np.ix_(*downInds)]) / (upaxis - downaxis)
    elif DERMETHOD == 'FORWARD':
        # first order forward
        upInds = inds[:]  # BUGFIX: copy instead of alias (see CENTRAL branch)
        try:
            upInd = np.minimum(np.asarray(args[dimNo]) + 1, maxIndex)
        except:
            upInd = np.asarray(list(range(1, maxIndex + 1)) + [maxIndex])
        upInds[dimNo] = upInd

        downInds = inds[:]
        try:
            downInd = np.minimum(np.asarray(args[dimNo]), maxIndex - 1)
        except:
            downInd = np.asarray(list(range(0, maxIndex)) + [maxIndex - 1])
        downInds[dimNo] = downInd

        upaxis = np.multiply(
            grid.v('grid', 'axis', dim, *upInds, copy='all'),
            (grid.v('grid', 'high', dim, *upInds, copy='all') -
             grid.v('grid', 'low', dim, *upInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *upInds, copy='all')
        downaxis = np.multiply(
            grid.v('grid', 'axis', dim, *downInds, copy='all'),
            (grid.v('grid', 'high', dim, *downInds, copy='all') -
             grid.v('grid', 'low', dim, *downInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *downInds, copy='all')
        ux = (u[np.ix_(*upInds)] - u[np.ix_(*downInds)]) / (upaxis - downaxis)
    elif DERMETHOD == 'CENTRAL2':
        # central method with second order at the boundaries.
        # alpha/beta/gamma are the stencil weights for the down/mid/up points;
        # they take one-sided second-order values at the two boundaries.
        midInds = inds[:]
        try:
            midInd = np.minimum(np.maximum(np.asarray(args[dimNo]), 1),
                                maxIndex - 1)
            beta = np.asarray([0] * len(midInd))
            beta[[i for i in args[dimNo] if i == 0]] = 4.
            beta[[i for i in args[dimNo] if i == maxIndex]] = -4.
        except:
            midInd = np.asarray([1] + list(range(1, maxIndex)) + [maxIndex - 1])
            beta = np.asarray([0] * len(midInd))
            beta[0] = 4.
            beta[-1] = -4.
        midInds[dimNo] = midInd

        upInds = inds[:]
        try:
            upInd = np.maximum(
                np.minimum(np.asarray(args[dimNo]) + 1, maxIndex), 2)
            gamma = np.asarray([1.] * len(midInd))
            gamma[[i for i in args[dimNo] if i == 0]] = -1.
            gamma[[i for i in args[dimNo] if i == maxIndex]] = 3.
        except:
            upInd = np.asarray([2] + list(range(2, maxIndex + 1)) + [maxIndex])
            gamma = np.asarray([1.] * len(midInd))
            gamma[0] = -1.
            gamma[-1] = 3.
        upInds[dimNo] = upInd

        downInds = inds[:]
        try:
            downInd = np.minimum(np.maximum(np.asarray(args[dimNo]) - 1, 0),
                                 maxIndex - 2)
            alpha = np.asarray([-1.] * len(midInd))
            alpha[[i for i in args[dimNo] if i == 0]] = -3.
            alpha[[i for i in args[dimNo] if i == maxIndex]] = 1.
        except:
            downInd = np.asarray([0] + list(range(0, maxIndex - 1)) +
                                 [maxIndex - 2])
            alpha = np.asarray([-1.] * len(midInd))
            alpha[0] = -3.
            alpha[-1] = 1.
        downInds[dimNo] = downInd

        # reshape the weights so they broadcast along axis dimNo of u
        alpha = alpha.reshape([1] * dimNo + [len(alpha)] + [1] *
                              (len(u.shape) - dimNo - 1))
        beta = beta.reshape([1] * dimNo + [len(beta)] + [1] *
                            (len(u.shape) - dimNo - 1))
        gamma = gamma.reshape([1] * dimNo + [len(gamma)] + [1] *
                              (len(u.shape) - dimNo - 1))
        upaxis = np.multiply(
            grid.v('grid', 'axis', dim, *upInds, copy='all'),
            (grid.v('grid', 'high', dim, *upInds, copy='all') -
             grid.v('grid', 'low', dim, *upInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *upInds, copy='all')
        #midaxis = np.multiply(grid.v('grid', 'axis', dim, *midInds, copy = 'all'), (grid.v('grid', 'high', dim, *midInds, copy = 'all')-grid.v('grid', 'low', dim, *midInds, copy = 'all')))+grid.v('grid', 'low', dim, *midInds, copy = 'all')
        downaxis = np.multiply(
            grid.v('grid', 'axis', dim, *downInds, copy='all'),
            (grid.v('grid', 'high', dim, *downInds, copy='all') -
             grid.v('grid', 'low', dim, *downInds, copy='all'))) + grid.v(
                 'grid', 'low', dim, *downInds, copy='all')
        ux = (gamma * u[np.ix_(*upInds)] + beta * u[np.ix_(*midInds)] +
              alpha * u[np.ix_(*downInds)]) / (upaxis - downaxis)
    else:
        raise KnownError(
            "Numerical derivative scheme '%s' is not implemented" %
            (DERMETHOD))
    return ux