import numpy as np
# ArgumentError and isEqual are assumed to be provided by the package's utility module

def binedges(bins=None, binedgs=None, limits=None, lcheckVar=True):
    ''' utility function to generate and validate bins and bin edges from either one '''
    # check input
    if bins is None and binedgs is None: raise ArgumentError
    elif bins is not None and binedgs is not None:
        if len(bins)+1 != len(binedgs): raise ArgumentError(len(bins))
    if bins is not None:
        if limits is not None: vmin, vmax = limits
        else: raise ArgumentError(bins)
        # expand bins (values refer to center of bins)
        if isinstance(bins, (int, np.integer)):
            if bins == 1: bins = np.asarray(((vmin+vmax)/2.,))
            else: bins = np.linspace(vmin, vmax, bins)
        elif isinstance(bins, (tuple, list)) and 0 < len(bins) < 4:
            bins = np.linspace(*bins)  # interpret as (start, stop[, num])
        elif not isinstance(bins, (list, np.ndarray)):
            raise TypeError(bins)
        if len(bins) == 1:
            tmpbinedgs = np.asarray((vmin, vmax))
        else:
            hbd = np.diff(bins) / 2.  # make sure this is a float!
            # reconstruct edges from bin centers (assuming even spacing)
            tmpbinedgs = np.hstack((bins[0]-hbd[0], bins[1:]-hbd, bins[-1]+hbd[-1]))
        if binedgs is None:
            binedgs = tmpbinedgs  # computed from bins
        elif lcheckVar:  # check that provided edges are consistent with bins
            assert isEqual(binedgs, np.asarray(tmpbinedgs, dtype=binedgs.dtype))
    if binedgs is not None:
        # expand bin edges: convert sequences to arrays, reject anything else
        if isinstance(binedgs, (tuple, list)): binedgs = np.asarray(binedgs)
        elif not isinstance(binedgs, np.ndarray): raise TypeError(binedgs)
        tmpbins = binedgs[1:] - (np.diff(binedgs) / 2.)  # make sure this is a float!
        if bins is None:
            bins = tmpbins  # computed from bin edges
        elif lcheckVar:  # check that provided bins are consistent with edges
            assert isEqual(bins, np.asarray(tmpbins, dtype=bins.dtype))
    # return bins and bin edges
    return bins, binedgs
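# Illustrative usage sketch (a hypothetical helper, not part of the original module);
# the numerical values in the comments are worked examples only.
def _binedges_example():
    centers, edges = binedges(bins=5, limits=(0., 10.))
    # centers -> [ 0. ,  2.5,  5. ,  7.5, 10. ]   (from np.linspace)
    # edges   -> [-1.25, 1.25, 3.75, 6.25, 8.75, 11.25]   (assuming even spacing)
    centers, edges = binedges(binedgs=[0., 1., 2., 3.])
    # centers -> [0.5, 1.5, 2.5]   (mid-points between consecutive edges)
    return centers, edges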
def train(self, dataset, observations, **kwargs):
    ''' loop over variables that need to be corrected and call the method-specific training function '''
    # figure out varlist
    if self.varlist is None:
        self._getVarlist(dataset, observations)  # process all that are present in both datasets
    # loop over variables that will be corrected
    self._correction = dict()
    for varname in self.varlist:
        # get variable objects
        var = dataset[varname]
        if not var.data: var.load()  # assume it is a VarNC, if there is no data
        obsvar = observations[varname]  # should be loaded
        if not obsvar.data: obsvar.load()  # assume it is a VarNC, if there is no data
        assert var.data and obsvar.data, obsvar.data
        # check if they are actually equal
        if isEqual(var.data_array, obsvar.data_array, eps=eps, masked_equal=True):
            correction = None
        else:
            correction = self._trainVar(var, obsvar, **kwargs)
        # save correction parameters
        self._correction[varname] = correction
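# Minimal sketch of the training hook the loop above calls (an illustrative
# assumption, not the module's actual implementation): the simplest correction
# is a mean offset ("delta") between observations and the simulated variable.
def _trainVar_delta_sketch(var, obsvar, **kwargs):
    ''' return correction parameters for one variable: here just the mean bias '''
    # masked-aware average difference between observed and simulated values
    return np.ma.mean(obsvar.data_array - var.data_array)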
from functools import partial
# apply_along_axis (the parallel wrapper under test), isZero, and isEqual are
# assumed to be imported from the package; 'axis' is a module-level test setting

def run_test(fct, kw=0, laax=True):
    ff = partial(fct, kw=kw)
    shape = (500, 100)
    data = np.arange(np.prod(shape), dtype='float').reshape(shape)
    assert data.shape == shape
    # parallel implementation using my wrapper
    pres = apply_along_axis(ff, axis, data, NP=2, ldebug=True, laax=laax)
    print(pres.shape)
    assert pres.shape == data.shape
    assert isZero(pres.mean(axis=axis)+kw) and isZero(pres.std(axis=axis)-1.)
    # straight-forward numpy version
    res = np.apply_along_axis(ff, axis, data)
    assert res.shape == data.shape
    assert isZero(res.mean(axis=axis)+kw) and isZero(res.std(axis=axis)-1.)
    # final test: parallel and serial results should be identical
    assert isEqual(pres, res)
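# Illustrative test function for run_test (a hypothetical helper, not part of the
# original test script): standardize each 1D slice to zero mean and unit standard
# deviation and shift it by -kw, which is exactly what the assertions above check.
def standardize(arr, kw=0):
    arr = np.asarray(arr, dtype='float')
    return (arr - arr.mean()) / arr.std() - kw

# example invocation (assuming 'axis' is set at module level): run_test(standardize, kw=2)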