def getAppxCoef2d_significant(self, arr2d, maxNumCoef, alpha):
    """Returns[:,j] approx. coefficients with p-value < alpha; subsequent coef. are equal to 0.
    Reference: Ott, pp. 606.

    The significance of the coefficients is estimated by comparing the variance drop
    of model 2 against the variance of model 1, where:
        model 1: complete model with maxNumCoef coef. different from 0
        model 2: reduced model with coef. in range (0,k) different from 0
    Null hypothesis (H0): coef. k, k+1, ..., maxNumCoef are equal to 0.
    If H0 is rejected (p-value below some alpha, e.g. 0.05), there exists an
    important coef. among coef. k, k+1, ..., maxNumCoef.
    The test is repeated for k = 0, 1, ..., maxNumCoef-1.
    """
    assert len(arr2d.shape) == 2, "2d array expected"
    assert 0 < maxNumCoef <= self.k
    coefMax = self.getAppxCoef(arr2d, maxNumCoef)
    curveMax = self.getAppxCurve(coefMax)
    SSE1 = Numeric.add.reduce((arr2d - curveMax)**2, 1)
    MSE1 = SSE1 / (arr2d.shape[1] - maxNumCoef)
    pvals = Numeric.zeros((arr2d.shape[0], maxNumCoef), Numeric.Float)
    for k in range(maxNumCoef):  # test H0: coef. k, k+1, ..., maxNumCoef-1 are all zero
        shpk = list(coefMax.shape)
        shpk[1] -= k
        coefk = Numeric.concatenate((coefMax[:, :k], Numeric.zeros(shpk, Numeric.Float)), 1)
        curvek = self.getAppxCurve(coefk)
        SSE2 = Numeric.add.reduce((arr2d - curvek)**2, 1)
        MSdrop = (SSE2 - SSE1) / (maxNumCoef - k)
        F = MSdrop / MSE1
        pvals[:, k] = scipy.stats.fprob((maxNumCoef - k), arr2d.shape[1] - maxNumCoef,
                                        F.filled(ApproxOrthPolyBasis._F_fillValue))
    # MAX where significant, idx where nonsignificant
    pvals = Numeric.where(pvals > alpha,
                          Numeric.resize(Numeric.arange(pvals.shape[1]), pvals.shape),
                          pvals.shape[1])
    firstNonSignIdx = MLab.min(pvals, 1)  # idx of the first non-significant coef.
    coefSign = Numeric.zeros(coefMax.shape, Numeric.Float)
    for idx in range(coefSign.shape[1]):
        coefSign[:, idx] = Numeric.where(idx < firstNonSignIdx, coefMax[:, idx], 0)
    return coefSign
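# --- Hedged sketch (not part of the original module): the same nested-model
# F-test written with the modern scipy.stats API; scipy.stats.fprob from old
# SciPy is equivalent to scipy.stats.f.sf.  SSE1/SSE2, maxNumCoef and k follow
# the definitions in the docstring above; nPoints stands for arr2d.shape[1].
import numpy as np
from scipy import stats

def nested_f_pvalue(SSE1, SSE2, maxNumCoef, k, nPoints):
    """P(F >= observed) for H0: coefficients k..maxNumCoef-1 are all zero."""
    MSE1 = SSE1 / (nPoints - maxNumCoef)        # full-model error variance
    MSdrop = (SSE2 - SSE1) / (maxNumCoef - k)   # variance drop per dropped coef.
    return stats.f.sf(MSdrop / MSE1, maxNumCoef - k, nPoints - maxNumCoef)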
def arc_by_radian(x, y, height, radian_range, thickness, gaussian_width):
    """
    Radial arc with Gaussian fall-off after the solid ring-shaped
    region with the given thickness, with shape specified by the
    (start, end) radian_range.
    """
    # Create a circular ring (copied from the ring function)
    radius = height/2.0
    half_thickness = thickness/2.0

    distance_from_origin = sqrt(x**2+y**2)
    distance_outside_outer_disk = distance_from_origin - radius - half_thickness
    distance_inside_inner_disk = radius - half_thickness - distance_from_origin

    ring = 1.0-bitwise_xor(greater_equal(distance_inside_inner_disk,0.0),
                           greater_equal(distance_outside_outer_disk,0.0))

    sigmasq = gaussian_width*gaussian_width

    if sigmasq==0.0:
        inner_falloff = x*0.0
        outer_falloff = x*0.0
    else:
        with float_error_ignore():
            inner_falloff = exp(divide(-distance_inside_inner_disk*distance_inside_inner_disk, 2.0*sigmasq))
            outer_falloff = exp(divide(-distance_outside_outer_disk*distance_outside_outer_disk, 2.0*sigmasq))

    output_ring = maximum(inner_falloff, maximum(outer_falloff, ring))

    # Calculate radians (in 4 phases) and cut according to the set range
    # RZHACKALERT:
    # Function float_error_ignore() cannot catch the exception when
    # both dividend and divisor are 0.0, and when only the divisor is 0.0
    # it returns 'Inf' rather than 0.0.  Among x, y and
    # distance_from_origin, only one point in distance_from_origin can
    # be 0.0 (the circle center), and at this point x and y must be 0.0 as
    # well.  So here is a hack to avoid the 'invalid value encountered
    # in divide' error by turning 0.0 into 1e-5 in distance_from_origin.
    distance_from_origin += where(distance_from_origin == 0.0, 1e-5, 0)

    with float_error_ignore():
        sines = divide(y, distance_from_origin)
        cosines = divide(x, distance_from_origin)
        arcsines = arcsin(sines)

    phase_1 = where(logical_and(sines >= 0, cosines >= 0), 2*pi-arcsines, 0)
    phase_2 = where(logical_and(sines >= 0, cosines <  0), pi+arcsines,   0)
    phase_3 = where(logical_and(sines <  0, cosines <  0), pi+arcsines,   0)
    phase_4 = where(logical_and(sines <  0, cosines >= 0), -arcsines,     0)
    arcsines = phase_1 + phase_2 + phase_3 + phase_4

    if radian_range[0] <= radian_range[1]:
        return where(logical_and(arcsines >= radian_range[0], arcsines <= radian_range[1]),
                     output_ring, 0.0)
    else:
        # wrapped range (crossing 0/2*pi): keep the union of the two arcs
        return where(logical_or(arcsines >= radian_range[0], arcsines <= radian_range[1]),
                     output_ring, 0.0)
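# --- Hedged aside (hypothetical helper, not in the original module): the
# four-phase arcsin bookkeeping above measures the clockwise angle from the
# positive x axis.  With numpy this collapses to a single wrapped arctan2,
# which also avoids the divide-by-zero hack at the circle center:
import numpy as np

def clockwise_angle(x, y):
    """Angle in [0, 2*pi), matching arc_by_radian's phase_1..phase_4 sum."""
    return np.arctan2(-y, x) % (2*np.pi)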
def data(self, data):
    if data != None:
        self._data = data
        ## self._dataMA = chipstat.orng2ma(data)
        self._dataMA = data.toNumpyMA("a")[0]
        # info text
        self.infoa.setText("Examples: %i profiles on %i points"
                           % (self._dataMA.shape[0], self._dataMA.shape[1]))
        numTotalMissing = int(Numeric.multiply.reduce(self._dataMA.shape)
                              - MA.count(self._dataMA))
        if numTotalMissing > 0:
            numValsByCol = MA.count(self._dataMA, 0)
            numEmptyCol = Numeric.add.reduce(Numeric.where(numValsByCol == 0, 1, 0))
            colNonEmpty = Numeric.compress(numValsByCol != 0,
                                           Numeric.arange(self._dataMA.shape[1]))
            dataRemEmptyCol = self._dataMA.take(colNonEmpty, 1)
            self.numRowsMissing = Numeric.add.reduce(
                Numeric.where(MA.count(dataRemEmptyCol, 1) < dataRemEmptyCol.shape[1], 1, 0))
            s1 = ""
            s2 = ""
            if numEmptyCol > 0:
                s1 = "s"
            if self.numRowsMissing > 0:
                s2 = "s"
            self.infob.setText("missing %i values, %i column%s completely, %i row%s partially"
                               % (numTotalMissing, numEmptyCol, s1, self.numRowsMissing, s2))
        else:
            self.infob.setText("")
    else:
        self._data = None
        self._dataMA = None
        self.infoa.setText("No examples on input")
        self.infob.setText("")
        self.numRowsMissing = 0
    self.setGuiCommonExpChip()
    if self.commitOnChange:
        self.senddata(1)
def line(y, thickness, gaussian_width):
    """
    Infinite-length line with a solid central region, then Gaussian
    fall-off at the edges.
    """
    distance_from_line = abs(y)
    gaussian_y_coord = distance_from_line - thickness/2.0
    sigmasq = gaussian_width*gaussian_width

    if sigmasq==0.0:
        falloff = y*0.0
    else:
        with float_error_ignore():
            falloff = exp(divide(-gaussian_y_coord*gaussian_y_coord, 2*sigmasq))

    return where(gaussian_y_coord<=0, 1.0, falloff)
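# --- Usage sketch (assumption: the module-level names where, exp and divide
# are numpy-compatible, as in the surrounding pattern library, so a plain
# numpy grid can be passed in):
import numpy as np
yy = np.linspace(-1.0, 1.0, 9)[:, None] * np.ones((1, 9))
line_mask = line(yy, thickness=0.5, gaussian_width=0.1)
# rows with |y| <= 0.25 are exactly 1.0; the rest fall off as a Gaussian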
def __call__(self, **params_to_override):
    p = ParamOverrides(self, params_to_override)

    xsize, ysize = SheetCoordinateSystem(p.bounds, p.xdensity, p.ydensity).shape
    xsize, ysize = int(round(xsize)), int(round(ysize))

    xdisparity = int(round(xsize*p.xdisparity))
    ydisparity = int(round(xsize*p.ydisparity))
    dotsize = int(round(xsize*p.dotsize))

    bigxsize = 2*xsize
    bigysize = 2*ysize
    ndots = int(round(p.dotdensity * (bigxsize+2*dotsize) * (bigysize+2*dotsize)
                      / min(dotsize, xsize) / min(dotsize, ysize)))
    halfdot = floor(dotsize/2)

    # Choose random colors and locations of square dots
    random_seed = p.random_seed

    random_array.seed(random_seed*12, random_seed*99)
    col = where(random_array.random((ndots)) >= 0.5, 1.0, -1.0)

    random_array.seed(random_seed*122, random_seed*799)
    xpos = floor(random_array.random((ndots))*(bigxsize+2*dotsize)) - halfdot

    random_array.seed(random_seed*1243, random_seed*9349)
    ypos = floor(random_array.random((ndots))*(bigysize+2*dotsize)) - halfdot

    # Construct arrays of points specifying the boundaries of each
    # dot, cropping them by the big image size (0,0) to (bigxsize,bigysize)
    x1 = xpos.astype(Int); x1 = choose(less(x1, 0), (x1, 0))
    y1 = ypos.astype(Int); y1 = choose(less(y1, 0), (y1, 0))
    x2 = (xpos+(dotsize-1)).astype(Int); x2 = choose(greater(x2, bigxsize), (x2, bigxsize))
    y2 = (ypos+(dotsize-1)).astype(Int); y2 = choose(greater(y2, bigysize), (y2, bigysize))

    # Draw each dot in the big image, on a blank background
    bigimage = zeros((bigysize, bigxsize))
    for i in range(ndots):
        bigimage[y1[i]:y2[i]+1, x1[i]:x2[i]+1] = col[i]

    result = p.offset + p.scale*bigimage[(ysize/2)+ydisparity:(3*ysize/2)+ydisparity,
                                         (xsize/2)+xdisparity:(3*xsize/2)+xdisparity]

    for of in p.output_fns:
        of(result)

    return result
def Translate(self, t):
    ## #scl = 0.25
    ## scl = 1
    ## print "t:", t
    ## t1 = int(t[0]*scl)
    ## t2 = int(t[1]*scl)
    ## t3 = int(t[2]*scl)
    t1, t2, t3 = Numeric.where(Numeric.less(t, 0), -2, 2)

    xmin = self.xmin
    xmax = self.xmax
    ymin = self.ymin
    ymax = self.ymax
    zmin = self.zmin
    zmax = self.zmax

    # Shift each axis interval by the step, sliding it back inside the
    # volume bounds if it overflows on either side.
    xmin = xmin + t1
    xmax = xmax + t1
    nx, ny, nz = self.volSize
    if xmin < 0:
        xmax = xmax - xmin
        xmin = 0
    if xmax > nx:
        xmin = xmin - (xmax - nx)
        xmax = nx

    ymin = ymin + t2
    ymax = ymax + t2
    if ymin < 0:
        ymax = ymax - ymin
        ymin = 0
    if ymax > ny:
        ymin = ymin - (ymax - ny)
        ymax = ny

    zmin = zmin + t3
    zmax = zmax + t3
    if zmin < 0:
        zmax = zmax - zmin
        zmin = 0
    if zmax > nz:
        zmin = zmin - (zmax - nz)
        zmax = nz

    self.xmin = xmin
    self.xmax = xmax
    self.ymin = ymin
    self.ymax = ymax
    self.zmin = zmin
    self.zmax = zmax

    self.update()
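# --- Minimal sketch of the shift-and-clamp pattern repeated above for each
# axis (hypothetical helper, not part of the original class):
def shift_clamped(lo, hi, delta, n):
    """Shift the interval [lo, hi) by delta, sliding it back inside [0, n)."""
    lo, hi = lo + delta, hi + delta
    if lo < 0:
        hi -= lo
        lo = 0
    if hi > n:
        lo -= hi - n
        hi = n
    return lo, hi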
def disk(x, y, height, gaussian_width):
    """
    Circular disk with Gaussian fall-off after the solid central region.
    """
    disk_radius = height/2.0

    distance_from_origin = sqrt(x**2+y**2)
    distance_outside_disk = distance_from_origin - disk_radius

    sigmasq = gaussian_width*gaussian_width

    if sigmasq==0.0:
        falloff = x*0.0
    else:
        with float_error_ignore():
            falloff = exp(divide(-distance_outside_disk*distance_outside_disk, 2*sigmasq))

    return where(distance_outside_disk<=0, 1.0, falloff)
def pca(M):
    "Perform PCA on M, return eigenvectors and eigenvalues, sorted."
    T, N = shape(M)
    # if there are fewer rows T than columns N, use the snapshot method
    if T < N:
        C = dot(M, t(M))
        evals, evecsC = eigenvectors(C)
        # HACK: make sure evals are all positive
        evals = where(evals < 0, 0, evals)
        evecs = 1. / sqrt(evals) * dot(t(M), t(evecsC))
    else:
        # calculate covariance matrix
        K = 1. / T * dot(t(M), M)
        evals, evecs = eigenvectors(K)
    # sort the eigenvalues and eigenvectors, descending order
    order = argsort(evals)[::-1]
    evecs = take(evecs, order, 1)
    evals = take(evals, order)
    return evals, t(evecs)
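# --- Modern equivalent sketch (numpy only; assumes rows of M are
# observations, as in pca above).  The T < N "snapshot" branch is exactly
# what a thin SVD does implicitly:
import numpy as np

def pca_svd(M):
    U, s, Vt = np.linalg.svd(M, full_matrices=False)
    evals = s**2 / M.shape[0]   # eigenvalues of (1/T) M^T M, descending
    return evals, Vt            # rows of Vt are the principal axes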
def LoadUnknowns(self, u):
    """Load the unknown variables. Returns False if there are not enough known variables"""
    #Load vars in a convenient way
    p, m, d = self._matProcess, self._matMotive, self._matDischarge

    #Load everything into lists (in order to allow for None values) which then
    #will be converted to Numeric vectors
    unkNamLst = self._unkName
    unkValLst = self._nuUnk * [None]
    unkInitValLst = self._nuUnk * [None]
    unkIsFixLst = self._nuUnk * [False]
    unkScaleFact = self._nuUnk * [1.0]

    #First load everything that is already known!!

    #Load some unknown vals
    unkValLst[w1Idx] = w1Val = p.GetPropValue(MASSFLOW_VAR)
    unkValLst[w2Idx] = w2Val = m.GetPropValue(MASSFLOW_VAR)
    unkValLst[w3Idx] = w3Val = d.GetPropValue(MASSFLOW_VAR)

    #Can not use three mass flows as specs
    self._canBeSpec[w3Idx] = where((w1Val != None and w2Val != None), False, True)

    unkValLst[p1Idx] = p1Val = p.GetPropValue(P_VAR)
    unkValLst[p2Idx] = p2Val = m.GetPropValue(P_VAR)
    unkValLst[p3Idx] = p3Val = d.GetPropValue(P_VAR)

    unkValLst[d1Idx] = d1Val = p.GetPropValue(MASSDEN_VAR)
    unkValLst[d2Idx] = d2Val = m.GetPropValue(MASSDEN_VAR)
    unkValLst[d3Idx] = d3Val = d.GetPropValue(MASSDEN_VAR)

    nozzDiamVal = self._sigNozzDiam.GetValue()
    thrDiamVal = self._sigThrDiam.GetValue()
    if nozzDiamVal != None and thrDiamVal != None:
        if thrDiamVal <= nozzDiamVal or nozzDiamVal <= 0.0 or thrDiamVal <= 0.0:
            self.InfoMessage('WrongDiamEjector',
                             (self.GetPath(), nozzDiamVal, thrDiamVal),
                             addToUnitOpMsg=1)
            return False

    _H1MolVal = p.GetPropValue(H_VAR)
    _H2MolVal = m.GetPropValue(H_VAR)
    _H3MolVal = d.GetPropValue(H_VAR)
    _MW1Val = p.GetPropValue(MOLEWT_VAR)
    _MW2Val = m.GetPropValue(MOLEWT_VAR)
    _MW3Val = d.GetPropValue(MOLEWT_VAR)

    self._pFracs = pFracs = p.GetCompositionValues()
    self._mFracs = mFracs = m.GetCompositionValues()
    self._dFracs = dFracs = d.GetCompositionValues()
    nuCmps = len(pFracs)

    #Two ports must have full known fracs. Make fractions Numeric arrays
    if None in pFracs:
        self._unkFracs = _P_FRACS
        if None in mFracs or None in dFracs:
            return False  #Can not solve
        else:
            self._mFracs = array(self._mFracs, Float)
            self._dFracs = array(self._dFracs, Float)
    elif None in mFracs:
        self._unkFracs = _M_FRACS
        if None in dFracs:
            return False  #Can not solve
        self._pFracs = array(self._pFracs, Float)
        self._dFracs = array(self._dFracs, Float)
    elif None in dFracs:
        self._unkFracs = _D_FRACS
        self._pFracs = array(self._pFracs, Float)
        self._mFracs = array(self._mFracs, Float)
    else:
        self._unkFracs = None
        self._pFracs = array(self._pFracs, Float)
        self._mFracs = array(self._mFracs, Float)
        self._dFracs = array(self._dFracs, Float)

    #In case ports were flashed but some vars are not there (only density in this case)
    if d1Val == None and p1Val != None and _H1MolVal != None and self._unkFracs != _P_FRACS:
        unkValLst[d1Idx] = d1Val = self.GetDensity(p1Val, _H1MolVal, pFracs)
    if d2Val == None and p2Val != None and _H2MolVal != None and self._unkFracs != _M_FRACS:
        unkValLst[d2Idx] = d2Val = self.GetDensity(p2Val, _H2MolVal, mFracs)
    if d3Val == None and p3Val != None and _H3MolVal != None and self._unkFracs != _D_FRACS:
        unkValLst[d3Idx] = d3Val = self.GetDensity(p3Val, _H3MolVal, dFracs)

    #Calculate surfaces
    S1Val = None
    if nozzDiamVal != None:
        S2Val = (PI / 4.0) * (nozzDiamVal**2)
    else:
        S2Val = None
    if thrDiamVal != None:
        S3Val = (PI / 4.0) * (thrDiamVal**2)
    else:
        S3Val = None
    unkValLst[S1Idx] = S1Val
    unkValLst[S2Idx] = S2Val
    unkValLst[S3Idx] = S3Val

    #Finally try to see if v are known
    if not None in (w1Val, d1Val, S1Val):
        unkValLst[v1Idx] = v1Val = w1Val / d1Val / S1Val
    else:
        unkValLst[v1Idx] = v1Val = None
    if not None in (w2Val, d2Val, S2Val):
        unkValLst[v2Idx] = v2Val = w2Val / d2Val / S2Val
    else:
        unkValLst[v2Idx] = v2Val = None
    if not None in (w3Val, d3Val, S3Val):
        unkValLst[v3Idx] = v3Val = w3Val / d3Val / S3Val
    else:
        unkValLst[v3Idx] = v3Val = None

    #Load mass enthalpies
    if _H1MolVal != None and _MW1Val != None:
        unkValLst[H1Idx] = H1Val = _H1MolVal / _MW1Val
    else:
        unkValLst[H1Idx] = H1Val = None
    if _H2MolVal != None and _MW2Val != None:
        unkValLst[H2Idx] = H2Val = _H2MolVal / _MW2Val
    else:
        unkValLst[H2Idx] = H2Val = None
    if _H3MolVal != None and _MW3Val != None:
        unkValLst[H3Idx] = H3Val = _H3MolVal / _MW3Val
    else:
        unkValLst[H3Idx] = H3Val = None

    #If not enough info is known then return False
    cnt = 0
    for i in range(self._nuUnk):
        if unkValLst[i] != None and self._canBeSpec[i]:
            cnt += 1
    if self._nuUnk - self._nuEqns > cnt:
        return False

    #Load lists with info on which vals are known (fixed) already
    for i in range(self._nuUnk):
        v = unkValLst[i]
        if v != None and self._canBeSpec[i]:
            unkIsFixLst[i] = True

    #Now create necessary estimates

    #Mass flows
    lst = [w1Val, w2Val, w3Val]
    nuNones = lst.count(None)
    if nuNones == 1:
        if w1Val == None:
            w1Val = -w2Val + w3Val
        elif w2Val == None:
            w2Val = -w1Val + w3Val
        elif w3Val == None:
            w3Val = w1Val + w2Val
    elif nuNones == 2:
        if w1Val != None:
            w2Val = w1Val * 10.0
            w3Val = w1Val + w2Val
        elif w2Val != None:
            w1Val = w2Val / 10.0
            w3Val = w1Val + w2Val
        elif w3Val != None:
            w1Val = w3Val * 1.0 / 10.0
            w2Val = w3Val * 9.0 / 10.0
    elif nuNones == 3:
        #What to do?
        w1Val = 20.0
        w2Val = 30.0
        w3Val = w1Val + w2Val
    unkValLst[w1Idx] = w1Val
    unkValLst[w2Idx] = w2Val
    unkValLst[w3Idx] = w3Val

    #Do fractions
    if self._unkFracs != None:
        if self._unkFracs == _P_FRACS:
            mFlows = self._mFracs * (w2Val / Numeric.sum(self._pureCmpMW * self._mFracs))
            dFlows = self._dFracs * (w3Val / Numeric.sum(self._pureCmpMW * self._dFracs))
            self._pFracs = -mFlows + dFlows
            self._pFracs = self._pFracs / Numeric.sum(self._pFracs)  #Normalize
        elif self._unkFracs == _M_FRACS:
            pFlows = self._pFracs * (w1Val / Numeric.sum(self._pureCmpMW * self._pFracs))
            dFlows = self._dFracs * (w3Val / Numeric.sum(self._pureCmpMW * self._dFracs))
            self._mFracs = -pFlows + dFlows
            self._mFracs = self._mFracs / Numeric.sum(self._mFracs)  #Normalize
        elif self._unkFracs == _D_FRACS:
            pFlows = self._pFracs * (w1Val / Numeric.sum(self._pureCmpMW * self._pFracs))
            mFlows = self._mFracs * (w2Val / Numeric.sum(self._pureCmpMW * self._mFracs))
            self._dFracs = pFlows + mFlows
            self._dFracs = self._dFracs / Numeric.sum(self._dFracs)  #Normalize

    #Pressures
    lst = [p1Val, p2Val, p3Val]
    nuNones = lst.count(None)
    if nuNones == 1:
        if p1Val == None:
            p1Val = p3Val / 5
        elif p2Val == None:
            p2Val = p1Val * 10.0
        elif p3Val == None:
            p3Val = p1Val * 5.0
    elif nuNones == 2:
        if p1Val != None:
            p2Val = p1Val * 10.0
            p3Val = p1Val * 5.0
        elif p2Val != None:
            p1Val = p2Val / 10.0
            p3Val = p1Val * 5.0
        elif p3Val != None:
            p1Val = p3Val / 5.0
            p2Val = p1Val * 10.0
    elif nuNones == 3:
        #What to do?
        p1Val = 101.0
        p2Val = 800.0
        p3Val = 500.0
    unkValLst[p1Idx] = p1Val
    unkValLst[p2Idx] = p2Val
    unkValLst[p3Idx] = p3Val

    #Enthalpies
    if None in (H1Val, H2Val, H3Val):
        (H1Val, H2Val, H3Val) = self.FillInWithAverage((H1Val, H2Val, H3Val))
        if None in (H1Val, H2Val, H3Val):
            #What to do??
            #Could estimate a T and get H from there
            H1Val = H2Val = H3Val = 10000.0
    unkValLst[H1Idx] = H1Val
    unkValLst[H2Idx] = H2Val
    unkValLst[H3Idx] = H3Val

    #Densities
    if None in (d1Val, d2Val, d3Val):
        (d1Val, d2Val, d3Val) = self.FillInWithAverage((d1Val, d2Val, d3Val))
        if None in (d1Val, d2Val, d3Val):
            #What to do??
            #Could get it from flashing the estimates of H and P
            d1Val = d2Val = d3Val = 1000.0
    unkValLst[d1Idx] = d1Val
    unkValLst[d2Idx] = d2Val
    unkValLst[d3Idx] = d3Val

    #Surfaces
    lst = [S1Val, S2Val, S3Val]
    nuNones = lst.count(None)
    if nuNones == 1:
        if S1Val == None:
            S1Val = -S2Val + S3Val
        elif S2Val == None:
            S2Val = -S1Val + S3Val
        elif S3Val == None:
            S3Val = S1Val + S2Val
    elif nuNones == 2:
        if S1Val != None:
            S2Val = S1Val / 2.0
            S3Val = S1Val + S2Val
        elif S2Val != None:
            S1Val = S2Val * 2.0
            S3Val = S1Val + S2Val
        elif S3Val != None:
            S1Val = S3Val * 2.0 / 3.0
            S2Val = S3Val / 3.0
    elif nuNones == 3:
        #What to do?
        S1Val = 20.0
        S2Val = 10.0
        S3Val = S1Val + S2Val
    unkValLst[S1Idx] = S1Val
    unkValLst[S2Idx] = S2Val
    unkValLst[S3Idx] = S3Val

    #Velocities. Estimate based on known or newly estimated values
    unkValLst[v1Idx] = v1Val = w1Val / d1Val / S1Val
    unkValLst[v2Idx] = v2Val = w2Val / d2Val / S2Val
    unkValLst[v3Idx] = v3Val = w3Val / d3Val / S3Val

    #Load scale factors based on the numbers that the solver will be working with
    self.scaleFactorW = min(abs(w1Val), abs(w2Val), abs(w3Val))
    if not self.scaleFactorW:
        self.scaleFactorW = 1.0
    self.scaleFactorS = S2Val
    self.scaleFactorP = min(p1Val, p2Val, p3Val)
    self.scaleFactorPS = self.scaleFactorP * self.scaleFactorS
    self.scaleFactorH = min(abs(H1Val), abs(H2Val), abs(H3Val))
    if not self.scaleFactorH:
        self.scaleFactorH = 10.0
    self.scaleFactorD = min(d1Val, d2Val, d3Val)
    self.scaleFactorv = min(abs(v1Val), abs(v2Val), abs(v3Val))
    if not self.scaleFactorv:
        self.scaleFactorv = 10.0

    #Put them in place for the solver
    unkScaleFact[p1Idx] = self.scaleFactorP
    unkScaleFact[w1Idx] = self.scaleFactorW
    unkScaleFact[d1Idx] = self.scaleFactorD
    unkScaleFact[S1Idx] = self.scaleFactorS
    unkScaleFact[v1Idx] = self.scaleFactorv
    unkScaleFact[H1Idx] = self.scaleFactorH

    #Do unit conversion
    unkValLst = self.ConvertToMKS(unkValLst)
    unkScaleFact = self.ConvertToMKS(unkScaleFact)
    self.scaleFactorP = unkScaleFact[p1Idx]
    self.scaleFactorW = unkScaleFact[w1Idx]
    self.scaleFactorD = unkScaleFact[d1Idx]
    self.scaleFactorS = unkScaleFact[S1Idx]
    self.scaleFactorv = unkScaleFact[v1Idx]
    self.scaleFactorH = unkScaleFact[H1Idx]
    self.scaleFactorPS = self.scaleFactorP * self.scaleFactorS

    #Load the unknowns object
    unkInitValLst = list(unkValLst)
    self._unknowns.SetNames(unkNamLst)
    self._unknowns.SetValues(unkValLst)
    self._unknowns.SetInitValues(unkInitValLst)
    self._unknowns.SetIsFixed(unkIsFixLst)
    self._unknowns.SetScaleFactors(unkScaleFact)

    return True
def deNAN(a, value=0.0):
    # NaN is the only value for which both (a < 0) and (a >= 0) are false,
    # so the sum of the two comparisons flags every non-NaN element.
    nans = Numeric.logical_not(Numeric.less(a, 0.0) + Numeric.greater_equal(a, 0.0))
    return Numeric.where(nans, value, a)
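# --- Modern one-liner doing the same thing (numpy has an explicit NaN test,
# so the double-comparison trick is no longer needed):
import numpy as np

def deNAN_np(a, value=0.0):
    return np.where(np.isnan(a), value, a)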
def Solve(self):
    """solve for controllers in controllerList"""
    from sim.solver import Flowsheet
    flowsheet = self.flowsheet
    flPath = flowsheet.GetPath()
    details = flowsheet.GetParameterValue(Flowsheet.RECYCLE_DETAILS_VAR)

    # solve the controllers
    self.activeControllers = []
    self.activeControllersBisect = []
    for cont in self.controllerList:
        error = cont.Error()
        if error != None and cont.SaveBase() and cont.GetParameterValue(IGNORED_PAR) == None:
            #solMeth = cont.GetParameterValue(SOLUTION_METH_PAR)
            solMeth = BROYDEN_METH
            if solMeth == BROYDEN_METH or solMeth == None:
                self.activeControllers.append(cont)
            elif solMeth == BISECTION_METH:
                self.activeControllersBisect.append(cont)

    #For broyden
    numA = self.numberActive = len(self.activeControllers)
    #For bisection
    numABisect = self.numberActiveBisect = len(self.activeControllersBisect)
    if self.numberActive + self.numberActiveBisect == 0:
        return

    #For broyden
    maxErr1 = 0.0
    totalError = 0.0
    if numA > 0:
        self.errors = self.GetErrors(BROYDEN_METH)
        tolerances = self.GetTolerances(BROYDEN_METH)
        maxErr1 = max(Numeric.absolute(self.errors)/tolerances)
        totalError = Numeric.sum(Numeric.absolute(self.errors))

    #For bisection
    maxErr2 = 0.0
    totalErrorBisect = 0.0
    if numABisect > 0:
        self.errorsBisect = self.GetErrors(BISECTION_METH)
        tolerancesBisect = self.GetTolerances(BISECTION_METH)
        maxErr2 = max(Numeric.absolute(self.errorsBisect)/tolerancesBisect)
        totalErrorBisect = Numeric.sum(Numeric.absolute(self.errorsBisect))

    if max(maxErr1, maxErr2) <= 1.0:
        return

    if numA > 0:
        # calculate a jacobian if necessary
        newJacobian = 0
        if self.jacobian == None or self.jacobian.shape[0] != numA:
            self.CalcJacobian()
            newJacobian = 1

    #For broyden
    if numA > 0:
        oldErrors = zeros(self.numberActive, Float)
        oldVars = zeros(self.numberActive, Float)
        vars = zeros(self.numberActive, Float)  # using scaled variables so base is 0
        minV = self.GetMinimums(BROYDEN_METH)
        maxV = self.GetMaximums(BROYDEN_METH)

    #For bisection
    if numABisect > 0:
        oldErrorsBisect = zeros(numABisect, Float)
        oldVarsBisect = zeros(numABisect, Float)
        varsBisect = zeros(numABisect, Float)  # using scaled variables so base is 0
        lowBound = self.GetMinimums(BISECTION_METH)
        upBound = self.GetMaximums(BISECTION_METH)

    minStepSize = 0.0001

    #Initialize boundaries
    if numABisect > 0:
        sign = 1.0
        #We already have errors @ varsBisect; let's get errors in upBound
        # adjust the output
        self.SetOutput(upBound, BISECTION_METH)
        # recalculate flowsheet
        self.flowsheet.InnerSolve()
        changeInSign = self.GetErrors(BISECTION_METH)*self.errorsBisect
        for i in range(numABisect):
            if sign*changeInSign[i] > 0.0:
                #Didn't change sign
                upBound[i] = varsBisect[i]
                sign = 1.0
            else:
                #Sign changed
                lowBound[i] = varsBisect[i]
                sign = -1.0

    iter = 0
    maxIter = flowsheet.GetParameterValue(MAXITERCONT_PAR)
    if not maxIter:
        maxIter = 1E10  #A very large number

    while iter < maxIter:
        #main loop
        converged = 0
        iter += 1

        #First bisect
        if numABisect > 0:
            oldErrorsBisect[:] = self.errorsBisect[:]
            oldTotalErrorBisect = totalErrorBisect
            varsBisect[:] = lowBound + (upBound-lowBound)/2.0

            # adjust the output
            self.SetOutput(varsBisect, BISECTION_METH)

            # recalculate flowsheet
            flowsheet.InnerSolve()

            self.errorsBisect = self.GetErrors(BISECTION_METH)
            changeInSign = self.errorsBisect*oldErrorsBisect
            for i in range(numABisect):
                if sign*changeInSign[i] > 0.0:
                    #Didn't change sign
                    upBound[i] = varsBisect[i]
                    sign = 1.0
                else:
                    #Sign changed
                    lowBound[i] = varsBisect[i]
                    sign = -1.0

            maxGap = max(upBound - lowBound)
            if maxGap < minStepSize:
                # step size too small - bail
                break
            totalErrorBisect = Numeric.sum(Numeric.absolute(self.errorsBisect))
        #Do broyden
        if numA > 0:
            oldErrors[:] = self.errors[:]
            oldTotalError = totalError
            oldVars[:] = vars[:]
            adjustment = -dot(self.jacobian, self.errors)

            # initial step length limited so no controller exceeds its max step
            maxAdjustment = max(Numeric.absolute(adjustment))
            if maxAdjustment > 1.0:
                stepLength = 1.0 / maxAdjustment
            else:
                stepLength = 1.0

            adj = stepLength * adjustment
            newvars = vars + adj
            minlimit = min(where(newvars < minV, (minV - vars)/adj, 1.0))
            maxlimit = min(where(newvars > maxV, (maxV - vars)/adj, 1.0))
            stepLength *= min(minlimit, maxlimit)

            # loop until reduction in errors or step length becomes too small
            while 1:
                actualAdjustment = stepLength * adjustment
                vars += actualAdjustment

                # adjust the output
                self.SetOutput(vars)

                # recalculate flowsheet
                flowsheet.InnerSolve()

                # check errors
                self.errors = self.GetErrors()
                totalError = Numeric.sum(Numeric.absolute(self.errors))
                if totalError < oldTotalError:
                    break

                # errors did not go down - back down step size
                vars[:] = oldVars[:]
                if stepLength < minStepSize:
                    # step size too small - bail
                    break
                stepLength /= 4

        if details:
            try:
                for cont in self.activeControllers + self.activeControllersBisect:
                    target = cont.targetPort.GetValue()
                    current = cont.monitoredPort.GetValue()
                    flowsheet.InfoMessage('RecycleErrorDetail',
                                          (cont.GetPath(), cont.targetPort.GetType().name,
                                           current, target))
            except:
                pass
        flowsheet.InfoMessage('ControllerTotalError', (flPath, totalError+totalErrorBisect))

        #Broyden
        if numA > 0:
            maxErr1 = max(Numeric.absolute(self.errors)/tolerances)
        #Bisect
        if numABisect > 0:
            maxErr2 = max(Numeric.absolute(self.errorsBisect)/tolerancesBisect)
        if max(maxErr1, maxErr2) <= 1.0:
            converged = 1
            break

        if numA > 0:
            if totalError >= oldTotalError:
                # smallest step didn't work - if we already have a new jacobian, exit
                if newJacobian:
                    break
                else:
                    # otherwise try a fresh one
                    self.CalcJacobian()
                    newJacobian = 1
                    minV = self.GetMinimums()
                    maxV = self.GetMaximums()
            else:
                # update jacobian
                self.jacobian = self.UpdateJacobian(self.jacobian, actualAdjustment,
                                                    (self.errors - oldErrors))
                newJacobian = 0

    if not converged:
        flowsheet.InfoMessage('ControllerConvergeFail', flPath)
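# --- Hedged sketch of the rank-1 update that UpdateJacobian presumably
# performs (assumption: self.jacobian approximates the *inverse* Jacobian,
# since the step above is -dot(jacobian, errors)).  This is the "good"
# Broyden update in Sherman-Morrison form:
import numpy as np

def broyden_inverse_update(Jinv, dx, df):
    """Update Jinv so that Jinv_new . df == dx (the secant condition)."""
    u = dx - np.dot(Jinv, df)
    denom = np.dot(dx, np.dot(Jinv, df))
    return Jinv + np.outer(u, np.dot(dx, Jinv)) / denom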
def _magFix(self, catalogFile):
    """This private method receives a path to a catalog file and sifts
    through the MAGERR field looking for values > 10.  It sets the
    corresponding MAG field = -99 and sets that object's MAGERR field
    to 0.0.  catalogFile is a path, not a file object."""

    # fillHeader will return a list of tuples which looks like
    #
    # [(1, 'NUMBER'),
    #  (2, 'X_IMAGE'),
    #  (3, 'Y_IMAGE'),
    #  ...
    #  (12, 'MAG_ISOCOR'),
    #  (13, 'MAGERR_ISOCOR'),
    #  (14, 'FLUX_APER', 1),
    #  (15, 'FLUX_APER', 2),
    #  (16, 'FLUX_APER', 3),
    #  ...
    # ]
    #
    # The tuples are either of length 2 or 3.  If len is 3, the 3rd item of the
    # tuple is the nth occurrence of that column identifier.  This occurs on those
    # columns of MAGs and MAGERRs for a series of increasingly larger apertures.

    # newFieldList will be a list of Numeric arrays containing the columns of the
    # catalog.  This list will contain fields which have not been altered, i.e. all
    # fields other than MAG_* and MAGERR_*, and the new MAG and MAGERR fields which
    # have been corrected.  Once the list is complete, it is tuple-ized and sent to
    # the tableio put_data function.

    newFieldList = []
    newMagsList = []
    newMagErrsList = []
    newMagHeaders = []
    newMagErrHeaders = []
    newHeaders = []
    magCols = []
    magErrCols = []
    selectSet = fillHeader(catalogFile)

    print "Searching catalog for required columns, MAG, MAGERR"
    for i in range(len(selectSet)):
        if len(selectSet[i]) == 2:
            column, name = selectSet[i]
            paramNames = name.split("_")
            if "MAG" in paramNames:
                magCols.append((column, name))
            elif "MAGERR" in paramNames:
                magErrCols.append((column, name))
            else:
                oldField = tableio.get_data(catalogFile, (column - 1))
                newFieldList.append(oldField)
                newHeaders.append(name)
            continue
        else:
            column, name, id = selectSet[i]
            paramNames = name.split("_")
            if "MAG" in paramNames:
                magCols.append((column, name, id))
            elif "MAGERR" in paramNames:
                magErrCols.append((column, name, id))
            else:
                oldField = tableio.get_data(catalogFile, (column - 1))
                newFieldList.append(oldField)
                newHeaders.append(name)
            continue

    # We now have
    # catalog field --> list
    # --------------------------------
    # MAG_*         --> magCols
    # MAGERR_*      --> magErrCols
    #
    # The algorithm will be to step through the magErrCols columns, extracting those
    # fields via get_data and getting Numeric arrays.  The matching mag columns are
    # slurped as well.  We search the magErrCols arrays looking for >= 10 values and
    # then mark those mags as -99.0 and the matching magerrs as 0.0.
    # See Bugzilla bug #2700

    for item in magErrCols:
        magErrAperId = None
        # item may be of len 2 or 3
        if len(item) == 2:
            magErrColId, magErrColName = item
        else:
            magErrColId, magErrColName, magErrAperId = item

        magErrKind = magErrColName.split("_")[1]  # ISO, ISOCOR, etc.

        print "\n\nMAG type:", magErrKind
        if magErrAperId:
            print magErrColName, "Aper id is", magErrAperId
        print "Getting\t", magErrColName, "\tfield", magErrColId

        # MAGERR array:
        magErrs = tableio.get_data(catalogFile, magErrColId - 1)

        matchingMagColName = None
        matchingMagColId = None

        #----------------------- Search for matching MAG_* field -----------------------#
        for magitems in magCols:
            # We know that the magErrColName is MAGERR, and if magErrAperId is true
            # then the tuple is of len 3, i.e. a MAGERR_APER field.  We look for the
            # matching MAG_APER field id, 1, 2, 3... etc.
            if len(magitems) == 3:
                magColId, magColName, magAperId = magitems
                if magColName == "MAG_" + magErrKind:
                    matchingMagColName = magColName
                    #print "Found matching field type:", magColName, "in field", magColId
                    if magAperId == magErrAperId:
                        print "Found matching aperture id."
                        print "MAG_APER id: ", magAperId, "MAGERR_APER id: ", magErrAperId
                        matchingMagColId = magColId
                        matchingMags = tableio.get_data(catalogFile, magColId - 1)
                        break
                else:
                    continue
            else:
                magColId, magColName = magitems
                if magColName == "MAG_" + magErrKind:
                    print "Found matching field type:", magColName, "in field", magColId
                    matchingMagColName = magColName
                    matchingMagColId = magColId
                    matchingMags = tableio.get_data(catalogFile, magColId - 1)
                    break
                else:
                    continue
        #--------------------------------------------------------------------------------#

        print "   MAG err field:", magErrColName, magErrColId
        print "   Mag field:", matchingMagColName, matchingMagColId

        # Now the grunt work on the arrays, magErrs and matchingMags.
        #
        # update: flagging all MAGs as -99 when the corresponding MAGERR > 10
        # introduced a bug which unintentionally reset the magnitudes
        # SExtractor had flagged with a MAG = 99.0 and a MAGERR = 99.0.
        # This now checks for a MAGERR of 99 and does not reset the MAG value
        # if MAGERR = 99.0, but does for all other MAGERRs > 10.0.
        badMagErrs1 = Numeric.where(magErrs >= 10, 1, 0)
        badMagErrs2 = Numeric.where(magErrs != 99.0, 1, 0)
        badMagErrs = badMagErrs1 * badMagErrs2
        del badMagErrs1, badMagErrs2
        newMags = Numeric.where(badMagErrs, -99.0, matchingMags)
        newMagErrs = Numeric.where(badMagErrs, 0.0, magErrs)

        newMagsList.append(newMags)
        newMagHeaders.append(matchingMagColName)
        newMagErrsList.append(newMagErrs)
        newMagErrHeaders.append(magErrColName)

    # concatenate the lists.  This is done to preserve the MAG_APER and MAGERR_APER
    # grouping of the original SExtractor catalog.
    newFieldList = newFieldList + newMagsList
    newFieldList = newFieldList + newMagErrsList
    newHeaders = newHeaders + newMagHeaders
    newHeaders = newHeaders + newMagErrHeaders

    newVariables = tuple(newFieldList)

    # rename the old catalog file as catalogFile.old
    os.rename(catalogFile, catalogFile + ".old")
    self.outputList[os.path.basename(catalogFile) + ".old"] = [os.path.basename(catalogFile)]

    fob = open(catalogFile, 'w')
    fob.write("## " + ptime() + "\n")
    fob.write("## " + self.modName + " catalog regenerated by _magFix method.\n")
    fob.write('## (This file was generated automatically by the ACS Pipeline.)\n##\n')
    fob.write("## This catalog has been photometrically corrected to remove\n")
    fob.write("## 'bad' magnitude values.\n")
    fob.write("##\n")
    for i in range(len(newHeaders)):
        fob.write("# " + str(i + 1) + "\t" + newHeaders[i] + "\n")
    fob.close()
    tableio.put_data(catalogFile, newVariables, append="yes")
    return
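# --- The core masking rule from _magFix as a standalone numpy sketch
# (hypothetical helper): flag MAGERR >= 10, except SExtractor's own 99.0
# sentinel, then set MAG = -99.0 and MAGERR = 0.0 for the flagged objects:
import numpy as np

def fix_bad_mags(mags, magerrs):
    bad = (magerrs >= 10.0) & (magerrs != 99.0)
    return np.where(bad, -99.0, mags), np.where(bad, 0.0, magerrs)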
def __call__(self, data, data_min=None, data_max=None,
             val_min=None, val_max=None, map_type='linear', powerOf2=0):
    """(data, data_min=None, data_max=None, val_min=None, val_max=None,
        map_type='linear', powerOf2=0)
    Maps an array of floats to integer values.
    data     -- 3D numeric array;
    data_min, data_max -- min and max data values (other than the actual array
                minimum/maximum) to map to the integer values val_min and
                val_max in the range (0 ... int_limit);
    map_type -- can be 'linear' or 'log';
    powerOf2 -- if set to 1 and the data array dimensions are not powers of 2,
                the returned array is padded with zeros so that its dims are
                powers of 2.
    """
    # If data_min/max and val_min/max are specified then the mapping
    # proceeds as follows:
    # [arr_min, data_min]  is mapped to [int_min, val_min],
    # [data_min, data_max] is mapped to [val_min, val_max],
    # [data_max, arr_max]  is mapped to [val_max, int_limit].
    int_limit = self.int_limit
    int_min = self.int_min
    shape = data.shape
    assert len(shape) == 3
    nx, ny, nz = shape
    arrsize = nx*ny*nz

    #arr_max = Numeric.maximum.reduce(data.ravel())
    #arr_min = Numeric.minimum.reduce(data.ravel())
    maxif = Numeric.maximum.reduce
    arr_max = maxif(maxif(maxif(data)))
    minif = Numeric.minimum.reduce
    arr_min = minif(minif(minif(data)))

    if val_min != None:
        assert val_min >= 0 and val_min < int_limit
    else:
        val_min = int_min
    if val_max != None:
        assert val_max <= int_limit and val_max > 0
    else:
        val_max = int_limit

    if data_min != None:
        if data_min < arr_min:
            data_min = arr_min
    else:
        data_min = arr_min
    if data_max != None:
        if data_max > arr_max:
            data_max = arr_max
    else:
        data_max = arr_max

    print "mapping data_min %4f to val_min %d, data_max %4f to val_max %d" \
          % (data_min, val_min, data_max, val_max)

    if map_type == 'linear':
        k2, c2 = self.ScaleMap((val_min, val_max), (data_min, data_max))
        n_intervals = 3
        if abs(data_min - arr_min) < 0.00001:  # data_min == arr_min
            k1, c1 = k2, c2
            n_intervals = n_intervals - 1
        else:
            k1, c1 = self.ScaleMap((int_min, val_min), (arr_min, data_min))
        if abs(data_max - arr_max) < 0.00001:  # data_max == arr_max
            k3, c3 = k2, c2
            n_intervals = n_intervals - 1
        else:
            k3, c3 = self.ScaleMap((val_max, int_limit), (data_max, arr_max))

        t1 = time()
        if n_intervals == 2:
            if data_max == arr_max:
                new_arr = Numeric.where(Numeric.less(data, data_min),
                                        k1*data + c1, k2*data + c2)
            elif data_min == arr_min:
                new_arr = Numeric.where(Numeric.greater_equal(data, data_max),
                                        k3*data + c3, k2*data + c2)
        elif n_intervals == 3:
            new_arr1 = Numeric.where(Numeric.less(data, data_min),
                                     k1*data + c1, k2*data + c2)
            new_arr = Numeric.where(Numeric.greater_equal(data, data_max),
                                    k3*data + c3, new_arr1)
            del new_arr1
        else:
            new_arr = k2*data + c2
        arr = Numeric.transpose(new_arr).astype(self.int_type)
        del new_arr
        t2 = time()
        print "time to map : ", t2 - t1
    elif map_type == 'log':
        if arr_min < 0:
            diff = abs(arr_min) + 1.0
        elif arr_min >= 0 and arr_min < 1.0:
            diff = 1.0
        elif arr_min >= 1.0:
            diff = 0
        k1, c1 = self.ScaleMap((int_min, int_limit),
                               (log(arr_min + diff), log(arr_max + diff)))
        arr = Numeric.transpose(k1*Numeric.log10(data + diff) + c1).astype(self.int_type)

    self.data_min = data_min
    self.data_max = data_max
    self.val_min = val_min
    self.val_max = val_max

    if powerOf2:
        nx1, ny1, nz1 = nx, ny, nz
        res, power = isPowerOf2(nx)
        if not res:
            nx1 = 2**power
        res, power = isPowerOf2(ny)
        if not res:
            ny1 = 2**power
        res, power = isPowerOf2(nz)
        if not res:
            nz1 = 2**power
        dx, dy, dz = 0, 0, 0
        if nx1 != nx or ny1 != ny or nz1 != nz:
            dx = (nx1 - nx)/2.
            dy = (ny1 - ny)/2.
            dz = (nz1 - nz)/2.
            # note: arr has already been transposed, so it is indexed (z, y, x)
            narr = Numeric.zeros((nz1, ny1, nx1), self.int_type)
            narr[:nz, :ny, :nx] = arr[:, :, :]
            self.arr = narr
            return narr

    self.arr = arr
    return arr
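# --- Hedged sketch of what ScaleMap presumably returns: the slope k and
# intercept c of the line mapping the data interval onto the value interval,
# so that k*data + c sends data_min -> val_min and data_max -> val_max:
def scale_map(val_range, data_range):
    (v0, v1), (d0, d1) = val_range, data_range
    k = (v1 - v0) / float(d1 - d0)  # assumes d1 != d0
    return k, v0 - k*d0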
def rgrd(self, dataIn, missingValueIn, missingMatch, logYes='yes',
         positionIn=None, missingValueOut=None):
    """
    #---------------------------------------------------------------------------------
    #
    # PURPOSE: To perform all the tasks required to regrid the input data, dataIn,
    #          into the output data, dataOut, along the level dimension only.
    #
    # DEFINITION:
    #
    # def rgrd(self, dataIn, missingValueIn, missingMatch, logYes='yes',
    #          positionIn=None, missingValueOut=None):
    #
    # PASSED: dataIn -- data to regrid
    #
    #         missingValueIn -- the missing data value to use in setting missing in
    #             the mask.  It is required and there are two choices:
    #                 None     -- there is no missing data
    #                 A number -- the value to use in the search for possible missing
    #                             data.  The presence of missing data at a grid point
    #                             leads to recording 0.0 in the mask.
    #
    #         missingMatch -- the comparison scheme used in searching for missing
    #             data in dataIn using the value passed in as missingValueIn.
    #             The choices are:
    #                 None    -- used if None is the entry for missingValueIn
    #                 exact   -- used if missingValue is the exact value from the file
    #                 greater -- the missing data value is equal to or greater than
    #                            missingValueIn
    #                 less    -- the missing data value is equal to or less than
    #                            missingValueIn
    #
    #         logYes -- choose the level regrid as linear in log of level or linear
    #             in level.  Set to 'yes' for log.  Anything else is linear in level.
    #
    #         positionIn -- a tuple with the numerical position of the dimensions
    #             in C or Python order specified in the sequence longitude,
    #             latitude, level and time.  Longitude, latitude and level are
    #             required.  If time is missing submit None in its slot in the
    #             tuple.  Notice that the length of the tuple is always four.
    #
    #             Explicitly, in terms of the shape of dataIn as returned by
    #             Python's shape function:
    #
    #                 positionIn[0] contains the position of longitude in dataIn
    #                 positionIn[1] contains the position of latitude in dataIn
    #                 positionIn[2] contains the position of level in dataIn or None
    #                 positionIn[3] contains the position of time in dataIn or None
    #
    #             As examples:
    #                 If the C order shape of 4D data is
    #                     (number of longitudes, number of times, number of levels,
    #                      number of latitudes)
    #                 submit
    #                     (0, 3, 2, 1)
    #
    #                 If the C order shape of 3D data is
    #                     (number of longitudes, number of times, number of latitudes)
    #                 submit
    #                     (0, 2, 1, None)
    #
    #             Send in None if the shape is a subset of (time, level,
    #             latitude, longitude), which is evaluated as follows:
    #                 3D -- code assumes (2,1,0,None)
    #                 4D -- code assumes (3,2,1,0)
    #
    #         missingValueOut -- the value for the missing data used in writing
    #             the output data.  If left at the default entry, None, the code
    #             uses missingValueIn if present or, as a last resort, 1.0e20.
    #
    # RETURNED: dataOut -- the regridded data
    #
    # USAGE:
    #
    # Example 1.  To regrid dataIn into dataOut using all the defaults, where
    #             None, None signifies no missing data:
    #
    #                 dataOut = x.rgrd(dataIn, None, None)
    #
    # Example 2.  To regrid dataIn into dataOut using 1.0e20 and greater as
    #             the missing data:
    #
    #                 dataOut = x.rgrd(dataIn, 1.e20, 'greater')
    #
    #---------------------------------------------------------------------------------
    """
    # check the required input -- dataIn, missingValueIn and missingMatch

    # make sure that dataIn is an array
    try:
        z = len(dataIn)
    except TypeError:
        sendmsg('Error in calling the rgrd method -- dataIn must be an array')
        raise TypeError

    # check the missingValueIn pass
    if missingValueIn != None:
        try:
            z = abs(missingValueIn)
        except TypeError:
            sendmsg('Error in calling the rgrd method -- missingValueIn must be None or a number. Now it is ',
                    missingValueIn)
            raise TypeError

    # check the missingMatch pass
    missingPossibilities = ['greater', 'equal', 'less', None]
    if missingMatch not in missingPossibilities:
        msg = 'Error in missingMatch -- it must be None or the string greater, equal, or less. Now it is '
        sendmsg(msg, missingMatch)
        raise ValueError

    # --- check data type and change to float if necessary ---
    if dataIn.dtype.char != 'f':
        dataIn = dataIn.astype(Numeric.Float32)

    dataShape = dataIn.shape
    numberDim = len(dataShape)
    if numberDim < 2:
        msg = 'Error in call to rgrd -- data must have at least 2 dimensions'
        sendmsg(msg)
        raise TypeError

    # --- evaluate positionIn ---

    # --- make standard positionIn as a check ---
    positionList = []
    for n in range(numberDim):  # insert a sequence of numbers
        positionList.append(n)
    positionList.reverse()
    for n in range(numberDim, 4):  # fill end of list with Nones
        positionList.append(None)
    positionCheck = tuple(positionList)

    standardPosition = 0  # transpose required
    if positionIn == None:  # construct the default positionIn tuple
        positionIn = positionCheck
        standardPosition = 1  # no need for a transpose with this data
    else:
        if positionIn == positionCheck:  # compare to the standard
            standardPosition = 1  # no need for a transpose with this data

    if len(positionIn) != 4:
        msg = 'Error in call to rgrd -- positionIn must be a tuple of length 4'
        sendmsg(msg)
        raise TypeError

    if standardPosition == 0:
        # transpose data to the standard order (t,z,y,x)
        newOrder, inverseOrder = checkorder(positionIn)
        dataIn = Numeric.transpose(dataIn, newOrder)
        dataIn = Numeric.array(dataIn.astype(Numeric.Float32), Numeric.Float32)  # make contiguous

    # set dimension sizes and check for consistency
    if positionIn[0] != None:
        self.nlon = dataShape[positionIn[0]]
    else:
        self.nlon = 0
    if positionIn[1] != None:
        self.nlat = dataShape[positionIn[1]]
    else:
        self.nlat = 0
    if positionIn[2] != None:
        if self.nlevi != dataShape[positionIn[2]]:
            msg = 'Level size is inconsistent with input data'
            sendmsg(msg)
            raise ValueError
    if positionIn[3] != None:
        self.ntime = dataShape[positionIn[3]]
    else:
        self.ntime = 0

    # allocate memory for dataOut -- the array with the new number of levels
    outList = list(dataIn.shape)
    for i in range(len(outList)):
        if outList[i] == self.nlevi:
            outList[i] = self.nlevo
            break
    dataOut = Numeric.zeros(tuple(outList), Numeric.Float32)  # memory for dataOut

    if missingMatch == None:  # if no missing do not pass None
        missingMatch = 'none'
    if missingValueIn == None:  # if no missing do not pass None
        missingValueIn = 1.333e33

    if logYes != 'yes':
        logYes = 'no'

    levIn = self.axisIn[:].astype(Numeric.Float64)
    levOut = self.axisOut[:].astype(Numeric.Float64)

    _regrid.rgdpressure(self.nlevi, self.nlevo, self.nlat, self.nlon, self.ntime,
                        missingValueIn, missingMatch, logYes, levIn, levOut,
                        dataIn, dataOut)
    if missingMatch == 'none':  # if no missing do not pass None
        missingMatch = None
    if missingValueIn == 1.333e33:
        missingValueIn = None

    if standardPosition == 0:
        dataOut = Numeric.transpose(dataOut, inverseOrder)  # transpose data to original order
        dataOut = Numeric.array(dataOut.astype(Numeric.Float32), Numeric.Float32)  # make contiguous

    if missingValueOut != None:  # set the missing value in data to missingValueOut
        if missingMatch == 'greater':
            if missingValueIn > 0.0:
                missing = 0.99*missingValueIn
            else:
                missing = 1.01*missingValueIn
            dataOut = Numeric.where(Numeric.greater(dataOut, missing), missingValueOut, dataOut)
        elif missingMatch == 'equal':
            missing = missingValueIn
            dataOut = Numeric.where(Numeric.equal(dataOut, missing), missingValueOut, dataOut)
        elif missingMatch == 'less':
            if missingValueIn < 0.0:
                missing = 0.99*missingValueIn
            else:
                missing = 1.01*missingValueIn
            dataOut = Numeric.where(Numeric.less(dataOut, missing), missingValueOut, dataOut)

    return dataOut
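# --- One-column sketch of the level regrid that the _regrid.rgdpressure C
# routine performs (assumption: linear interpolation in log(level) when
# logYes is 'yes'; levels must be monotonically increasing for np.interp):
import numpy as np

def regrid_column(levIn, levOut, column, logYes='yes'):
    if logYes == 'yes':
        return np.interp(np.log(levOut), np.log(levIn), column)
    return np.interp(levOut, levIn, column)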
# Those objects with flux equal or less than 0 are assigned a magnitude of 99
# and a limiting magnitude equal to their SExtractor photometric error.  This
# is interpreted by BPZ as a nondetection with zero flux and 1-sigma error
# equal to the limiting magnitude.
nondetected = Numeric.less_equal(flux[i, :], 0.0) * Numeric.greater(fluxerr[i, :], 0.0)

# Those objects with flux error and flux equal to 0 are assigned a magnitude
# of -99 and a flux of 0, which is interpreted by SExtractor as a non-observed
# object.
nonobserved = Numeric.less_equal(fluxerr[i, :], 0.0)

# When flux error > 100*(flux), mark as nonobserved (Benitez, 24-Oct-03).
nonobserved = Numeric.where(fluxerr[i, :] > 100*(abs(flux[i, :])), 1.0, nonobserved[:])

detected = Numeric.logical_not(nonobserved + nondetected)

# Get the zero point for the final magnitudes
zpoint = fUtil.zeroPoint(fitsfile)  # pass the fits file to zeroPoint func
pardict["Zeropoint"] = str(zpoint)
pardict["Zeropoint_Error"] = zpoint_err
self.logfile.write("Photometric ZeroPoint of " + os.path.basename(fitsfile) + ": " + str(zpoint))

#---------------- Temporary zero point correction --------------------#
## Commented 24-Sep-2002 as per Bugzilla bug #1800
##