from numpy import nonzero
from scipy import ndimage


def merge(hrus, dominant_hrus, nodata_value):
    """
    Merge non-dominant HRUs into neighboring dominant HRUs using a
    Euclidean allocation method.

    Parameters
    ----------
    hrus : array
        original hru1 raster pre-threshold
    dominant_hrus : array
        dominant hrus obtained from hru1.shp post-threshold
    nodata_value : float
        nodata_value for hru1.tif (carried over from original raster)

    Returns
    -------
    hrus : array
        hru raster with non-dominant hrus merged into dominant hrus
    """
    # inside_watershed cells are the ones the merging step operates on
    inside_watershed_indexes = nonzero(hrus != nodata_value)
    outside_watershed_indexes = nonzero(hrus == nodata_value)

    # copy hru values
    hrus_test = hrus.copy()

    # change the default nodata value to 1
    hrus_test[outside_watershed_indexes] = 1

    # a set speeds up the membership tests in the following loop
    dominant_hrus_set = set(dominant_hrus)

    # set dominant hrus to 0 and non-dominant to 1
    for i in range(len(inside_watershed_indexes[0])):
        row = inside_watershed_indexes[0][i]
        col = inside_watershed_indexes[1][i]
        if hrus_test[row][col] in dominant_hrus_set:
            hrus_test[row][col] = 0
        else:
            hrus_test[row][col] = 1

    # perform Euclidean allocation; returns the indexes
    # of the nearest dominant hru for every cell
    indexes = ndimage.distance_transform_edt(hrus_test,
                                             return_indices=True)[1]

    # rows and columns of the indexes
    rows = indexes[0]
    cols = indexes[1]

    # use indexes to update non-dominant hrus with the nearest dominant hru
    # (dominant cells map to themselves, so updating in place is safe)
    for i in range(len(rows)):
        for j in range(len(rows[0])):
            hrus[i][j] = hrus[rows[i][j]][cols[i][j]]

    # reset nodata now that merging is complete
    hrus[outside_watershed_indexes] = nodata_value

    return hrus
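# Hypothetical usage sketch for merge(): a tiny raster where HRU 2 is
# non-dominant and gets absorbed by its nearest dominant neighbors.
# The array values and the nodata marker (-1) are made up for
# illustration; real inputs come from hru1.tif.
import numpy as np

raster = np.array([[1., 1., 2., 3.],
                   [1., 2., 2., 3.],
                   [-1., 1., 3., 3.],
                   [-1., -1., 3., 3.]])
merged = merge(raster, dominant_hrus=[1, 3], nodata_value=-1)
# every former 2-cell now holds the value of the nearest 1- or 3-cell;
# note that merge() mutates and returns the same array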
from numpy import nonzero, ravel
import numpy as _nx


def extract(condition, arr):
    """Return the elements of ravel(arr) where ravel(condition) is True
    (in 1D).

    Equivalent to compress(ravel(condition), ravel(arr)).
    """
    return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
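# Quick check of extract(): both arguments are flattened first, so the
# result is always 1-D regardless of the input shapes.
import numpy as np

arr = np.arange(6).reshape(2, 3)
print(extract(arr % 2 == 0, arr))   # -> [0 2 4]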
import numpy as np
from numpy import nonzero

# Tour and self.graph are assumed to come from the surrounding module.


def _mate_parents(self, p1, p2):
    """Order-preserving crossover: copy a random segment of p1's tour
    into the positions those same cities occupy in p2's tour."""
    size = self.graph.vcount
    # start from a copy of p2's tour
    child_tour = np.zeros(self.graph.vcount) + p2.tour

    # draw two distinct cut points and sort them so a[0] < a[1]
    while True:
        a = np.random.randint(0, size, 2)
        if a[0] != a[1]:
            temp = min(a[0], a[1])
            a[1] = a[0] + a[1] - temp
            a[0] = temp
            break

    # find where each city of p1's segment sits in p2's tour
    index_list = []
    for i in range(a[0], a[1] + 1):
        temp = nonzero(p2.tour == p1.tour[i])
        index_list.append(temp[0][0])
    index_list.sort()

    # write p1's segment into those positions, in p1's order
    for i in range(len(index_list)):
        temp = a[0] + i
        child_tour[index_list[i]] = p1.tour[temp]

    child = Tour()
    child.tour = child_tour
    child.tour_cost = self.graph._get_tour_cost(child_tour)
    return child
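# Standalone sketch of the crossover idea above, with the Tour and graph
# machinery stripped away (tours and cut points are made up):
import numpy as np

p1 = np.array([0, 3, 1, 4, 2])
p2 = np.array([2, 0, 4, 3, 1])
a0, a1 = 1, 3                                  # segment p1[1:4] = [3, 1, 4]
idx = sorted(np.nonzero(p2 == c)[0][0] for c in p1[a0:a1 + 1])
child = p2.copy()
child[idx] = p1[a0:a1 + 1]                     # -> [2 0 3 1 4], still a permutation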
from numpy import nonzero

# calcEk and selectJrand are assumed to be defined elsewhere in this module.


def selectJ(i, oS, Ei):
    """Choose the second alpha: the one whose cached error differs most
    from Ei (the heuristic that maximizes the step size)."""
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    oS.eCache[i] = [1, Ei]
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
    if len(validEcacheList) > 1:
        for k in validEcacheList:
            if k == i:
                continue
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if deltaE > maxDeltaE:
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej
    else:
        # no usable error cache yet: fall back to a random second alpha
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
        return j, Ej
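# Toy illustration of the max-|deltaE| heuristic used by selectJ, with
# made-up cached errors and no optStruct:
import numpy as np

errs = np.array([0.2, -0.4, 0.1, 0.9])
i, Ei = 0, 0.2
j = max((k for k in range(len(errs)) if k != i),
        key=lambda k: abs(Ei - errs[k]))
print(j, errs[j])   # -> 3 0.9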
from numpy import mat, nonzero

# optStruct and innerL are assumed to be defined elsewhere in this module.


def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):
    """Platt's full SMO: alternate full-set sweeps with passes over the
    non-bound alphas until no pair changes."""
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler, kTup)
    iter = 0
    entireSet = True
    alphaPairsChanged = 0
    while (iter < maxIter) and ((alphaPairsChanged > 0) or entireSet):
        alphaPairsChanged = 0
        if entireSet:
            # sweep over every sample
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)
                print("fullSet, iter: %d i:%d, pairs changed %d"
                      % (iter, i, alphaPairsChanged))
            iter += 1
        else:
            # sweep only over the non-bound alphas (0 < alpha < C)
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound, iter: %d i:%d, pairs changed %d"
                      % (iter, i, alphaPairsChanged))
            iter += 1
        if entireSet:
            entireSet = False
        elif alphaPairsChanged == 0:
            entireSet = True
        print("iteration number: %d" % iter)
    return oS.b, oS.alphas
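# Standalone illustration of the non-bound selection trick inside smoP:
# alphas strictly between 0 and C are the only ones worth revisiting
# (toy alpha values made up here).
import numpy as np

alphas = np.mat([[0.0], [0.3], [0.6], [0.0]])
C = 0.6
nonBoundIs = np.nonzero((alphas.A > 0) * (alphas.A < C))[0]
print(nonBoundIs)   # -> [1]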
import numpy as np


def KMeans(dataSet, k, distMeas=distEclud, createCen=randCent):
    m = np.shape(dataSet)[0]
    # column 0: assigned cluster index, column 1: squared distance to it
    clusterAssment = np.mat(np.zeros((m, 2)))
    centroids = createCen(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        # assignment step: attach each point to its nearest centroid
        for i in range(m):
            minDist = np.inf
            minIndex = -1
            for j in range(k):
                distJI = distMeas(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist ** 2
        print(centroids)
        # update step: move each centroid to the mean of its points
        for cent in range(k):
            ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]
            centroids[cent, :] = np.mean(ptsInClust, axis=0)
    return centroids, clusterAssment
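# Hypothetical end-to-end run on random 2-D points. distEclud and
# randCent are minimal stand-ins for the helpers this module is assumed
# to provide (in a real module they would be defined above KMeans):
def distEclud(a, b):
    # Euclidean distance between two row vectors
    return np.sqrt(np.sum(np.power(a - b, 2)))

def randCent(dataSet, k):
    # k random centroids drawn within each feature's min/max range
    n = np.shape(dataSet)[1]
    cents = np.mat(np.zeros((k, n)))
    for j in range(n):
        minJ = np.min(dataSet[:, j])
        rangeJ = float(np.max(dataSet[:, j]) - minJ)
        cents[:, j] = minJ + rangeJ * np.random.rand(k, 1)
    return cents

pts = np.mat(np.random.rand(20, 2))
centroids, assignments = KMeans(pts, 2)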
from numpy import mat, multiply, nonzero, shape, sign


def testDigits(kTup=('rbf', 10)):
    dataArr, labelArr = loadImages('trainingDigits')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
    datMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    # support vectors are the samples with non-zero alphas
    svInd = nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d support vectors" % shape(sVs)[0])
    m, n = shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    dataArr, labelArr = loadImages('testDigits')
    errorCount = 0
    datMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    m, n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
from numpy import mat, multiply, nonzero, shape, sign


def testRbf(k1=0.9):
    dataArr, labelArr = loadDataSet('testSetRBF.txt')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1))
    datMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    # support vectors are the samples with non-zero alphas
    svInd = nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])
    m, n = shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    dataArr, labelArr = loadDataSet('testSetRBF2.txt')
    errorCount = 0
    datMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    m, n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
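# kernelTrans is not shown in this section; for kTup = ('rbf', k1) it is
# assumed (one common convention) to evaluate a Gaussian kernel
# K(x, y) = exp(-||x - y||^2 / k1**2) between the query row and every
# support vector. A minimal sketch of that assumption:
import numpy as np

def rbf_row(sVs, x, k1):
    diff = np.asarray(sVs) - np.asarray(x)      # broadcast x against all SVs
    sq = (diff ** 2).sum(axis=1)                # squared distances to each SV
    return np.exp(-sq / (k1 ** 2))              # one kernel value per SV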
import cv2
from numpy import nonzero


def ChangeGreenToWhite(I, Gthr):
    # OpenCV loads images in BGR channel order
    B, G, R = cv2.split(I)
    # returns the indices of every element for which the condition is true
    I2 = nonzero(G >= Gthr)
    I[I2] = [255, 255, 255]
    return I


def ChangeGreenScreenRGB(I, Ibcg, Gthr):
    # OpenCV loads images in BGR channel order
    B, G, R = cv2.split(I)
    I2 = nonzero(G >= Gthr)
    # replace green-dominant pixels with the background image's pixels
    I[I2] = Ibcg[I2]
    return I
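# Hypothetical usage; the file names are placeholders. The background
# image must have the same shape as the foreground frame, and the
# threshold 180 is made up for illustration.
frame = cv2.imread('greenscreen.png')
backdrop = cv2.imread('backdrop.png')
white = ChangeGreenToWhite(frame.copy(), 180)
composited = ChangeGreenScreenRGB(frame, backdrop, 180)
cv2.imwrite('composited.png', composited)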
def replaceNanWithMean(self):
    # loadDataSet is assumed to be a method of the surrounding class
    datMat = self.loadDataSet('secom.data', ' ')
    numFeat = shape(datMat)[1]
    for i in range(numFeat):
        # mean of the non-NaN entries in column i
        meanVal = mean(datMat[nonzero(~isnan(datMat[:, i].A))[0], i])
        # overwrite the NaN entries with that mean
        datMat[nonzero(isnan(datMat[:, i].A))[0], i] = meanVal
    return datMat
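# Standalone illustration of the column-mean imputation above, without
# the class wrapper or the secom.data file (toy matrix made up here):
from numpy import isnan, mat, mean, nonzero

datMat = mat([[1.0, float('nan')], [3.0, 4.0], [5.0, 6.0]])
for i in range(datMat.shape[1]):
    meanVal = mean(datMat[nonzero(~isnan(datMat[:, i].A))[0], i])
    datMat[nonzero(isnan(datMat[:, i].A))[0], i] = meanVal
print(datMat)   # the NaN in column 1 becomes (4 + 6) / 2 = 5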