def get_corresponding_points(points1, points2, guess_tfm, rows=None, cols=None):
    """
    Returns two lists of points such that the transform best explains the
    relation between the point sets. Also returns the norm of the difference
    between the point sets.
    tfm is from cam1 -> cam2
    """
    if not rows: rows = cb_rows
    if not cols: cols = cb_cols

    points1 = np.asarray(points1)
    points2 = np.asarray(points2)

    p12 = np.c_[points1, points2]
    p12 = p12[np.bitwise_not(np.isnan(p12).any(axis=1)), :]
    p1 = p12[:, 0:3]
    p2 = p12[:, 3:6]
    est = np.c_[p2, np.ones((p2.shape[0], 1))].dot(guess_tfm.T)[:, 0:3]
    dist = nlg.norm(p1 - est, ord=np.inf)

    # Try the reversed checkerboard ordering as well and keep whichever fits better.
    corr = range(rows * cols - 1, -1, -1)
    p12r = np.c_[points1, points2[corr, :]]
    p12r = p12r[np.bitwise_not(np.isnan(p12r).any(axis=1)), :]
    p1r = p12r[:, 0:3]
    p2r = p12r[:, 3:6]
    est = np.c_[p2r, np.ones((p2r.shape[0], 1))].dot(guess_tfm.T)[:, 0:3]
    dist_new = nlg.norm(p1r - est, ord=np.inf)

    if dist_new < dist:
        points1, points2, dist = p1, p2, dist_new
    else:
        points1, points2 = p1, p2

    return points1, points2, dist

def computeState(isFix, md):
    '''
    Generic function that determines event start and end.
    isFix - 1d array, time series with one element for each gaze data point;
            1 indicates the event is on, 0 - off
    md    - (min, max) event duration in frames
    Returns the time series (analogous to isFix, but with events outside the
    duration window removed) and a list with [start, end] for each event
    (values in frames).
    '''
    fixations = []
    if isFix.sum() == 0:
        return np.int32(isFix), []
    fixon = np.bitwise_and(isFix,
                           np.bitwise_not(np.roll(isFix, 1))).nonzero()[0].tolist()
    fixoff = np.bitwise_and(np.roll(isFix, 1),
                            np.bitwise_not(isFix)).nonzero()[0].tolist()
    if len(fixon) == 0 and len(fixoff) == 0:
        fixon = [0]; fixoff = [isFix.size - 1]
    if fixon[-1] > fixoff[-1]: fixoff.append(isFix.shape[0] - 1)
    if fixon[0] > fixoff[0]: fixon.insert(0, 0)
    if len(fixon) != len(fixoff):
        print 'invalid fixonoff'; raise TypeError
    for f in range(len(fixon)):
        fs = fixon[f]; fe = (fixoff[f] + 1); dur = fe - fs
        if dur < md[0] or dur > md[1]:
            isFix[fs:fe] = False
        else:
            fixations.append([fs, fe - 1])
    #fixations=np.array(fixations)
    return isFix, fixations

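# The following is a minimal usage sketch (not part of the original source):
# it runs computeState on a toy 0/1 gaze series with a duration window of
# 3-100 frames, so the short run at index 8 is discarded.
import numpy as np

isFix = np.int32([0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0])
state, events = computeState(isFix.copy(), md=(3, 100))
print(events)   # -> [[2, 6], [11, 16]]
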
def plot():
    py.plot(x[mask], y[mask], 'bo')
    ylim = py.ylim()
    xlim = py.xlim()
    print xlim
    py.plot(x[np.bitwise_not(mask)], y[np.bitwise_not(mask)], 'o', markerfacecolor='w')
    py.plot(xx, newFit(xx), 'r-')
    # ylim = py.ylim()
    if xlim[0] < root < xlim[1]:
        py.plot([root, root], ylim, 'r--')
    py.ylim(ylim)
    py.xlim(xlim)
    mean = np.mean(y[mask])
    py.plot(xlim, [mean, mean], 'b--')
    py.grid()
    return

def trayImage(self, tray):
    xx = self._ii.max() + 1
    yy = self._jj.max() + 1
    map = np.zeros(xx * yy).reshape(yy, xx)
    for i in range(len(self._ii)):
        map[self._jj[i]][self._ii[i]] = 1.0
    xjj = self._jj[np.bitwise_not(self.obs[tray])]
    xii = self._ii[np.bitwise_not(self.obs[tray])]
    for i in range(len(xii)):
        map[xjj[i]][xii[i]] = 2.0
    if tray == self._repeatTray:
        maskRep = self.repeatInfo['nobs'] > 0
        idx = np.arange(len(maskRep))[maskRep]
        xjj = self._jj[maskRep]
        xii = self._ii[maskRep]
        for i in range(len(xii)):
            map[xjj[i]][xii[i]] += self.repeatInfo['nobs'][idx[i]]
    else:
        xjj = self._jj[np.bitwise_not(self.obs[tray])]
        xii = self._ii[np.bitwise_not(self.obs[tray])]
        for i in range(len(xii)):
            map[xjj[i]][xii[i]] = 2.0 + self._nrepeat
    return map

def removeShortEvs(tsin, md):
    """
    >>> ts=np.array([1,1,1,0,1,1,1,0,0,1,1,1,0,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,1,0,1])
    >>> print ts
    >>> print removeShortEvs(ts==1,2)
    """
    evs = []
    if not np.any(tsin): return np.int32(tsin)
    if np.all(tsin): return np.int32(tsin)
    tser = np.copy(tsin)
    ton = np.bitwise_and(tser,
                         np.bitwise_not(np.roll(tser, 1))).nonzero()[0].tolist()
    toff = np.bitwise_and(np.roll(tser, 1),
                          np.bitwise_not(tser)).nonzero()[0].tolist()
    if ton[-1] > toff[-1]: toff.append(tser.shape[0])
    if ton[0] > toff[0]: ton.insert(0, 0)
    assert len(ton) == len(toff)
    #print np.int32(np.bitwise_and(tser,np.bitwise_not(np.roll(tser,1))))
    #print np.int32(np.bitwise_and(np.roll(tser,1),np.bitwise_not(tser)))
    for f in range(len(ton)):
        ts = ton[f]; te = toff[f]; dur = te - ts
        #print ts, te,dur
        if dur < md:
            tsin[ts:te] -= 1
    #tsin -= temp[:,val]
    return np.int32(tsin)

def draw_path(self, path, color=(0.7, 0.5, 0.3)):
    points, vectors, processes = [], [], []
    for k in range(len(path) - 1):
        points.append(path[k][0])
        vectors.append(path[k + 1][0] - path[k][0])
        processes.append(path[k][2])
    points, vectors = np.array(points), np.array(vectors)
    processes = np.array(processes)

    # Segments flagged by path[k][2] are drawn in the given color ...
    pnts, vctrs = points[processes], vectors[processes]
    mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                  vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                  color=color, mode='2ddash', scale_factor=1, line_width=5.0)
    mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                  vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                  color=color, mode='arrow', scale_factor=3,
                  scale_mode='scalar', line_width=5.0)

    # ... the remaining segments are drawn in grey.
    pnts = points[np.bitwise_not(processes)]
    vctrs = vectors[np.bitwise_not(processes)]
    mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                  vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                  color=(0.6, 0.6, 0.6), mode='2ddash', scale_factor=1, line_width=2.0)
    mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                  vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                  color=(0.6, 0.6, 0.6), mode='arrow', scale_factor=2,
                  scale_mode='scalar', line_width=2.0)

def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()

    import numpy

    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)

    # avoid nan warning in calculations by flinging the nans elsewhere
    numpy.bitwise_not(selection, selection)
    q = numpy.array(q, dtype=numpy.float64)
    q[selection] = float("-inf")
    weights = weights.copy()
    weights[selection] = 0.0

    selection = numpy.empty(q.shape, dtype=numpy.bool)
    for threshold, sub in self.bins:
        numpy.less(q, threshold, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        sub._numpy(data, subweights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)

def interpolateBlinks(t, d, hz):
    ''' Interpolate short missing intervals
        t  - 1d array, time stamps of the gaze samples
        d  - 1d array, time series with gaze data, np.nan indicates blink
        hz - gaze data recording rate
    '''
    isblink = np.isnan(d)
    if isblink.sum() < 2 or isblink.sum() > (isblink.size - 2):
        return d
    blinkon = np.bitwise_and(isblink, np.bitwise_not(
        np.roll(isblink, 1))).nonzero()[0].tolist()
    blinkoff = np.bitwise_and(np.roll(isblink, 1),
                              np.bitwise_not(isblink)).nonzero()[0].tolist()
    if len(blinkon) == 0 and len(blinkoff) == 0: return d
    #print 'bla',len(blinkon), len(blinkoff)
    if blinkon[-1] > blinkoff[-1]: blinkoff.append(t.size - 1)
    if blinkon[0] > blinkoff[0]: blinkon.insert(0, 0)
    if len(blinkon) != len(blinkoff):
        print 'Blink Interpolation Failed'
        raise TypeError
    f = interp1d(t[~isblink], d[~isblink], bounds_error=False)
    for b in range(len(blinkon)):
        bs = blinkon[b] - 1
        be = (blinkoff[b])
        if (be - bs) < INTERPMD * hz:
            d[bs:be] = f(t[bs:be])
        #for c in [7,8]: tser[bs:be,c]=np.nan
    return d

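# Illustrative call (not from the original module); it assumes that
# `from scipy.interpolate import interp1d` and a module-level constant
# INTERPMD (maximum interpolatable gap, in seconds) are available, since
# interpolateBlinks relies on both.
import numpy as np

hz = 60.0
t = np.arange(0, 1, 1.0 / hz)
d = np.sin(2 * np.pi * t)
d[20:24] = np.nan                        # a short simulated blink
filled = interpolateBlinks(t, d.copy(), hz)
print(np.isnan(filled).sum())            # 0 if the gap is shorter than INTERPMD*hz
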
def projectVecs2Depth(self, T, vA, vB):
    shape = vB.shape
    if len(shape) == 2:
        vC = vA + vB
        A = np.linalg.norm(vA)
        B = np.linalg.norm(vB)
        C = np.linalg.norm(vC)
        vADotvB = (vA * vB).sum()
        vBDotvC = (vB * -vC).sum()
        vADotvC = (-vC * vA).sum()
        alpha = np.arccos(vADotvB / (A * B))
        beta = np.arccos(vBDotvC / (B * C))
        gamma = np.arccos(vADotvC / (A * C))
        if alpha == PI:
            return vA / A * T
        if alpha == 0:
            return vA / A * -T
        if alpha + beta + gamma != PI:
            alpha = PI - alpha
        beta = np.arcsin(A * np.sin(alpha) / T)
        gamma = PI - alpha - beta
        B_new = np.sin(gamma) * T / np.sin(alpha)
        vB = vB / B * B_new
        vC = vA + vB
        return vC

    if len(shape) == 3:
        h, w, d = shape
        vA = vA.reshape((1, 1, 3))
        vC = vA + vB
        A = self.normVec(vA)
        B = self.normVec(vB)
        C = self.normVec(vC)
        vADotvB = (vA * vB).sum(axis=2)
        vBDotvC = (vB * -vC).sum(axis=2)
        vADotvC = (-vC * vA).sum(axis=2)
        alpha = np.arccos(vADotvB / (A * B))
        beta = np.arccos(vBDotvC / (B * C))
        gamma = np.arccos(vADotvC / (A * C))
        mask1 = alpha == 0
        mask2 = alpha + beta + gamma != PI
        # Where the angles do not sum to PI, flip alpha (cf. the 2-d branch above).
        alpha = alpha * np.bitwise_not(mask2) + (PI - alpha) * mask2
        # Avoid division by zero
        alpha += 1 * mask1
        beta = np.arcsin(A * np.sin(alpha) / T)
        gamma = PI - alpha - beta
        B_new = np.sin(gamma) * T / np.sin(alpha)
        vB = vB * (B_new / B).reshape((h, w, 1))
        vC = vA + vB
        vC = vC * np.bitwise_not(mask1).reshape((h, w, 1)) + \
            (vA / A * T * mask1.reshape((h, w, 1)))
        return vC

def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()

    import numpy

    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)

    # avoid nan warning in calculations by flinging the nans elsewhere
    numpy.bitwise_not(selection, selection)
    q = numpy.array(q, dtype=numpy.float64)
    q[selection] = self.high
    weights = weights.copy()
    weights[selection] = 0.0

    numpy.greater_equal(q, self.low, selection)
    subweights[:] = weights
    subweights[selection] = 0.0
    self.underflow._numpy(data, subweights, shape)

    numpy.less(q, self.high, selection)
    subweights[:] = weights
    subweights[selection] = 0.0
    self.overflow._numpy(data, subweights, shape)

    if all(isinstance(value, Count) and value.transform is identity for value in self.values) and \
       numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
        # Numpy defines histograms as including the upper edge of the last bin only, so drop that
        weights[q == self.high] = 0.0
        h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)
        for hi, value in zip(h, self.values):
            value.fill(None, float(hi))

    else:
        q = numpy.array(q, dtype=numpy.float64)
        numpy.subtract(q, self.low, q)
        numpy.multiply(q, self.num, q)
        numpy.divide(q, self.high - self.low, q)
        numpy.floor(q, q)
        q = numpy.array(q, dtype=int)

        for index, value in enumerate(self.values):
            numpy.not_equal(q, index, selection)
            subweights[:] = weights
            subweights[selection] = 0.0
            value._numpy(data, subweights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)

def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()

    import numpy

    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)

    # avoid nan warning in calculations by flinging the nans elsewhere
    numpy.bitwise_not(selection, selection)
    q = numpy.array(q, dtype=numpy.float64)
    q[selection] = 0.0
    weights = weights.copy()
    weights[selection] = 0.0

    if all(isinstance(v, Count) and v.transform is identity for c, v in self.bins) and \
       numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
        h, _ = numpy.histogram(q, [float("-inf")] +
                               [(c1 + c2)/2.0 for (c1, v1), (c2, v2) in zip(self.bins[:-1], self.bins[1:])] +
                               [float("inf")], weights=weights)
        for hi, (c, v) in zip(h, self.bins):
            v.fill(None, float(hi))

    else:
        selection = numpy.empty(q.shape, dtype=numpy.bool)
        selection2 = numpy.empty(q.shape, dtype=numpy.bool)

        for index in xrange(len(self.bins)):
            if index == 0:
                high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                numpy.greater_equal(q, high, selection)

            elif index == len(self.bins) - 1:
                low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                numpy.less(q, low, selection)

            else:
                low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                numpy.less(q, low, selection)
                numpy.greater_equal(q, high, selection2)
                numpy.bitwise_or(selection, selection2, selection)

            subweights[:] = weights
            subweights[selection] = 0.0
            self.bins[index][1]._numpy(data, subweights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)

def calnewpat(pat, slnroi, tabroipat, tabroi):
    print 'new pattern : ', pat
    if pat == 'HCpret':
        pat1 = 'HC'
        pat2 = 'reticulation'
    elif pat == 'HCpbro':
        pat1 = 'HC'
        pat2 = 'bronchiectasis'
    elif pat == 'GGpbro':
        pat1 = 'ground_glass'
        pat2 = 'bronchiectasis'
    elif pat == 'GGpret':
        pat1 = 'ground_glass'
        pat2 = 'reticulation'
    elif pat == 'bropret':
        pat1 = 'bronchiectasis'
        pat2 = 'reticulation'

    for i in slnroi:
        tab1 = np.copy(tabroipat[pat1][i])
        np.putmask(tab1, tab1 > 0, 255)
        tab2 = np.copy(tabroipat[pat2][i])
        np.putmask(tab2, tab2 > 0, 255)
        tab3 = np.copy(tabroipat[pat][i])
        np.putmask(tab3, tab3 > 0, 255)

        taball = np.bitwise_or(tab2, tab1)
        taball = np.bitwise_or(taball, tab3)
        np.putmask(taball, taball > 0, 255)
        taballnot = np.bitwise_not(taball)

        tab = np.bitwise_and(tab1, tab2)
        if tab.max() > 0:
            tab3 = np.bitwise_or(tab3, tab)
        tabn = np.bitwise_not(tab3)

        tab1 = np.bitwise_and(tab1, tabn)
        np.putmask(tab1, tab1 > 0, classif[pat1] + 1)
        tab2 = np.bitwise_and(tab2, tabn)
        np.putmask(tab2, tab2 > 0, classif[pat2] + 1)
        np.putmask(tab, tab > 0, classif[pat] + 1)

        tabroi[i] = np.bitwise_and(tabroi[i], taballnot)
        tabroi[i] = np.bitwise_or(tabroi[i], tab1)
        tabroi[i] = np.bitwise_or(tabroi[i], tab2)
        tabroi[i] = np.bitwise_or(tabroi[i], tab)
    return tabroi

def tseries2eventlist(tser):
    tser = np.int32(tser)
    if tser.sum() == 0: return []
    d = np.bitwise_and(tser, np.bitwise_not(np.roll(tser, 1)))
    on = (d[1:].nonzero()[0] + 1).tolist()
    d = np.bitwise_and(np.roll(tser, 1), np.bitwise_not(tser))
    off = d[1:].nonzero()[0].tolist()
    if len(off) == 0: off.append(tser.shape[0] - 1)
    if len(on) == 0: on.insert(0, 0)
    if on[-1] > off[-1]: off.append(tser.shape[0] - 1)
    if on[0] > off[0]: on.insert(0, 0)
    if len(on) != len(off):
        print 'invalid fixonoff'; raise TypeError
    out = np.array([on, off]).T
    return out.tolist()

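# Quick sanity check (added here for illustration): a 0/1 series with two
# "on" runs is converted into [start, end] index pairs.
import numpy as np

ts = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0])
print(tseries2eventlist(ts))   # [[1, 3], [6, 7]]
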
def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()

    import numpy

    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)

    # switch to float here like in bin.py else numpy throws
    # TypeError on trivial integer cases such as:
    # >>> q = numpy.array([1,2,3,4])
    # >>> np.divide(q,1,q)
    # >>> np.floor(q,q)
    q = numpy.array(q, dtype=numpy.float64)
    neginfs = numpy.isneginf(q)
    posinfs = numpy.isposinf(q)

    numpy.subtract(q, self.origin, q)
    numpy.divide(q, self.binWidth, q)
    numpy.floor(q, q)
    q = numpy.array(q, dtype=numpy.int64)
    q[neginfs] = LONG_MINUSINF
    q[posinfs] = LONG_PLUSINF

    selected = q[weights > 0.0]

    selection = numpy.empty(q.shape, dtype=numpy.bool)
    for index in numpy.unique(selected):
        if index != LONG_NAN:
            bin = self.bins.get(index)
            if bin is None:
                bin = self.value.zero()
                self.bins[index] = bin

            numpy.not_equal(q, index, selection)
            subweights[:] = weights
            subweights[selection] = 0.0
            bin._numpy(data, subweights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)

def create_qtable(gtab, origin):
    """ create a normalized version of gradients

    Parameters
    ----------
    gtab : GradientTable
    origin : (3,) ndarray
        center of qspace

    Returns
    -------
    qtable : ndarray
    """
    bv = gtab.bvals
    bsorted = np.sort(bv[np.bitwise_not(gtab.b0s_mask)])
    for i in range(len(bsorted)):
        bmin = bsorted[i]
        try:
            if np.sqrt(bv.max() / bmin) > origin + 1:
                continue
            else:
                break
        except ZeroDivisionError:
            continue

    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * gtab.bvecs
    return np.floor(qtable + .5)

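# A possible invocation, shown only as a sketch: it assumes dipy is installed
# and that `origin` can be passed as the scalar centre index of the q-space
# grid (e.g. 8 for a 17x17x17 grid, as used in DSI reconstruction); the
# b-values and b-vectors below are purely illustrative.
import numpy as np
from dipy.core.gradients import gradient_table

bvals = np.concatenate(([0.0], np.linspace(500, 4000, 8)))
bvecs = np.zeros((9, 3))
bvecs[1:, 0] = 1.0                       # unit vectors along x for the DWI volumes
gtab = gradient_table(bvals, bvecs=bvecs)
qtable = create_qtable(gtab, origin=8)
print(qtable.shape)                      # (9, 3)
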
def _mask_trigs(events, mask, mask_type):
    """Helper function for masking digital trigger values"""
    if mask is not None:
        if not isinstance(mask, int):
            raise TypeError('You provided a(n) %s. ' % type(mask) +
                            'Mask must be an int or None.')
    n_events = len(events)
    if n_events == 0:
        return events.copy()

    if mask is not None:
        if mask_type is None:
            warn("The default setting will change from 'not_and' "
                 "to 'and' in v0.14.", DeprecationWarning)
            mask_type = 'not_and'
        if mask_type == 'not_and':
            mask = np.bitwise_not(mask)
        elif mask_type != 'and':
            if mask_type is not None:
                raise ValueError("'mask_type' should be either 'and'"
                                 " or 'not_and', instead of '%s'" % mask_type)
        events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
    events = events[events[:, 1] != events[:, 2]]
    return events

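# Illustrative call, not taken from the original test suite.  Events follow
# the MNE-style layout [sample, previous value, trigger value]; with
# mask_type='and' only the bits set in `mask` are kept, and rows whose
# trigger value does not change after masking are dropped.
import numpy as np

events = np.array([[100, 0, 5],
                   [200, 4, 4],         # unchanged trigger -> dropped
                   [300, 0, 12]])
print(_mask_trigs(events.copy(), mask=7, mask_type='and'))
# [[100   0   5]
#  [300   0   4]]
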
def dirtnalysis(img, res, MaskEdges=True, retSizes=False, verbose=False):
    """
    Runs dirt analysis on a given image.

    Inputs:
     - img - image as a numpy.ndarray
     - res - resolution of the image in square microns/pixel

    Key-Word Arguments:
     - MaskEdges = True - option to automatically mask off the background if
        set to True
     - retSizes = False - option to return the dirt size data if set to True
     - verbose = False - prints verbose output if set to True.

    Returns a tuple containing:
        num - number of dirt particles
        area - area of the dirt in the image in square microns
        threshed - the dirt thresholded image (white dirt on black background)
            as a numpy ndarray
        sizes[optional] - a 1-dimensional numpy ndarray listing out the sizes
            (area) of each dirt particle in pixels
    """
    # Dirt analysis
    threshed, masked = isolateDirt(img, verbose=verbose)
    area, num, sizes, labelled = meas.calcDirt(threshed, res, returnSizes=True,
                                               returnLabelled=True,
                                               getAreaInSquaremm=True)
    area = round(area, 5)
    threshed = (masked / 255) * np.bitwise_not(threshed)

    # put all results into return tuples
    if retSizes:
        return num, area, threshed, sizes
    else:
        return num, area, threshed

def fieldMask(self, field, numberOfFieldsPerCircle):
    """
    Returns a square matrix of size 3 * self.markerSizePixels, where the
    elements corresponding to the given field are 1, and all other elements
    are 0.
    """
    if (field, numberOfFieldsPerCircle) in self.fieldMaskCache:
        return self.fieldMaskCache[(field, numberOfFieldsPerCircle)]
    else:
        halfSize = 3 * self.markerSizePixels // 2
        result = np.zeros((halfSize * 2, halfSize * 2), dtype=np.uint8)
        fillColor = 255
        if field == 0:
            # Background field, return a rectangle around the circles
            result = np.bitwise_not(self.markerMask())
        elif 0 < field and field <= 2 * numberOfFieldsPerCircle:
            if field <= numberOfFieldsPerCircle:
                # First circle
                y = - 3 * self.markerSizePixels // 4
                rotationAngle = (-90 + (field - 1) * 360 // numberOfFieldsPerCircle) % 360
            else:
                # Second circle
                y = 3 * self.markerSizePixels // 4
                rotationAngle = (90 - (field - numberOfFieldsPerCircle) * 360 // numberOfFieldsPerCircle) % 360
            cv2.ellipse(result, (halfSize, halfSize + y),
                        (self.markerSizePixels // 2, self.markerSizePixels // 2),
                        rotationAngle, 0, 360 // numberOfFieldsPerCircle,
                        fillColor, cv2.FILLED)
        else:
            raise Exception("MarkerCandidate.fieldMask: invalid field: " + str(field))
        self.fieldMaskCache[(field, numberOfFieldsPerCircle)] = result
        return result

def genebackground(namedir, listroi):
    for sln in listroi:
        tabpbac = np.copy(tabslung[sln])
        patok = False
        for pat in usedclassifall:
            if pat != fidclass(0, classifall):
                # print sln,pat
                tabpat = tabroipat[pat][sln]
                if tabpat.max() > 0:
                    patok = True
                    # tabp=cv2.cvtColor(tabpat,cv2.COLOR_BGR2GRAY)
                    np.putmask(tabpat, tabpat > 0, 255)
                    mask = np.bitwise_not(tabpat)
                    tabpbac = np.bitwise_and(tabpbac, mask)
        # print tabroipat[fidclass(0,classif)][sln].shape
        tabroipat[fidclass(0, classifall)][sln] = tabpbac
        if patok:
            labeldir = os.path.join(namedir, fidclass(0, classifall))
            if not os.path.exists(labeldir):
                os.mkdir(labeldir)
            namepat = tabscanName[sln] + '.' + typei1
            imgcoreScan = os.path.join(labeldir, namepat)
            # imgcoreScan=os.path.join(locadir,namepat)
            tabtowrite = colorimage(tabroipat[fidclass(0, classifall)][sln],
                                    classifc[fidclass(0, classifall)])
            # tabtowrite=colorimage(tabroipat[fidclass(0,classifall)][sln],(100,100,100))
            # tabtowrite=cv2.cvtColor(tabtowrite,cv2.COLOR_BGR2RGB)
            cv2.imwrite(imgcoreScan, tabtowrite)

def highlightedImage(self, background, motion, number):
    redChannel = background[:, :, 2]
    # highlight motion
    background[:, :, 2] = np.bitwise_and(np.bitwise_not(motion), redChannel) + \
        np.bitwise_and(motion, redChannel // 3 + 168)
    cv2.putText(background, 'motion!',
                (self.frame_size[1] - 50, self.frame_size[0] // 2),
                self.font, 1, (0, 0, 255), 2)
    cv2.putText(background, str(number),
                (self.frame_size[1] // 2 - 100, self.frame_size[0] // 2 - 100),
                self.font, 2, (0, 255, 0), 2)
    return background

def mask_table(region, table, negate=False, racol='ra', deccol='dec'):
    """
    Apply a given mask (region) to the table, removing all the rows with
    ra/dec inside the region.
    If negate=True then remove the rows with ra/dec outside the region.

    Parameters
    ----------
    region : :class:`AegeanTools.regions.Region`
        Region to mask.

    table : Astropy.table.Table
        Table to be masked.

    negate : bool
        If True then pixels *outside* the region are masked.
        Default = False.

    racol, deccol : str
        The name of the columns in `table` that should be interpreted as ra and dec.
        Default = 'ra', 'dec'

    Returns
    -------
    masked : Astropy.table.Table
        A view of the given table which has been masked.
    """
    inside = region.sky_within(table[racol], table[deccol], degin=True)
    if not negate:
        mask = np.bitwise_not(inside)
    else:
        mask = inside
    return table[mask]

def robust_l2(obs_phase, freqs, solve_cs=True):
    '''Solve the tec and cs for multiple datasets.
    `obs_phase` : `numpy.ndarray`
        the measured phase with shape (num_freqs, )
    `freqs` : `numpy.ndarray`
        the frequencies at the datapoints (num_freqs,)
    `solve_cs` : (optional) bool
        Whether to solve cs (True)
    '''
    obs_phase = phase_unwrapp1d(obs_phase)
    if solve_cs:
        def residuals(m, freqs, obs_phase):
            tec, cs = m[0], m[1]
            return calc_phase(tec, freqs, cs=cs) - obs_phase
    else:
        def residuals(m, freqs, obs_phase):
            tec, cs = m[0], m[1]
            return calc_phase(tec, freqs, cs=0.) - obs_phase
    nan_mask = np.bitwise_not(np.isnan(obs_phase))
    obs_phase_ = obs_phase[nan_mask]
    freqs_ = freqs[nan_mask]
    m0 = [0.0, 0.]
    m = least_squares(residuals, m0, loss='soft_l1', f_scale=90. * np.pi / 180.,
                      args=(freqs_, obs_phase_))
    if solve_cs:
        return m.x[0], m.x[1]
    else:
        return m.x[0], 0.

def create_chi(g_0, lamda, chi0, A, B, C, M, d, mag_limit=1e-8, mpy_limit=1e-9):
    A = A.astype(np.complex128)
    B = B.astype(np.complex128)
    C = C.astype(np.complex128)
    # C = B*0.0
    m_x = M[..., 0]
    m_y = M[..., 1]
    m_z = M[..., 2]

    chi_xx = chi0 + A + C * m_x * m_x
    chi_yy = chi0 + A + C * m_y * m_y
    chi_zz = chi0 + A + C * m_z * m_z
    chi_xy = -1.0j * B * m_z + C * m_x * m_y
    chi_yx = -(-1.0j * B * m_z + C * m_x * m_y)
    chi_xz = 1.0j * B * m_y + C * m_x * m_z
    chi_zx = -(1.0j * B * m_y + C * m_x * m_z)
    chi_yz = -1.0j * B * m_x + C * m_y * m_z
    chi_zy = -(-1.0j * B * m_x + C * m_y * m_z)

    chi = ((chi_xx, chi_xy, chi_xz),
           (chi_yx, chi_yy, chi_yz),
           (chi_zx, chi_zy, chi_zz))

    # Take into account non-magnetic materials:
    non_mag = np.bitwise_and(np.abs(B) < mag_limit, np.abs(C) < mag_limit)
    # Ignore the ambient (vacuum)
    non_mag[0] = False
    # Take into account the matrix singularity arising when M||Y
    mpy = np.bitwise_and(np.abs(m_y - 1.0) < mpy_limit, np.bitwise_not(non_mag))

    return chi, non_mag, mpy

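# Toy example (added for illustration only): two layers, the first being the
# non-magnetic ambient and the second weakly magnetic along z.  g_0, lamda
# and d are accepted but unused in the fragment shown above, so None is
# passed for them here.
import numpy as np

chi0 = np.array([0.0, 1e-5 + 1e-6j])
A = np.array([0.0, 1e-6])
B = np.array([0.0, 1e-7])
C = np.array([0.0, 1e-8])
M = np.array([[0.0, 0.0, 1.0],
              [0.0, 0.0, 1.0]])          # magnetisation unit vectors per layer
chi, non_mag, mpy = create_chi(None, None, chi0, A, B, C, M, None)
print(non_mag, mpy)                      # [False False] [False False]
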
def create_test_data(namedirtopcf, pat, tabscan, tabsroi, tabslung, datascan, tabscanName):
    (top, tail) = os.path.split(namedirtopcf)
    print 'create test data for :', tail, 'pattern :', pat
    pathpat = os.path.join(namedirtopcf, pat)
    list_image = [name for name in os.listdir(pathpat) if name.find('.' + typei1) > 0]
    # if len(list_image)==0:
    #     list_image=[name for name in os.listdir(pathpat) if name.find('.'+typei)>0]
    # if len(list_image)>0:
    for l in list_image:
        pos = l.find('.' + typei1)
        ext = l[pos:len(l)]
        numslice = rsliceNum(l, '_', ext)
        # print numslice,tabroipat[numslice]
        if pat not in tabroipat[numslice]:
            tabroipat[numslice].append(pat)
        if numslice not in numsliceok:
            numsliceok.append(numslice)
            datascan = peparescan(numslice, tabscan[numslice], tabslung[numslice], datascan)
            tabroi[numslice] = np.zeros((tabscan.shape[1], tabscan.shape[2]), np.uint8)
            # print numslice,tabroipat[numslice]
        # tabl=tabslung[numslice].copy()
        # np.putmask(tabl,tabl>0,1)
        newroi = cv2.imread(os.path.join(pathpat, l), 0)
        if newroi.max() == 0:
            print pathpat, l
            print newroi.shape
            print newroi.max(), newroi.min()
            print 'error image empty'
            sys.exit()
        img = cv2.resize(newroi, (image_cols, image_rows), interpolation=cv2.INTER_LINEAR)
        np.putmask(tabroi[numslice], img > 0, 0)
        # if classif[pat]>0:
        np.putmask(img, img > 0, classif[pat])
        # else:
        #     np.putmask(img, img > 0, classif['lung'])
        tablung = np.copy(tabslung[numslice])
        np.putmask(tablung, tablung > 0, 255)
        img = np.bitwise_and(tablung, img)
        tabroi[numslice] += img
        np.putmask(tablung, tablung > 0, classif['healthy'])
        tabroii = np.copy(tabroi[numslice])
        np.putmask(tabroii, tabroii > 0, 255)
        mask = np.bitwise_not(tabroii)
        img = np.bitwise_and(tablung, mask)
        tabroif = np.bitwise_or(img, tabroi[numslice])
        tabroi[numslice] = tabroif
    return tabroi, datascan

def computeState(isFix, md, nfm=np.inf):
    fixations = []
    if isFix.sum() == 0:
        return np.int32(isFix), []
    fixon = np.bitwise_and(isFix,
                           np.bitwise_not(np.roll(isFix, 1))).nonzero()[0].tolist()
    fixoff = np.bitwise_and(np.roll(isFix, 1),
                            np.bitwise_not(isFix)).nonzero()[0].tolist()
    if len(fixon) == 0 and len(fixoff) == 0:
        fixon = [0]; fixoff = [isFix.size - 1]
    if fixon[-1] > fixoff[-1]: fixoff.append(isFix.shape[0] - 1)
    if fixon[0] > fixoff[0]: fixon.insert(0, 0)
    if len(fixon) != len(fixoff):
        print 'invalid fixonoff'; raise TypeError
    for f in range(len(fixon)):
        fs = fixon[f]; fe = (fixoff[f] + 1); dur = fe - fs
        if dur < md[0] or dur > md[1]:
            isFix[fs:fe] = False
        else:
            fixations.append([fs, fe - 1])
    return isFix, fixations

def import_slab_data():
    filename = './alu_slab1.0_clip.xyz'
    filename = './kur_slab1.0_clip.xyz'
    # filename = './aluslab.xyz'
    data = np.loadtxt(filename)
    data = data[np.bitwise_not(np.isnan(data[:, 2])), :]
    # select data between latitude 45 and 50 degree and depth above 250km
    data = data[np.bitwise_and(np.bitwise_and(data[:, 1] >= Latmin,
                                              data[:, 1] <= Latmax),
                               data[:, 2] > -250)]
    return data

def test_unary_bitops(self):
    from numpy import bitwise_not, invert, array
    a = array([1, 2, 3, 4])
    assert (~a == [-2, -3, -4, -5]).all()
    assert (bitwise_not(a) == ~a).all()
    assert (invert(a) == ~a).all()
    assert invert(True) == False
    assert invert(False) == True

def GetBlackAndWhiteImageFromFile(FileName, NoDithering):
    Image = cv2.imread(FileName, cv2.IMREAD_GRAYSCALE)
    Image = np.bitwise_not(Image)
    if NoDithering:
        return cv2.threshold(Image, 128, 255,
                             cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    else:
        return Image

def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(weights.sum())

    import numpy
    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    numpy.bitwise_and(selection, weights > 0.0, selection)
    q = q[selection]
    weights = weights[selection]
    q *= weights

    self.sum += float(q.sum())

def stitch(self, base_img, img_to_stitch, homography=None):
    """
    Stitch img_to_stitch to base_img.
    :param base_img: The base image onto which img_to_stitch is going to be stitched.
    :param img_to_stitch: The image to be stitched on base_img.
    :return: The warped image of the base_img and img_to_stitch. Note that the
             black part of the warped image will be chopped after stitching.
    """
    if homography is None:
        H = self.find_homography(base_img, img_to_stitch)
    else:
        H = homography
    H = H / H[2, 2]
    H_inv = la.inv(H)
    (min_x, min_y, max_x, max_y) = self.find_dimensions(img_to_stitch, H_inv)
    max_x = max(max_x, base_img.shape[1])
    max_y = max(max_y, base_img.shape[0])
    move_h = np.matrix(np.identity(3), np.float32)
    if (min_x < 0):
        move_h[0, 2] += -min_x
        max_x += -min_x
    if (min_y < 0):
        move_h[1, 2] += -min_y
        max_y += -min_y
    mod_inv_h = move_h * H_inv
    img_w = int(math.ceil(max_x))
    img_h = int(math.ceil(max_y))

    # Warp the new image given the homography from the old images.
    base_img_warp = cv2.warpPerspective(base_img, move_h, (img_w, img_h))
    img_to_stitch_warp = cv2.warpPerspective(img_to_stitch, mod_inv_h, (img_w, img_h))

    # Put the base image on an enlarged palette.
    enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

    # Create a mask from the warped image for constructing masked composite.
    (ret, data_map) = cv2.threshold(cv2.cvtColor(img_to_stitch_warp, cv2.COLOR_BGR2GRAY),
                                    0, 255, cv2.THRESH_BINARY)
    enlarged_base_img = cv2.add(enlarged_base_img, base_img_warp,
                                mask=np.bitwise_not(data_map),
                                dtype=cv2.CV_8U)
    final_img = cv2.add(enlarged_base_img, img_to_stitch_warp, dtype=cv2.CV_8U)

    return final_img