Code Example #1
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = float("-inf")
        weights = weights.copy()
        weights[selection] = 0.0

        selection = numpy.empty(q.shape, dtype=bool)
        for threshold, sub in self.bins:
            numpy.less(q, threshold, selection)
            subweights[:] = weights
            subweights[selection] = 0.0

            sub._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
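The in-place inversion above is worth noting: on a boolean array, numpy.bitwise_not is an elementwise logical NOT, and passing the same array as the out argument reuses the buffer instead of allocating a new mask. A minimal sketch of the pattern, assuming nothing beyond numpy:

import numpy

q = numpy.array([1.0, numpy.nan, 3.0])
selection = numpy.isnan(q)               # [False  True False]
numpy.bitwise_not(selection, selection)  # in place -> [ True False  True]
print(selection)
# Inverting the same buffer again (as the method does before filling the
# regular bins) recovers the NaN mask without a second allocation.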
Code Example #2
File: test2.py Project: WIYN-ODI/donut
    def plot():

        py.plot(x[mask],
                y[mask],'bo')
        ylim = py.ylim()
        xlim = py.xlim()
        print(xlim)
        py.plot(x[np.bitwise_not(mask)],
                y[np.bitwise_not(mask)],'o',markerfacecolor='w')

        py.plot(xx,
                newFit(xx),'r-')

        # ylim = py.ylim()


        if xlim[0] < root < xlim[1]:
            py.plot([root,root],ylim,'r--')
        py.ylim(ylim)
        py.xlim(xlim)

        mean = np.mean(y[mask])
        py.plot(xlim,[mean,mean],
                'b--')

        py.grid()

        return
Code Example #3
File: mlabplot.py Project: openlmd/robpath
    def draw_path(self, path, color=(0.7, 0.5, 0.3)):
        points, vectors, processes = [], [], []
        for k in range(len(path)-1):
            points.append(path[k][0])
            vectors.append(path[k+1][0] - path[k][0])
            processes.append(path[k][2])
        points, vectors = np.array(points), np.array(vectors)
        processes = np.array(processes)
        pnts, vctrs = points[processes], vectors[processes]
        mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                      vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                      color=color, mode='2ddash',
                      scale_factor=1, line_width=5.0)
        mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                      vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                      color=color, mode='arrow',
                      scale_factor=3, scale_mode='scalar', line_width=5.0)
        pnts = points[np.bitwise_not(processes)]
        vctrs = vectors[np.bitwise_not(processes)]
        mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                      vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                      color=(0.6, 0.6, 0.6), mode='2ddash',
                      scale_factor=1, line_width=2.0)
        mlab.quiver3d(pnts[:, 0], pnts[:, 1], pnts[:, 2],
                      vctrs[:, 0], vctrs[:, 1], vctrs[:, 2],
                      color=(0.6, 0.6, 0.6), mode='arrow',
                      scale_factor=2, scale_mode='scalar', line_width=2.0)
Code Example #4
def computeState(isFix,md):
    ''' generic function that determines event start and end
        isFix - 1d array, time series with one element for each
            gaze data point, 1 indicates the event is on, 0 - off
        md - minimum event duration
        returns
            timeseries analogous to isFix, with events that are
                too short or too long zeroed out
            list of [start, end] pairs for each retained
                event (values in frames, end inclusive)
    '''
    fixations=[]
    if isFix.sum()==0: return np.int32(isFix),[]
    fixon = np.bitwise_and(isFix,
        np.bitwise_not(np.roll(isFix,1))).nonzero()[0].tolist()
    fixoff=np.bitwise_and(np.roll(isFix,1),
        np.bitwise_not(isFix)).nonzero()[0].tolist()
    if len(fixon)==0 and len(fixoff)==0: fixon=[0]; fixoff=[isFix.size-1]
    if fixon[-1]>fixoff[-1]:fixoff.append(isFix.shape[0]-1)
    if fixon[0]>fixoff[0]:fixon.insert(0,0)
    if len(fixon)!=len(fixoff): print('invalid fixonoff'); raise TypeError
    for f in range(len(fixon)):
        fs=fixon[f];fe=(fixoff[f]+1);dur=fe-fs
        if  dur<md[0] or dur>md[1]:
            isFix[fs:fe]=False
        else: fixations.append([fs,fe-1])
    #fixations=np.array(fixations)
    return isFix,fixations
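The onset/offset trick above generalizes: for a 0/1 integer series, np.bitwise_and(ts, np.bitwise_not(np.roll(ts, 1))) is 1 exactly where a run of ones begins, and swapping the operands marks where it ends. A small self-contained check:

import numpy as np

ts = np.int32([0, 1, 1, 0, 0, 1, 1, 1, 0])
onsets = np.bitwise_and(ts, np.bitwise_not(np.roll(ts, 1))).nonzero()[0]
offsets = np.bitwise_and(np.roll(ts, 1), np.bitwise_not(ts)).nonzero()[0]
print(onsets)   # [1 5] : first frame of each run of ones
print(offsets)  # [3 8] : first frame after each run
# np.roll wraps around, which is why computeState patches up the first and
# last event when the series starts or ends inside an event.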
Code Example #5
def get_corresponding_points(points1, points2, guess_tfm, rows=None, cols=None):
    """
    Returns two lists of points such that the transform explains the relation between
    pointsets the most. Also, returns the norm of the difference between point sets.
    tfm is from cam1 -> cam2
    """
    if not rows: rows = cb_rows
    if not cols: cols = cb_cols

    
    points1 = np.asarray(points1)
    points2 = np.asarray(points2)
    
    p12 = np.c_[points1,points2]
    p12 = p12[np.bitwise_not(np.isnan(p12).any(axis=1)),:]
    p1 = p12[:,0:3]
    p2 = p12[:,3:6]
    est = np.c_[p2,np.ones((p2.shape[0],1))].dot(guess_tfm.T)[:,0:3]
    dist = nlg.norm(p1-est,ord=np.inf)
    
    corr = range(rows*cols-1,-1,-1)
    p12r = np.c_[points1,points2[corr,:]]
    p12r = p12r[np.bitwise_not(np.isnan(p12r).any(axis=1)),:]
    p1r = p12r[:,0:3]
    p2r = p12r[:,3:6]
    est = np.c_[p2r,np.ones((p2r.shape[0],1))].dot(guess_tfm.T)[:,0:3]
    dist_new = nlg.norm(p1r-est, ord=np.inf)
    if dist_new < dist:
        points1, points2, dist = p1, p2, dist_new
    else:
        points1, points2 = p1, p2

    return points1, points2, dist
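The row-filtering idiom here keeps the two point sets aligned: stack them column-wise with np.c_, then drop every row that has a NaN in either set. A minimal sketch, pure numpy:

import numpy as np

pts = np.array([[0.0, 1.0, 2.0],
                [np.nan, 1.0, 2.0],
                [3.0, 4.0, 5.0]])
keep = np.bitwise_not(np.isnan(pts).any(axis=1))
print(pts[keep])  # only the rows with no NaN survive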
Code Example #6
def interpolateBlinks(t,d,hz):
    ''' Interpolate short missing intervals
        t - 1d array, time stamps for each sample
        d - 1d array, time series with gaze data, np.nan indicates blink
        hz - gaze data recording rate
    '''
    isblink= np.isnan(d)
    if isblink.sum()<2 or isblink.sum()>(isblink.size-2): return d
    blinkon = np.bitwise_and(isblink,np.bitwise_not(
        np.roll(isblink,1))).nonzero()[0].tolist()
    blinkoff=np.bitwise_and(np.roll(isblink,1),
        np.bitwise_not(isblink)).nonzero()[0].tolist()
    if len(blinkon)==0 and len(blinkoff)==0: return d
    #print 'bla',len(blinkon), len(blinkoff)
    if blinkon[-1]>blinkoff[-1]: blinkoff.append(t.size-1)
    if blinkon[0]>blinkoff[0]: blinkon.insert(0,0)
    if len(blinkon)!=len(blinkoff):
        print('Blink Interpolation Failed')
        raise TypeError
    f=interp1d(t[~isblink],d[~isblink],bounds_error=False)
    for b in range(len(blinkon)):
        bs=blinkon[b]-1
        be=(blinkoff[b])
        if (be-bs)<INTERPMD*hz:
            d[bs:be]=f(t[bs:be])
            #for c in [7,8]: tser[bs:be,c]=np.nan
    return d
Code Example #7
File: surveysimF.py Project: tribeiro/SMAPS
	def trayImage(self,tray):

		xx = self._ii.max()+1
		yy = self._jj.max()+1

		map = np.zeros(xx*yy).reshape(yy,xx)
		
		for i in range(len(self._ii)):
			map[self._jj[i]][self._ii[i]] = 1.0

		xjj = self._jj[np.bitwise_not(self.obs[tray])]
		xii = self._ii[np.bitwise_not(self.obs[tray])]

		for i in range(len(xii)):
			map[xjj[i]][xii[i]] = 2.0
			
		if tray == self._repeatTray:
			maskRep = self.repeatInfo['nobs'] > 0
			idx = np.arange(len(maskRep))[maskRep]
			xjj = self._jj[maskRep]
			xii = self._ii[maskRep]
			
			for i in range(len(xii)):
				map[xjj[i]][xii[i]] += self.repeatInfo['nobs'][idx[i]]
		else:
			xjj = self._jj[np.bitwise_not(self.obs[tray])]
			xii = self._ii[np.bitwise_not(self.obs[tray])]

			for i in range(len(xii)):
				map[xjj[i]][xii[i]] = 2.0+self._nrepeat

		return map
Code Example #8
def removeShortEvs(tsin,md):
    """ >>> ts=np.array([1,1,1,0,1,1,1,0,0,1,1,1,0,0,0,1,1,1,
                0,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,1,0,1])
        >>> print(ts)
        >>> print(removeShortEvs(np.int32(ts==1), 2))
    """
    """
    evs=[]
    if not np.any(tsin): return np.int32(tsin)
    if np.all(tsin): return np.int32(tsin)
    tser=np.copy(tsin)
    ton = np.bitwise_and(tser,
        np.bitwise_not(np.roll(tser,1))).nonzero()[0].tolist()
    toff=np.bitwise_and(np.roll(tser,1),
        np.bitwise_not(tser)).nonzero()[0].tolist()
    if ton[-1]>toff[-1]:toff.append(tser.shape[0])
    if ton[0]>toff[0]:ton.insert(0,0)
    assert len(ton)==len(toff)
    #print np.int32(np.bitwise_and(tser,np.bitwise_not(np.roll(tser,1))))
    #print np.int32(np.bitwise_and(np.roll(tser,1),np.bitwise_not(tser)))
    for f in range(len(ton)):
        ts=ton[f];te=toff[f];dur=te-ts
        #print ts, te,dur
        if  dur<md: tsin[ts:te]-=1
    #tsin -= temp[:,val]
    return np.int32(tsin)
Code Example #9
File: Geometry.py Project: sakharin/Plot
    def projectVecs2Depth(self, T, vA, vB):
        shape = vB.shape
        if len(shape) == 2:
            vC = vA + vB
            A = np.linalg.norm(vA)
            B = np.linalg.norm(vB)
            C = np.linalg.norm(vC)

            vADotvB = (vA * vB).sum()
            vBDotvC = (vB * -vC).sum()
            vADotvC = (-vC * vA).sum()
            alpha = np.arccos(vADotvB / (A * B))
            beta = np.arccos(vBDotvC / (B * C))
            gamma = np.arccos(vADotvC / (A * C))

            if alpha == PI:
                return vA / A * T
            if alpha == 0:
                return vA / A * -T
            if alpha + beta + gamma != PI:
                alpha = PI - alpha

            beta = np.arcsin(A * np.sin(alpha) / T)
            gamma = PI - alpha - beta
            B_new = np.sin(gamma) * T / np.sin(alpha)
            vB = vB / B * B_new
            vC = vA + vB
            return vC
        if len(shape) == 3:
            h, w, d = shape

            vA = vA.reshape((1, 1, 3))
            vC = vA + vB
            A = self.normVec(vA)
            B = self.normVec(vB)
            C = self.normVec(vC)

            vADotvB = (vA * vB).sum(axis=2)
            vBDotvC = (vB * -vC).sum(axis=2)
            vADotvC = (-vC * vA).sum(axis=2)
            alpha = np.arccos(vADotvB / (A * B))
            beta = np.arccos(vBDotvC / (B * C))
            gamma = np.arccos(vADotvC / (A * C))

            mask1 = alpha == 0
            mask2 = alpha + beta + gamma != PI
            # keep alpha where the angle sum is PI; use PI - alpha otherwise
            alpha = alpha * np.bitwise_not(mask2) + \
                (PI - alpha) * mask2
            # Avoid division by zero
            alpha += 1 * mask1
            beta = np.arcsin(A * np.sin(alpha) / T)
            gamma = PI - alpha - beta
            B_new = np.sin(gamma) * T / np.sin(alpha)
            vB = vB * (B_new / B).reshape((h, w, 1))
            vC = vA + vB
            vC = vC * np.bitwise_not(mask1).reshape((h, w, 1)) + \
                (vA / A * T * mask1.reshape((h, w, 1)))
            return vC
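The 3D branch replaces the scalar if/else of the 2D branch with mask arithmetic: multiplying one expression by a boolean mask and the other by its complement selects per element (the corrected alpha line above is exactly this pattern). A tiny illustration; np.where(m, b, a) would do the same:

import numpy as np

a = np.array([0.2, 1.3, 2.9])
b = np.array([9.0, 9.0, 9.0])
m = np.array([False, True, False])
out = a * np.bitwise_not(m) + b * m
print(out)  # [0.2 9.  2.9]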
Code Example #10
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = self.high
        weights = weights.copy()
        weights[selection] = 0.0

        numpy.greater_equal(q, self.low, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.underflow._numpy(data, subweights, shape)

        numpy.less(q, self.high, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.overflow._numpy(data, subweights, shape)

        if all(isinstance(value, Count) and value.transform is identity for value in self.values) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
            # Numpy defines histograms as including the upper edge of the last bin only, so drop that
            weights[q == self.high] = 0.0  # assignment, not comparison

            h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)

            for hi, value in zip(h, self.values):
                value.fill(None, float(hi))

        else:
            q = numpy.array(q, dtype=numpy.float64)
            numpy.subtract(q, self.low, q)
            numpy.multiply(q, self.num, q)
            numpy.divide(q, self.high - self.low, q)
            numpy.floor(q, q)
            q = numpy.array(q, dtype=int)

            for index, value in enumerate(self.values):
                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                value._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
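The weights[q == self.high] = 0.0 line (fixed above from a stray ==) exists because numpy.histogram closes the last bin on the right, while this aggregator routes values at self.high to overflow. A short demonstration of the edge behavior:

import numpy

q = numpy.array([0.0, 0.5, 1.0])  # 1.0 sits exactly on the upper edge
h, edges = numpy.histogram(q, 2, (0.0, 1.0))
print(h)  # [1 2] : the last bin includes its upper edge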
Code Example #11
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = 0.0
        weights = weights.copy()
        weights[selection] = 0.0

        if all(isinstance(v, Count) and v.transform is identity for c, v in self.bins) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):

            h, _ = numpy.histogram(q, [float("-inf")] + [(c1 + c2)/2.0 for (c1, v1), (c2, v2) in zip(self.bins[:-1], self.bins[1:])] + [float("inf")], weights=weights)

            for hi, (c, v) in zip(h, self.bins):
                v.fill(None, float(hi))

        else:
            selection = numpy.empty(q.shape, dtype=bool)
            selection2 = numpy.empty(q.shape, dtype=bool)

            for index in range(len(self.bins)):
                if index == 0:
                    high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                    numpy.greater_equal(q, high, selection)

                elif index == len(self.bins) - 1:
                    low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                    numpy.less(q, low, selection)

                else:
                    low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                    high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                    numpy.less(q, low, selection)
                    numpy.greater_equal(q, high, selection2)
                    numpy.bitwise_or(selection, selection2, selection)

                subweights[:] = weights
                subweights[selection] = 0.0
                self.bins[index][1]._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
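The fast path converts bin centers to histogram edges by taking midpoints between adjacent centers and padding with ±inf, so every finite value lands in the bin of its nearest center. A sketch of just that conversion:

import numpy

centers = [1.0, 2.0, 4.0]
edges = ([float("-inf")]
         + [(c1 + c2) / 2.0 for c1, c2 in zip(centers[:-1], centers[1:])]
         + [float("inf")])
print(edges)  # [-inf, 1.5, 3.0, inf]
h, _ = numpy.histogram([0.9, 1.6, 5.0, 3.5], edges)
print(h)      # [1 1 2] : each value counted at its nearest center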
Code Example #12
File: comparecalc.py Project: skconsulting/ild
def calnewpat(pat,slnroi,tabroipat,tabroi):
    print('new pattern : ', pat)

    if pat=='HCpret':
        pat1='HC'
        pat2='reticulation'

    elif pat=='HCpbro':
        pat1='HC'
        pat2='bronchiectasis'

    elif pat=='GGpbro':
        pat1='ground_glass'
        pat2='bronchiectasis'

    elif pat == 'GGpret':
        pat1='ground_glass'
        pat2='reticulation'

    elif pat=='bropret':
        pat1='bronchiectasis'
        pat2='reticulation'

    for i in slnroi:
        tab1=np.copy(tabroipat[pat1][i])
        np.putmask(tab1,tab1>0, 255)
        tab2=np.copy(tabroipat[pat2][i])
        np.putmask(tab2,tab2>0, 255)
        tab3=np.copy(tabroipat[pat][i])
        np.putmask(tab3,tab3>0, 255)
        taball=np.bitwise_or(tab2,tab1) 
        taball=np.bitwise_or(taball,tab3)
        np.putmask(taball, taball> 0, 255) 
        taballnot=np.bitwise_not(taball)


        tab=np.bitwise_and(tab1,tab2)        
        if tab.max()>0:     
            tab3=np.bitwise_or(tab3,tab)
            tabn=np.bitwise_not(tab3)      
            tab1=np.bitwise_and(tab1,tabn)
            np.putmask(tab1, tab1> 0, classif[pat1]+1)
            
            tab2=np.bitwise_and(tab2,tabn)
            np.putmask(tab2, tab2> 0, classif[pat2]+1)  
            
            np.putmask(tab, tab> 0, classif[pat]+1)            

            tabroi[i]=np.bitwise_and(tabroi[i],taballnot)             
            tabroi[i]=np.bitwise_or(tabroi[i],tab1) 
            tabroi[i]=np.bitwise_or(tabroi[i],tab2) 
            tabroi[i]=np.bitwise_or(tabroi[i],tab) 

    return tabroi
Code Example #13
File: ETData.py Project: gestaltrevision/Chase
def tseries2eventlist(tser):
    tser=np.int32(tser)
    if tser.sum()==0: return []
    d=np.bitwise_and(tser,np.bitwise_not(np.roll(tser,1)))
    on = (d[1:].nonzero()[0]+1).tolist()
    d=np.bitwise_and(np.roll(tser,1),np.bitwise_not(tser))
    off=d[1:].nonzero()[0].tolist()
    if len(off)==0:off.append(tser.shape[0]-1)
    if len(on)==0: on.insert(0,0)
    if on[-1]>off[-1]: off.append(tser.shape[0]-1)
    if on[0]>off[0]: on.insert(0,0)
    if len(on)!=len(off): print('invalid fixonoff'); raise TypeError
    out=np.array([on,off]).T
    return out.tolist()
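A quick usage check for the conversion above; each event comes back as an inclusive [onset, offset] pair of frame indices:

import numpy as np

ts = np.array([0, 1, 1, 0, 1])
print(tseries2eventlist(ts))  # [[1, 2], [4, 4]]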
Code Example #14
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # switch to float here like in bin.py else numpy throws
        # TypeError on trivial integer cases such as:
        # >>> q = numpy.array([1,2,3,4])
        # >>> np.divide(q,1,q)
        # >>> np.floor(q,q)
        q = numpy.array(q, dtype=numpy.float64)
        neginfs = numpy.isneginf(q)
        posinfs = numpy.isposinf(q)

        numpy.subtract(q, self.origin, q)
        numpy.divide(q, self.binWidth, q)
        numpy.floor(q, q)
        q = numpy.array(q, dtype=numpy.int64)
        q[neginfs] = LONG_MINUSINF
        q[posinfs] = LONG_PLUSINF

        selected = q[weights > 0.0]

        selection = numpy.empty(q.shape, dtype=bool)
        for index in numpy.unique(selected):
            if index != LONG_NAN:
                bin = self.bins.get(index)
                if bin is None:
                    bin = self.value.zero()
                    self.bins[index] = bin

                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                bin._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
Code Example #15
File: xrmr.py Project: jackey-qiu/genx
def create_chi(g_0, lamda, chi0, A, B, C, M, d, mag_limit=1e-8, mpy_limit=1e-9):
    A = A.astype(np.complex128)
    B = B.astype(np.complex128)
    C = C.astype(np.complex128)
    # C = B*0.0

    m_x = M[..., 0]
    m_y = M[..., 1]
    m_z = M[..., 2]
    chi_xx = chi0 + A + C * m_x * m_x
    chi_yy = chi0 + A + C * m_y * m_y
    chi_zz = chi0 + A + C * m_z * m_z
    chi_xy = -1.0j * B * m_z + C * m_x * m_y
    chi_yx = -(-1.0j * B * m_z + C * m_x * m_y)
    chi_xz = 1.0j * B * m_y + C * m_x * m_z
    chi_zx = -(1.0j * B * m_y + C * m_x * m_z)
    chi_yz = -1.0j * B * m_x + C * m_y * m_z
    chi_zy = -(-1.0j * B * m_x + C * m_y * m_z)
    chi = ((chi_xx, chi_xy, chi_xz), (chi_yx, chi_yy, chi_yz), (chi_zx, chi_zy, chi_zz))

    # Take into account non-magnetic materials:
    non_mag = np.bitwise_and(np.abs(B) < mag_limit, np.abs(C) < mag_limit)
    # Ignore the ambient (vacuum)
    non_mag[0] = False

    # Take into account the matrix singularity arising when M||Y
    mpy = np.bitwise_and(np.abs(m_y - 1.0) < mpy_limit, np.bitwise_not(non_mag))

    return chi, non_mag, mpy
Code Example #16
File: genePGre.py Project: skconsulting/ild
def genebackground(namedir,listroi):
    for sln in listroi:
        tabpbac=np.copy(tabslung[sln])
#        
        patok=False
        for pat in usedclassifall:
            if pat !=fidclass(0,classifall):
#                print sln,pat
                tabpat=tabroipat[pat][sln]

                if tabpat.max()>0:
                    patok=True
#                    tabp=cv2.cvtColor(tabpat,cv2.COLOR_BGR2GRAY)
                    np.putmask(tabpat,tabpat>0,255)
                    mask=np.bitwise_not(tabpat)
                    tabpbac=np.bitwise_and(tabpbac,mask)
#                    print tabroipat[fidclass(0,classif)][sln].shape
                    tabroipat[fidclass(0,classifall)][sln]=tabpbac

        if patok:
            labeldir=os.path.join(namedir,fidclass(0,classifall))
            if not os.path.exists(labeldir):
               os.mkdir(labeldir)
            namepat=tabscanName[sln]+'.'+typei1
            imgcoreScan=os.path.join(labeldir,namepat)
    #                imgcoreScan=os.path.join(locadir,namepat)
            tabtowrite=colorimage(tabroipat[fidclass(0,classifall)][sln],classifc[fidclass(0,classifall)])
#            tabtowrite=colorimage(tabroipat[fidclass(0,classifall)][sln],(100,100,100))

#            tabtowrite=cv2.cvtColor(tabtowrite,cv2.COLOR_BGR2RGB)
            cv2.imwrite(imgcoreScan,tabtowrite)    
Code Example #17
File: dsi.py Project: StongeEtienne/dipy
def create_qtable(gtab, origin):
    """ create a normalized version of gradients

    Parameters
    ----------
    gtab : GradientTable
    origin : (3,) ndarray
        center of qspace

    Returns
    -------
    qtable : ndarray
    """

    bv = gtab.bvals
    bsorted = np.sort(bv[np.bitwise_not(gtab.b0s_mask)])
    for i in range(len(bsorted)):
        bmin = bsorted[i]
        try:
            if np.sqrt(bv.max() / bmin) > origin + 1:
                continue
            else:
                break
        except ZeroDivisionError:
            continue

    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * gtab.bvecs
    return np.floor(qtable + .5)
Code Example #18
File: MIMAS.py Project: PaulHancock/Aegean
def mask_table(region, table, negate=False, racol='ra', deccol='dec'):
    """
    Apply a given mask (region) to the table, removing all the rows with ra/dec inside the region.
    If negate=True then remove the rows with ra/dec outside the region.


    Parameters
    ----------
    region : :class:`AegeanTools.regions.Region`
        Region to mask.

    table : Astropy.table.Table
        Table to be masked.

    negate :  bool
        If True then pixels *outside* the region are masked.
        Default = False.

    racol, deccol : str
        The name of the columns in `table` that should be interpreted as ra and dec.
        Default = 'ra', 'dec'

    Returns
    -------
    masked : Astropy.table.Table
        A view of the given table which has been masked.
    """
    inside = region.sky_within(table[racol], table[deccol], degin=True)
    if not negate:
        mask = np.bitwise_not(inside)
    else:
        mask = inside
    return table[mask]
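The negate logic reduces to choosing between a boolean mask and its complement before indexing. A minimal sketch with a toy inside mask:

import numpy as np

inside = np.array([True, False, True, True])
rows = np.arange(4)
print(rows[np.bitwise_not(inside)])  # [1] : default keeps rows outside the region
print(rows[inside])                  # [0 2 3] : negate=True keeps rows inside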
Code Example #19
File: motion.py Project: domwoe/experiments
    def highlightedImage(self, background, motion, number):
        redChannel = background[:, :, 2]
        # highlight motion
        background[:, :, 2] = np.bitwise_and(np.bitwise_not(motion), redChannel) + np.bitwise_and(motion, redChannel//3 + 168)
        cv2.putText(background, 'motion!', (self.frame_size[1]-50, self.frame_size[0]//2), self.font, 1, (0, 0, 255), 2)
        cv2.putText(background, str(number), (self.frame_size[1]//2-100, self.frame_size[0]//2-100), self.font, 2, (0, 255, 0), 2)
        return background
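With a 0/255 uint8 mask, bitwise_and acts as a per-pixel select and bitwise_not as its complement, so the two AND terms splice the brightened and original red channel without branching. A minimal sketch of that blend:

import numpy as np

red = np.uint8([10, 200, 90])
mask = np.uint8([0, 255, 0])  # 0/255 motion mask
out = np.bitwise_and(np.bitwise_not(mask), red) + np.bitwise_and(mask, red//3 + 168)
print(out)  # [ 10 234  90] : only the masked pixel is altered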
Code Example #20
File: event.py Project: JuliaSprenger/mne-python
def _mask_trigs(events, mask, mask_type):
    """Helper function for masking digital trigger values"""
    if mask is not None:
        if not isinstance(mask, int):
            raise TypeError('You provided a(n) %s.' % type(mask) +
                            'Mask must be an int or None.')
    n_events = len(events)
    if n_events == 0:
        return events.copy()

    if mask is not None:
        if mask_type is None:
            warn("The default setting will change from 'not_and' "
                 "to 'and' in v0.14.", DeprecationWarning)
            mask_type = 'not_and'
        if mask_type == 'not_and':
            mask = np.bitwise_not(mask)
        elif mask_type != 'and':
            if mask_type is not None:
                raise ValueError("'mask_type' should be either 'and'"
                                 " or 'not_and', instead of '%s'" % mask_type)
        events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
    events = events[events[:, 1] != events[:, 2]]

    return events
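In the 'not_and' case the integer mask is complemented first, so the subsequent bitwise_and clears exactly the masked bits from each trigger word. A small sketch with hypothetical trigger values:

import numpy as np

triggers = np.array([5, 7, 12])
mask = np.bitwise_not(4)  # clear bit 2 (value 4) everywhere
print(np.bitwise_and(triggers, mask))  # [1 3 8]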
Code Example #21
File: OptiposLib.py Project: heathzj/moped
    def fieldMask(self, field, numberOfFieldsPerCircle):
        """
        Returns a square matrix of size 3 * self.markerSizePixels, where the elements corresponding to the given field are 1, and all other elements are 0.
        """
        if (field, numberOfFieldsPerCircle) in self.fieldMaskCache:
            return self.fieldMaskCache[(field, numberOfFieldsPerCircle)]
        else:
            halfSize = 3 * self.markerSizePixels // 2
            result = np.zeros((halfSize * 2, halfSize * 2), dtype=np.uint8)
            fillColor = 255
            if field == 0:  # Background field, return a rectangle around the circles
                result = np.bitwise_not(self.markerMask())
            elif 0 < field <= 2 * numberOfFieldsPerCircle:
                if field <= numberOfFieldsPerCircle:
                    # First circle
                    y = - 3 * self.markerSizePixels // 4
                    rotationAngle = (-90 + (field - 1) * 360 // numberOfFieldsPerCircle) % 360
                else:
                    # Second circle
                    y = 3 * self.markerSizePixels // 4
                    rotationAngle = (90 - (field - numberOfFieldsPerCircle) * 360 // numberOfFieldsPerCircle) % 360
                cv2.ellipse(result, (halfSize, halfSize + y), (self.markerSizePixels // 2, self.markerSizePixels // 2),
                            rotationAngle, 0, 360 // numberOfFieldsPerCircle, fillColor, cv2.FILLED)
            else:
                raise Exception("MarkerCandidate.fieldMask: invalid field: " + str(field))
            self.fieldMaskCache[(field, numberOfFieldsPerCircle)] = result
            return result
Code Example #22
File: dirt.py Project: adussault/GenesisSEMImgProc
def dirtnalysis (img, res, MaskEdges=True, retSizes=False, verbose=False):
    """
    Runs molybdenum analysis on a given image. 
        Inputs: 
        - img - image as a numpy.ndarray
        - res - resolution of the image in square microns/pixel
        Key-Word Arguments:
        - MaskEdges = True - option to automatically mask off the background if 
            set to True
        - retSizes = False - option to return the dirt size data if set to True
        - verbose = False - prints verbose output if set to True.
        Returns a tuple containing:
            num - number of dirt particles
            area - area of the dirt in the image in square microns
            threshed - the dirt thresholded image (white dirt on black background) 
                as a numpy ndarray
            sizes[optional] -  a 1-dimensional numpy ndarray listing out the sizes
               (area) of each dirt particle in pixels 
    """
    
    # Dirt analysis
    threshed, masked = isolateDirt(img, verbose=verbose)
    area,num,sizes,labelled = meas.calcDirt(threshed, 
                                            res, 
                                            returnSizes=True,
                                            returnLabelled=True,
                                            getAreaInSquaremm=True)
    area = round(area,5)
    threshed = (masked/255)*np.bitwise_not(threshed)

    # put all results into return tuples
    if retSizes:
        return num, area, threshed, sizes
    else:
        return num, area, threshed
Code Example #23
def robust_l2(obs_phase, freqs, solve_cs=True):
    '''Solve for the tec and cs of a dataset.
    `obs_phase` : `numpy.ndarray`
        the measured phase with shape (num_freqs, )
    `freqs` : `numpy.ndarray`
        the frequencies at the datapoints (num_freqs,)
    `solve_cs` : (optional) bool
        Whether to solve cs (True)
    '''
    obs_phase = phase_unwrapp1d(obs_phase)
    if solve_cs:
        def residuals(m, freqs, obs_phase):
            tec,cs = m[0],m[1]
            return calc_phase(tec,freqs,cs=cs) - obs_phase
    else:
        def residuals(m, freqs, obs_phase):
            tec,cs = m[0],m[1]
            return calc_phase(tec,freqs,cs=0.) - obs_phase
    nan_mask = np.bitwise_not(np.isnan(obs_phase))
    obs_phase_ = obs_phase[nan_mask]
    freqs_ = freqs[nan_mask]
    m0 = [0.0, 0.]
    m = least_squares(residuals,m0,loss='soft_l1',f_scale=90.*np.pi/180.,args=(freqs_,obs_phase_))
    if solve_cs:
        return m.x[0], m.x[1]
    else:
        return m.x[0], 0.
Code Example #24
File: data_roifull.py Project: skconsulting/ild
def create_test_data(namedirtopcf,pat,tabscan,tabsroi,tabslung,datascan,tabscanName):
    
    (top,tail)=os.path.split(namedirtopcf)
    print('create test data for :', tail, 'pattern :', pat)
    pathpat=os.path.join(namedirtopcf,pat)

    list_image=[name for name in os.listdir(pathpat) if name.find('.'+typei1)>0] 
#    if len(list_image)==0:
#        list_image=[name for name in os.listdir(pathpat) if name.find('.'+typei)>0] 
#  
    if len(list_image)>0:

            for l in list_image:
                pos=l.find('.'+typei1)      
                ext=l[pos:len(l)]
                numslice=rsliceNum(l,'_',ext)
#                print numslice,tabroipat[numslice]
                if pat not in tabroipat[numslice]:
                    tabroipat[numslice].append(pat)                    
                if numslice not in numsliceok:
                    numsliceok.append(numslice)
                    
                    datascan=peparescan(numslice,tabscan[numslice],tabslung[numslice],datascan)
                    tabroi[numslice]=np.zeros((tabscan.shape[1],tabscan.shape[2]), np.uint8)
#                print numslice,tabroipat[numslice]
    #            tabl=tabslung[numslice].copy()
    #            np.putmask(tabl,tabl>0,1)
    
                newroi = cv2.imread(os.path.join(pathpat, l), 0) 
                
                if newroi.max()==0:
                    print(pathpat, l)
                    print(newroi.shape)
                    print(newroi.max(), newroi.min())
                    print('error image empty')
                    sys.exit()
                img=cv2.resize(newroi,(image_cols, image_rows),interpolation=cv2.INTER_LINEAR)

                
                np.putmask(tabroi[numslice], img > 0, 0)
    #                if classif[pat]>0:
                np.putmask(img, img > 0, classif[pat])
#                else:
#                    np.putmask(img, img > 0, classif['lung'])
                tablung=np.copy(tabslung[numslice])
                np.putmask(tablung,tablung>0,255)                      
                img=np.bitwise_and(tablung, img)  
                tabroi[numslice]+=img
                np.putmask(tablung,tablung>0,classif['healthy']) 
                tabroii=np.copy(tabroi[numslice])
                np.putmask(tabroii,tabroii>0,255) 
                mask=np.bitwise_not(tabroii)
                img=np.bitwise_and(tablung, mask)
                tabroif=np.bitwise_or(img,tabroi[numslice])
                tabroi[numslice]=tabroif
                           
#            
                    
    return tabroi,datascan
Code Example #25
File: test_ufuncs.py Project: pypyjs/pypy
    def test_unary_bitops(self):
        from numpy import bitwise_not, invert, array
        a = array([1, 2, 3, 4])
        assert (~a == [-2, -3, -4, -5]).all()
        assert (bitwise_not(a) == ~a).all()
        assert (invert(a) == ~a).all()
        assert invert(True) == False
        assert invert(False) == True
Code Example #26
File: CoinMaker.py Project: sbhackerspace/CoinMaker
def GetBlackAndWhiteImageFromFile(FileName, NoDithering):
  Image = cv2.imread(FileName, cv2.IMREAD_GRAYSCALE)
  Image = np.bitwise_not(Image)
  if NoDithering:
    return cv2.threshold(Image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
  else:
    return Image
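For uint8 images np.bitwise_not flips every bit, which is exactly 255 - pixel: a cheap photographic negative applied here before thresholding. A one-line check:

import numpy as np

gray = np.uint8([0, 100, 255])
print(np.bitwise_not(gray))  # [255 155   0] : identical to 255 - gray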
Code Example #27
def computeState(isFix,md,nfm=np.inf):
    fixations=[]
    if isFix.sum()==0: return np.int32(isFix),[]
    fixon = np.bitwise_and(isFix,
        np.bitwise_not(np.roll(isFix,1))).nonzero()[0].tolist()
    fixoff=np.bitwise_and(np.roll(isFix,1),
        np.bitwise_not(isFix)).nonzero()[0].tolist()
    if len(fixon)==0 and len(fixoff)==0: fixon=[0]; fixoff=[isFix.size-1]
    if fixon[-1]>fixoff[-1]:fixoff.append(isFix.shape[0]-1)
    if fixon[0]>fixoff[0]:fixon.insert(0,0)
    if len(fixon)!=len(fixoff): print('invalid fixonoff'); raise TypeError
    for f in range(len(fixon)):
        fs=fixon[f];fe=(fixoff[f]+1);dur=fe-fs
        if  dur<md[0] or dur>md[1]:
            isFix[fs:fe]=False
        else: fixations.append([fs,fe-1])
    return isFix,fixations
Code Example #28
def import_slab_data():
    filename = './alu_slab1.0_clip.xyz'
    filename = './kur_slab1.0_clip.xyz'
#    filename = './aluslab.xyz'
    data = np.loadtxt(filename)
    data = data[np.bitwise_not(np.isnan(data[:,2])),:]
    data = data[np.bitwise_and(np.bitwise_and(data[:,1]>=Latmin, data[:,1]<=Latmax), data[:,2]>-250)]  # keep rows with latitude in [Latmin, Latmax] and depth above 250 km
    return data
Code Example #29
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(weights.sum())

        import numpy
        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        numpy.bitwise_and(selection, weights > 0.0, selection)
        q = q[selection]
        weights = weights[selection]
        q *= weights

        self.sum += float(q.sum())
Code Example #30
File: Stitcher.py Project: pan-long/cs4243
    def stitch(self, base_img, img_to_stitch, homography=None):
        """
        Stitch img_to_stitch to base_img.
        :param base_img: The base image to which the img_to_stitch is going to be stitched on.
        :param img_to_stitch: The image to be stitched on base_img.
        :return: The warped image of the base_img and img_to_stitch.

        Note that the black part of the warped image will be chopped after stitching.
        """
        if homography is None:
            H = self.find_homography(base_img, img_to_stitch)
        else:
            H = homography
        H = H / H[2, 2]
        H_inv = la.inv(H)

        (min_x, min_y, max_x, max_y) = self.find_dimensions(img_to_stitch, H_inv)
        max_x = max(max_x, base_img.shape[1])
        max_y = max(max_y, base_img.shape[0])

        move_h = np.matrix(np.identity(3), np.float32)

        if (min_x < 0):
            move_h[0, 2] += -min_x
            max_x += -min_x

        if (min_y < 0):
            move_h[1, 2] += -min_y
            max_y += -min_y

        mod_inv_h = move_h * H_inv

        img_w = int(math.ceil(max_x))
        img_h = int(math.ceil(max_y))

        # Warp the new image given the homography from the old images.
        base_img_warp = cv2.warpPerspective(base_img, move_h, (img_w, img_h))

        img_to_stitch_warp = cv2.warpPerspective(img_to_stitch, mod_inv_h, (img_w, img_h))

        # Put the base image on an enlarged palette.
        enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

        # Create a mask from the warped image for constructing masked composite.
        (ret, data_map) = cv2.threshold(cv2.cvtColor(img_to_stitch_warp, cv2.COLOR_BGR2GRAY),
                                        0, 255, cv2.THRESH_BINARY)

        enlarged_base_img = cv2.add(enlarged_base_img, base_img_warp,
                                    mask=np.bitwise_not(data_map),
                                    dtype=cv2.CV_8U)

        final_img = cv2.add(enlarged_base_img, img_to_stitch_warp,
                            dtype=cv2.CV_8U)

        return final_img
Code Example #31
    def RunParallelConversion(self):
        os.chdir(self.workind_dir)
        print('\nRunning', len(self.runlist), 'jobs in parallel for', self.num_cores, 'cores\n')
        self.runs_dic_completed = OrderedDict(
            zip(self.runlist, [False for r in self.runlist]))
        self.runs_dic_running = OrderedDict(
            zip(self.runlist, [False for r in self.runlist]))
        self.queue = {c: None for c in range(self.num_cores)}
        self.queue_running = {c: False for c in range(self.num_cores)}
        self.queue_showing = {c: False for c in range(self.num_cores)}
        self.queue_runs = {c: None for c in range(self.num_cores)}
        first_time = True
        with open(os.devnull, 'w') as FNULL:
            while not np.array(list(self.runs_dic_completed.values()), '?').all():
                pending = np.bitwise_not(
                    np.bitwise_or(list(self.runs_dic_running.values()),
                                  list(self.runs_dic_completed.values())))
                pos_run, num_runs_left = pending.argmax(), pending.sum()
                jobi = self.runlist[pos_run]
                option = self.options[pos_run]
                do_add_queue = not np.array(list(self.queue_running.values()),
                                            '?').all() and num_runs_left > 0
                if do_add_queue:
                    pos_q = np.array(list(self.queue_running.values()), '?').argmin()
                    nfree = np.bitwise_not(list(self.queue_running.values())).sum()
                    print('\nRunning job', jobi, '...')
                    command = [self.workind_dir + '/' + self.exec_command] + option
                    if nfree == 1 and not np.array(list(self.queue_showing.values()),
                                                   '?').any() and self.verb:
                        print('\nShowing output for job', jobi)
                        self.queue[pos_q] = subp.Popen(command,
                                                       bufsize=-1,
                                                       stdin=subp.PIPE,
                                                       close_fds=True)
                        self.queue_showing[pos_q] = True
                    else:
                        self.queue[pos_q] = subp.Popen(command,
                                                       bufsize=-1,
                                                       stdin=subp.PIPE,
                                                       stdout=FNULL,
                                                       stderr=subp.STDOUT,
                                                       close_fds=True)
                    self.queue_running[pos_q] = True
                    self.runs_dic_running[jobi] = True
                    self.queue_runs[pos_q] = jobi
                if not first_time:
                    temp = deepcopy(self.queue_running)
                    for p, queue_p in temp.items():
                        if queue_p and self.queue[p]:
                            if self.queue[p].poll() is not None:
                                jobj = self.queue_runs[p]
                                print('\nJob', jobj, 'completed :). Closing ...', end='\r')
                                self.CloseSubprocess(self.queue[p],
                                                     stdin=True,
                                                     stdout=False)
                                self.queue[p] = None
                                self.queue_running[p] = False
                                if self.queue_showing[p]:
                                    self.queue_showing[p] = False
                                self.runs_dic_running[jobj] = False
                                self.runs_dic_completed[jobj] = True
                                print('Job', jobj, 'completed :). Closing ... Done\n')
                    time.sleep(3)
                else:
                    first_time = not np.array(list(self.queue_running.values()),
                                              '?').all()
Code Example #32
File: surgical.py Project: shidai/coast_guard_UWL
    def _clean(self, ar):
        patient = ar.clone()
        patient.pscrunch()
        patient.remove_baseline()

        # Shi Dai, 2019/01/02/, apply weights before forming the template

        # Get weights
        weights = patient.get_weights()
        # Remove profile from dedispersed data
        patient.dedisperse()
        data = patient.get_data().squeeze()

        # apply weights
        data = clean_utils.apply_weights(data, weights)

        #template = np.apply_over_axes(np.sum, data, (0, 1)).squeeze()
        # Shi Dai, 2019/05/16, using 2D template
        nsub, nchan, nbin = data.shape
        temp_T = np.sum(data, 0)
        temp_reshape = temp_T.reshape(
            (26, nchan // 26, nbin))  # hard coded to use 26 sub-bands
        template = np.sum(temp_reshape, axis=1)
        print("Using 2D template with %d channels." % (template.shape[0]))

        clean_utils.remove_profile_inplace(patient, template)
        #np.save('data', data)
        #np.save('template', template)

        # re-set DM to 0
        patient.dededisperse()

        # Get data (select first polarization - recall we already P-scrunched)
        data = patient.get_data()[:, 0, :, :]
        data = clean_utils.apply_weights(data, weights)

        #   # Remove profile from dedispersed data
        #   patient.dedisperse()
        #   data = patient.get_data().squeeze()
        #   template = np.apply_over_axes(np.sum, data, (0, 1)).squeeze()
        #   clean_utils.remove_profile_inplace(patient, template)
        #   # re-set DM to 0
        #   patient.dededisperse()
        #
        #   # Get weights
        #   weights = patient.get_weights()
        #   # Get data (select first polarization - recall we already P-scrunched)
        #   data = patient.get_data()[:,0,:,:]
        #   data = clean_utils.apply_weights(data, weights)

        # Mask profiles where weight is 0
        mask_2d = np.bitwise_not(np.expand_dims(weights, 2).astype(bool))
        mask_3d = mask_2d.repeat(ar.get_nbin(), axis=2)
        data = np.ma.masked_array(data, mask=mask_3d)

        # RFI-ectomy must be recommended by average of tests
        avg_test_results = clean_utils.comprehensive_stats(data, axis=2, \
                                    chanthresh=self.configs.chanthresh, \
                                    subintthresh=self.configs.subintthresh, \
                                    chan_order=self.configs.chan_order, \
                                    chan_breakpoints=self.configs.chan_breakpoints, \
                                    chan_numpieces=self.configs.chan_numpieces, \
                                    subint_order=self.configs.subint_order, \
                                    subint_breakpoints=self.configs.subint_breakpoints, \
                                    subint_numpieces=self.configs.subint_numpieces, \
                                    )
        for (isub, ichan) in np.argwhere(avg_test_results >= 1):
            # Be sure to set weights on the original archive, and
            # not the clone we've been working with.
            integ = ar.get_Integration(int(isub))
            integ.set_weight(int(ichan), 0.0)
Code Example #33
File: eeg_cnn_2.py Project: data-man-34/EEG_CNN-1
# the data, shuffled and split between train and test sets
data, labels = load_data(filename)
mat = scipy.io.loadmat(subjectsFilename, mat_dtype=True)
subjNumbers = np.squeeze(mat['subjectNum'])     # subject IDs for each trial

# Creating the folds
# kf = StratifiedKFold(np.squeeze(labels), n_folds=ksplit, shuffle=True, random_state=123)
# kf = KFold(labels.shape[0], n_folds=ksplit, shuffle=True, random_state=123)
# fold_pairs = [(tr, ts) for (tr, ts) in kf]

# Leave-Subject-Out cross validation
fold_pairs = []
for i in np.unique(subjNumbers):
    ts = subjNumbers == i
    tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))
    np.random.shuffle(tr)
    fold_pairs.append((tr, np.squeeze(np.nonzero(ts))))


trainScores, testScores = [], []
for fold in fold_pairs:
    (X_train, y_train), (X_test, y_test) = reformatInput(data, labels, fold)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
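The fold construction above is leave-one-subject-out: each subject's boolean test mask is inverted with np.bitwise_not to obtain the training indices. A minimal sketch of the indexing (shuffling omitted):

import numpy as np

subjNumbers = np.array([1, 1, 2, 2, 3, 3])
for i in np.unique(subjNumbers):
    ts = subjNumbers == i                            # held-out subject's trials
    tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))  # all other trials
    print(i, tr, np.squeeze(np.nonzero(ts)))
# 1 [2 3 4 5] [0 1]
# 2 [0 1 4 5] [2 3]
# 3 [0 1 2 3] [4 5]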
Code Example #34
    # Read image
    # img = cv2.imread("./20210310_BSA&SERUM/1PM/C/screenshots/18.png")
    img = cv2.imread(imagePath)
    img_np = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # blurred = cv2.GaussianBlur(img, (5, 5), 0)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ############################ red mask ####################### -- step 1 ----------  not in use
    mask_r0 = cv2.inRange(hsv, redLower, redUpper)

    #################################### threshold minus green  -- step 2 ----------  not in use
    ret, th2 = cv2.threshold(img, 70, 255, cv2.THRESH_BINARY)  # 70 threshold ###################################
    # cv2.imshow("th2",th2)
    hsv_th2 = cv2.cvtColor(th2, cv2.COLOR_BGR2HSV)
    mask_g1 = cv2.inRange(hsv_th2, greenLower, greenUpper)
    mask_g2 = np.bitwise_not(mask_g1)
    res_minus_green = cv2.bitwise_and(hsv_th2, hsv_th2, mask=mask_g2)
    gray = cv2.cvtColor(res_minus_green, cv2.COLOR_BGR2GRAY)
    # cv2.imshow("th2-green",res_minus_green)
    # cv2.imshow("gray",gray)

    mask_3 = np.bitwise_or(gray, mask_r0)
    ret, mask_3 = cv2.threshold(mask_3, 50, 255, cv2.THRESH_BINARY)  # 10 threshold
    # cv2.imshow("colormask+thresholdcolor",mask_3)

    ##### RED + GREEN
    _, contours_thresh_gray_im_with_keypoints0, _ = cv2.findContours(mask_3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    n_small_dot_0 = 0
    n_big_dot_0 = 0
Code Example #35
    def match2(self, img1, img2, step):
        #need rgb and bw versions of both images
        Slice = self.step_dict[step]
        base_image = self.to_BW(img1)
        next_image = self.to_BW(img2)
        print('SIFT detect and compute')
        self.mem.append(self.process.memory_info().rss)
        self.Vmem.append(self.process.memory_info().vms)
        base_features, base_descriptions = self.SIFT.detectAndCompute(
            base_image[Slice[0]:Slice[1]], None)
        new_features, new_descriptions = self.SIFT.detectAndCompute(
            next_image[Slice[2]:Slice[3]], None)
        self.mem.append(self.process.memory_info().rss)
        self.Vmem.append(self.process.memory_info().vms)
        print('knn Matching')
        matches = self.matcher.knnMatch(new_descriptions,
                                        trainDescriptors=base_descriptions,
                                        k=2)
        print('number of matches: ', len(matches))

        matches_subset = self.filter_matches(matches)
        distance = self.imageDistance(matches_subset)

        kp1 = []
        kp2 = []

        for match in matches_subset:
            kp1.append(base_features[match.trainIdx])
            kp2.append(new_features[match.queryIdx])

        p1 = np.array([k.pt for k in kp1])
        p2 = np.array([k.pt for k in kp2])

        H, stat = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        inlierRatio = float(np.sum(stat)) / float(len(stat))
        print('inlier ratio: ', inlierRatio)
        H = H / H[2, 2]
        H_inv = linalg.inv(H)
        (min_x, min_y, max_x, max_y) = self.findDimensions(img2, H_inv)

        # Adjust max_x and max_y by base img size
        max_x = max(max_x, base_image.shape[1])
        max_y = max(max_y, base_image.shape[0])

        move_h = np.matrix(np.identity(3), np.float32)

        if (min_x < 0):
            move_h[0, 2] += -min_x
            max_x += -min_x

        if (min_y < 0):
            move_h[1, 2] += -min_y
            max_y += -min_y

        mod_inv_h = move_h * H_inv

        img_w = int(math.ceil(max_x))
        img_h = int(math.ceil(max_y))

        base_h, base_w, base_d = img1.shape
        next_h, next_w, next_d = img2.shape

        img1 = img1[5:(base_h - 5), 5:(base_w - 5)]
        img2 = img2[5:(next_h - 5), 5:(next_w - 5)]
        base_img_warp = cv2.warpPerspective(img1, move_h, (img_w, img_h))
        next_img_warp = cv2.warpPerspective(img2, mod_inv_h, (img_w, img_h))

        enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

        (ret, data_map) = cv2.threshold(
            cv2.cvtColor(next_img_warp, cv2.COLOR_BGR2GRAY), 0, 255,
            cv2.THRESH_BINARY)

        # add base image
        enlarged_base_img = cv2.add(enlarged_base_img,
                                    base_img_warp,
                                    mask=np.bitwise_not(data_map),
                                    dtype=cv2.CV_8U)

        # add next image
        final_img = cv2.add(enlarged_base_img, next_img_warp, dtype=cv2.CV_8U)

        #Crop black edge
        final_gray = cv2.cvtColor(final_img, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(final_gray, 1, 255, cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)

        max_area = 0
        best_rect = (0, 0, 0, 0)

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)

            deltaHeight = h - y
            deltaWidth = w - x

            area = deltaHeight * deltaWidth

            if (area > max_area and deltaHeight > 0 and deltaWidth > 0):
                max_area = area
                best_rect = (x, y, w, h)

        if (max_area > 0):
            final_img_crop = final_img[best_rect[1]:best_rect[1] +
                                       best_rect[3],
                                       best_rect[0]:best_rect[0] +
                                       best_rect[2]]

            final_img = final_img_crop
        return final_img, img_w, x, w
Code Example #36
    def evolve_motion(self,
                      t_span,
                      freeze_axis=[False, False, False],
                      random_recoil=False,
                      random_force=False,
                      max_scatter_probability=0.1,
                      progress_bar=False,
                      record_force=False,
                      **kwargs):
        """
        Evolve the populations :math:`N` and the motion of the atom in time.

        This function evolves the rate equations, moving the atom through space,
        given the instantaneous force, for some period of time.

        Parameters
        ----------
        t_span : list or array_like
            A two element list or array that specify the initial and final time
            of integration.
        freeze_axis : list of boolean
            Freeze atomic motion along the specified axis.
            Default: [False, False, False]
        random_recoil : boolean
            Allow the atom to randomly recoil from scattering events.
            Default: False
        random_force : boolean
            Rather than calculating the force using the rateeq.force() method,
            use the calculated scattering rates from each of the laser beam
            (combined with the instantaneous populations) to randomly add photon
            absorption events that cause the atom to recoil randomly from the
            laser beam(s).
            Default: False
        max_scatter_probability : float
            When undergoing random recoils and/or force, this sets the maximum
            time step such that the maximum scattering probability is less than
            or equal to this number during the next time step.  Default: 0.1
        progress_bar : boolean
            If true, show a progress bar as the calculation proceeds.
            Default: False
        record_force : boolean
            If true, record the instantaneous force and store in the solution.
            Default: False
        **kwargs :
            Additional keyword arguments get passed to solve_ivp_random, which
            is what actually does the integration.

        Returns
        -------
        sol : OdeSolution
            Bunch object that contains the following fields:

                * t: integration times found by solve_ivp
                * N: population vs. time
                * v: atomic velocity
                * r: atomic position

            It contains other important elements, which can be discerned from
            scipy's solve_ivp documentation.
        """
        free_axes = np.bitwise_not(freeze_axis)

        if progress_bar:
            progress = progressBar()

        if record_force:
            ts = []
            Fs = []

        def motion(t, y):
            N = y[:-6]
            v = y[-6:-3]
            r = y[-3:]

            Rev, Rijl = self.construct_evolution_matrix(r, v, t)
            if not random_force:
                if record_force:
                    F = self.force(r, t, N, return_details=True)

                    ts.append(t)
                    Fs.append(F)

                    F = F[0]
                else:
                    F = self.force(r, t, N, return_details=False)

                dydt = np.concatenate(
                    (Rev @ N, F * free_axes / self.hamiltonian.mass +
                     self.constant_accel, v))
            else:
                dydt = np.concatenate((Rev @ N, self.constant_accel, v))

            if np.any(np.isnan(dydt)):
                raise ValueError('Encountered a NaN!')

            if progress_bar:
                progress.update(t / t_span[-1])

            return dydt

        def random_force_func(t, y, dt):
            total_P = 0
            num_of_scatters = 0

            # Go over all available keys:
            for key in self.laserBeams:
                # Extract the pumping rate from each laser:
                Rl = np.sum(self.Rijl[key], axis=(1, 2))

                # Calculate the probability to scatter a photon from the laser:
                P = Rl * dt

                # Roll the dice N times, where $N=\sum(lasers)
                dice = np.random.rand(len(P))

                # Give them kicks!
                for ii in np.arange(len(Rl))[dice < P]:
                    num_of_scatters += 1
                    y[-6:-3] += self.laserBeams[key].beam_vector[ii].kvec(y[-3:], t)/\
                                self.hamiltonian.mass
                    # Can branch to a different, lower state, but let's ignore
                    # that for the moment.
                    y[-6:-3] += self.recoil_velocity[key] * (
                        random_vector(free_axes))

                total_P += np.sum(P)

            # Calculate a new maximum dt to make sure we evolve while not
            # exceeding dt max:
            new_dt_max = (max_scatter_probability / total_P) * dt

            return (num_of_scatters, new_dt_max)

        def random_recoil_func(t, y, dt):
            num_of_scatters = 0
            total_P = 0.

            # Go over each block in the Hamiltonian and compute the decay:
            for key in self.decay_rates:
                P = dt * self.decay_rates[key] * y[self.decay_N_indices[key]]

                # Roll the dice N times, where $N=\sum(n_i)
                dice = np.random.rand(len(P))

                # For any random number that is lower than P_i, add a
                # recoil velocity.
                for ii in range(np.sum(dice < P)):
                    num_of_scatters += 1
                    y[-6:-3] += self.recoil_velocity[key] * (
                        random_vector(free_axes) + random_vector(free_axes))

                # Save the total probability of a scatter:
                total_P += np.sum(P)

            # Calculate a new maximum dt to make sure we evolve while not
            # exceeding dt max:
            new_dt_max = (max_scatter_probability / total_P) * dt

            return (num_of_scatters, new_dt_max)

        y0 = np.concatenate((self.N0, self.v0, self.r0))
        if random_force:
            self.sol = solve_ivp_random(
                motion,
                random_force_func,
                t_span,
                y0,
                initial_max_step=max_scatter_probability,
                **kwargs)
        elif random_recoil:
            self.sol = solve_ivp_random(
                motion,
                random_recoil_func,
                t_span,
                y0,
                initial_max_step=max_scatter_probability,
                **kwargs)
        else:
            self.sol = solve_ivp(motion, t_span, y0, **kwargs)

        if progress_bar:
            # Just in case the solve_ivp_random terminated due to an event.
            progress.update(1.)

        # Rearrange the solution:
        self.sol.N = self.sol.y[:-6]
        self.sol.v = self.sol.y[-6:-3]
        self.sol.r = self.sol.y[-3:]

        if record_force:
            f = interp1d(ts[:-1], np.array([f[0] for f in Fs[:-1]]).T)
            self.sol.F = f(self.sol.t)

            f = interp1d(ts[:-1], np.array([f[2] for f in Fs[:-1]]).T)
            self.sol.fmag = f(self.sol.t)

            self.sol.f = {}
            for key in Fs[0][1]:
                f = interp1d(ts[:-1], np.array([f[1][key] for f in Fs[:-1]]).T)
                self.sol.f[key] = f(self.sol.t)
                self.sol.f[key] = np.swapaxes(self.sol.f[key], 0, 1)

        del self.sol.y

        return self.sol
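
The dice-roll scattering scheme used in random_force_func above can be isolated into a short standalone sketch. Everything here (toy_scatter_step, the per-laser rates, the scalar recoil_velocity) is illustrative and not part of the class above:

import numpy as np

def toy_scatter_step(v, rates, recoil_velocity, dt, rng):
    # Each laser scatters a photon during dt with probability P_i = R_i * dt.
    P = rates * dt
    dice = rng.random(len(P))          # one uniform roll per laser
    for ii in np.arange(len(rates))[dice < P]:
        # absorption kick along the beam (taken here along +x) ...
        v = v + recoil_velocity * np.array([1.0, 0.0, 0.0])
        # ... plus an isotropic spontaneous-emission kick:
        phi = rng.uniform(0.0, 2 * np.pi)
        cost = rng.uniform(-1.0, 1.0)
        sint = np.sqrt(1.0 - cost ** 2)
        v = v + recoil_velocity * np.array(
            [sint * np.cos(phi), sint * np.sin(phi), cost])
    return v

rng = np.random.default_rng(0)
v = toy_scatter_step(np.zeros(3), rates=np.array([0.1, 0.2]),
                     recoil_velocity=0.01, dt=0.1, rng=rng)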
Code example #37
File: training_wheel.py Project: alejandropan/ibllib
def get_wheel_data(session_path, bp_data=None, save=False):
    """
    Gets wheel data from raw files and converts positions into centimeters and
    timestamps into seconds.
    **Optional:** saves _ibl_wheel.times.npy and _ibl_wheel.position.npy

    Times:
    Gets Rotary Encoder timestamps (ms) for each position and converts to times.

    Uses time_converter to extract and convert timestamps (ms) to times (s).

    Positions:
    Positions are in cm of the RE perimeter, relative to 0. The 0 resets every trial.

    cmtick = radius (cm) * 2 * pi / n_ticks
    cmtick = 3.1 * 2 * np.pi / 1024

    :param session_path: absolute path of session folder
    :type session_path: str
    :param bp_data: dictionary containing the contents of the pybpod jsonable
                    file read with raw.load_data
    :type bp_data: dict, optional
    :param save: whether to save the corresponding alf file
                 to the alf folder, defaults to False
    :type save: bool, optional
    :return: Numpy structured array.
    :rtype: numpy.ndarray
    """
    status = 0
    if not bp_data:
        bp_data = raw.load_data(session_path)
    df = raw.load_encoder_positions(session_path)
    if df is None:
        logger_.error('No wheel data for ' + str(session_path))
        return None
    data = structarr(['re_ts', 're_pos', 'bns_ts'],
                     shape=(df.shape[0], ),
                     formats=['f8', 'f8', object])  # np.object was removed in newer NumPy
    data['re_ts'] = df.re_ts.values
    data['re_pos'] = df.re_pos.values
    data['bns_ts'] = df.bns_ts.values
    data['re_pos'] = data[
        're_pos'] / 1024 * 2 * np.pi  # convert positions to radians
    trial_starts = get_trial_start_times(session_path)
    # need a flag if the data resolution is 1ms due to the old version of rotary encoder firmware
    if np.all(np.mod(data['re_ts'], 1e3) == 0):
        status = 1
    data['re_ts'] = data['re_ts'] / 1e6  # convert ts to seconds
    # get the converter function to translate re_ts into behavior times
    convtime = time_converter_session(session_path, kind='re2b')
    data['re_ts'] = convtime(data['re_ts'])

    def get_reset_trace_compensation_with_state_machine_times():
        # this is the preferred way of getting resets, using the state machine time information;
        # it will not always work, depending on firmware versions and new bugs
        iwarn = []
        ns = len(data['re_pos'])
        tr_dc = np.zeros_like(data['re_pos'])  # trial dc component
        for bp_dat in bp_data:
            restarts = np.sort(
                np.array(bp_dat['behavior_data']['States timestamps']
                         ['reset_rotary_encoder'] + bp_dat['behavior_data']
                         ['States timestamps']['reset2_rotary_encoder'])[:, 0])
            ind = np.unique(
                np.searchsorted(data['re_ts'], restarts, side='left') - 1)
            # the rotary encoder doesn't always reset right away, and the reset sample given the
            # timestamp can be ambiguous: look for zeros
            for i in np.where(data['re_pos'][ind] != 0)[0]:
                # handle boundary effects
                if ind[i] > ns - 2:
                    continue
                # it happens quite often that we have to look at the next sample to find the reset
                if data['re_pos'][ind[i] + 1] == 0:
                    ind[i] = ind[i] + 1
                    continue
                # also case where the rotary doesn't reset to 0, but erratically to -1/+1
                if data['re_pos'][ind[i]] <= (1 / 1024 * 2 * np.pi):
                    ind[i] = ind[i] + 1
                    continue
                # compounded with the fact that the reset may have happened at the next sample.
                if np.abs(
                        data['re_pos'][ind[i] + 1]) <= (1 / 1024 * 2 * np.pi):
                    ind[i] = ind[i] + 1
                    continue
                # sometimes it is also the last trial that has this behaviour
                if (bp_data[-1] is bp_dat) or (bp_data[0] is bp_dat):
                    continue
                iwarn.append(ind[i])
                # at which point we are running out of possible bugs and calling it
            tr_dc[ind] = data['re_pos'][ind - 1]
        if iwarn:  # if a warning flag was caught in the loop throw a single warning
            logger_.warning(
                'Rotary encoder reset events discrepancy at following indices: '
                + str(iwarn) + ' times: ' + str(data['re_ts'][iwarn]))
        # exit status 0 is fine, 1 means something went wrong
        return tr_dc, len(iwarn) != 0

    # attempt to get the resets properly, unless the unit is ms, which means the precision
    # is not good enough to match state-machine times to wheel sample times
    if not status:
        tr_dc, status = get_reset_trace_compensation_with_state_machine_times()

    # if something was or went wrong, fall back to an agnostic way of getting resets: just use zero values
    if status:
        tr_dc = np.zeros_like(data['re_pos'])  # trial dc component
        i0 = np.where(data['re_pos'] == 0)[0]
        tr_dc[i0] = data['re_pos'][i0 - 1]
    # even if things went OK, the rotary encoder may not log the whole session; needs fixing outside
    else:
        i0 = np.where(
            np.bitwise_and(
                np.bitwise_or(data['re_ts'] >= trial_starts[-1],
                              data['re_ts'] <= trial_starts[0]),
                data['re_pos'] == 0))[0]
    # make sure the bounds are not included in the current list
    i0 = np.delete(
        i0, np.where(np.bitwise_or(i0 >= len(data['re_pos']) - 1, i0 == 0)))
    # a 0 sample is not a reset if 2 conditions are met:
    # 1/2 the trace reverses direction at the zero (an inflexion in the derivative)
    c1 = np.abs(
        np.sign(data['re_pos'][i0 + 1] - data['re_pos'][i0]) -
        np.sign(data['re_pos'][i0] - data['re_pos'][i0 - 1])) == 2
    # 2/2 the instantaneous velocity is below threshold
    c2 = np.abs(
        (data['re_pos'][i0] - data['re_pos'][i0 - 1]) /
        (EPS +
         (data['re_ts'][i0] - data['re_ts'][i0 - 1]))) < THRESHOLD_RAD_PER_SEC
    # apply reset to points identified as resets
    i0 = i0[np.where(np.bitwise_not(np.bitwise_and(c1, c2)))]
    tr_dc[i0] = data['re_pos'][i0 - 1]

    # unwrap the rotation (in radians) and then add the DC component from restarts
    data['re_pos'] = np.unwrap(data['re_pos']) + np.cumsum(tr_dc)

    # Time stamps may be repeated or very close to one another.
    # Find them, as they will induce large jitters on the velocity estimate
    # or errors in interpolation attempts
    rep_idx = np.where(
        np.diff(data['re_ts']) <= THRESHOLD_CONSECUTIVE_SAMPLES)[0]
    # Change the value of the repeated position
    data['re_pos'][rep_idx] = (data['re_pos'][rep_idx] +
                               data['re_pos'][rep_idx + 1]) / 2
    data['re_ts'][rep_idx] = (data['re_ts'][rep_idx] +
                              data['re_ts'][rep_idx + 1]) / 2
    # Now remove the repeat times that are rep_idx + 1
    data = np.delete(data, rep_idx + 1)

    # convert to cm
    data['re_pos'] = data['re_pos'] * WHEEL_RADIUS_CM

    # #  DEBUG PLOTS START HERE ########################
    # # if you are experiencing a new bug here is some plot tools
    # # do not forget to increment the wasted dev hours counter below
    # WASTED_HOURS_ON_THIS_WHEEL_FORMAT = 16
    #
    # import matplotlib.pyplot as plt
    # fig = plt.figure()
    # ax = plt.axes()
    # tstart = get_trial_start_times(session_path)
    # tts = np.c_[tstart, tstart, tstart + np.nan].flatten()
    # vts = np.c_[tstart * 0 + 100, tstart * 0 - 100, tstart + np.nan].flatten()
    # ax.plot(tts, vts, label='Trial starts')
    # ax.plot(convtime(df.re_ts.values/1e6), df.re_pos.values / 1024 * 2 * np.pi,
    #         '.-', label='Raw data')
    # i0 = np.where(df.re_pos.values == 0)
    # ax.plot(convtime(df.re_ts.values[i0] / 1e6), df.re_pos.values[i0] / 1024 * 2 * np.pi,
    #         'r*', label='Raw data zero samples')
    # ax.plot(convtime(df.re_ts.values / 1e6) , tr_dc, label='reset compensation')
    # ax.set_xlabel('Bpod Time')
    # ax.set_ylabel('radians')
    # #
    # restarts = np.array(bp_data[10]['behavior_data']['States timestamps']\
    #                         ['reset_rotary_encoder']).flatten()
    # # x__ = np.c_[restarts, restarts, restarts + np.nan].flatten()
    # # y__ = np.c_[restarts * 0 + 1, restarts * 0 - 1, restarts+ np.nan].flatten()
    # #
    # # ax.plot(x__, y__, 'k', label='Restarts')
    #
    # ax.plot(data['re_ts'], data['re_pos'] / WHEEL_RADIUS_CM, '.-', label='Output Trace')
    # ax.legend()
    # # plt.hist(np.diff(data['re_ts']), 400, range=[0, 0.01])
    # #  DEBUG PLOTS STOP HERE ########################

    check_alf_folder(session_path)
    if raw.save_bool(save, '_ibl_wheel.timestamps.npy'):
        tpath = os.path.join(session_path, 'alf', '_ibl_wheel.timestamps.npy')
        np.save(tpath, data['re_ts'])
    if raw.save_bool(save, '_ibl_wheel.position.npy'):
        ppath = os.path.join(session_path, 'alf', '_ibl_wheel.position.npy')
        np.save(ppath, data['re_pos'])
    return data
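
The reset compensation above boils down to np.unwrap plus a cumulative sum of the per-reset DC offsets. A toy illustration of that idea on made-up samples (not ibllib code):

import numpy as np

# Two 'trials'; the encoder angle resets to 0 at sample 4.
re_pos = np.array([0.0, 0.5, 1.0, 1.5, 0.0, 0.4, 0.8])
tr_dc = np.zeros_like(re_pos)
i0 = np.where(re_pos == 0)[0]
i0 = i0[i0 > 0]                  # the very first sample is not a reset
tr_dc[i0] = re_pos[i0 - 1]       # DC jump lost at each reset
continuous = np.unwrap(re_pos) + np.cumsum(tr_dc)
# continuous -> [0., 0.5, 1., 1.5, 1.5, 1.9, 2.3]: monotonic across the reset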
Code example #38
File: main.py Project: zhaozz-lab/PupilTracker
predictor = dlib.shape_predictor(
    os.path.join(data_path, 'shape_predictor_68_face_landmarks.dat'))

model = load_model(os.path.join(data_path, 'model.h5'))
model.load_weights(os.path.join(data_path, 'weights.h5'))

user32 = ctypes.windll.user32
screenX, screenY = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)

window_name = "Pupils Tracker"
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                      cv2.WINDOW_FULLSCREEN)

background = np.zeros((screenY, screenX), np.uint8)
background = np.bitwise_not(background)
background = cv2.cvtColor(background, cv2.COLOR_GRAY2BGR)
bg = background.copy()
cv2.imshow(window_name, bg)
cap = cv2.VideoCapture(1)

cursor = False
calibration = 8
pupilPositions = []
sightFocus = [0, 0, 0, 0, 0, 0]

bgz = cv2.imread(os.path.join(data_path, 'bg.png'))


# Increase the brightness of an image
def increase_brightness(img, value):
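    # (The snippet is truncated here; the body below is a hedged sketch of the
    # common OpenCV HSV brightness boost, not necessarily this project's own.)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    v = cv2.add(v, value)  # saturating add keeps the V channel within uint8
    return cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2BGR)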
Code example #39
    def compare(self, name, hnp, npdata, hpy, pydata):
        import numpy

        npdata2 = npdata.copy()

        hnp2 = hnp.copy()
        hnp3 = hnp.copy()
        hpy2 = hpy.copy()
        hpy3 = hpy.copy()

        startTime = time.time()
        hnp.fill.numpy(npdata)
        numpyTime = time.time() - startTime

        if pydata.dtype != numpy.unicode_:
            for key in npdata:
                diff = (npdata[key] != npdata2[key]) & numpy.bitwise_not(
                    numpy.isnan(npdata[key])) & numpy.bitwise_not(
                        numpy.isnan(npdata2[key]))
                if numpy.any(diff):
                    raise AssertionError(
                        "npdata has been modified:\n{0}\n{1}\n{2}\n{3} vs {4}".
                        format(npdata[key], npdata2[key], numpy.nonzero(diff),
                               npdata[key][numpy.nonzero(diff)[0][0]],
                               npdata2[key][numpy.nonzero(diff)[0][0]]))

        hnp2.fill.numpy(npdata)
        hnp3.fill.numpy(npdata)
        hnp3.fill.numpy(npdata)
        assert (hnp + hnp2) == hnp3
        assert (hnp2 + hnp) == hnp3
        assert (hnp + hnp.zero()) == hnp2
        assert (hnp.zero() + hnp) == hnp2

        startTime = time.time()
        for d in pydata:
            if isinstance(d, numpy.unicode_):
                d = str(d)
            else:
                d = float(d)
            hpy.fill(d)
        pyTime = time.time() - startTime

        for h in [hpy2, hpy3, hpy3]:  # hpy3 is filled twice, mirroring hnp3 above
            for d in pydata:
                if isinstance(d, numpy.unicode_):
                    d = str(d)
                else:
                    d = float(d)
                h.fill(d)

        assert (hpy + hpy) == hpy3
        assert (hpy + hpy2) == hpy3
        assert (hpy2 + hpy) == hpy3
        assert (hpy + hpy.zero()) == hpy2
        assert (hpy.zero() + hpy) == hpy2

        hnpj = json.dumps(hnp.toJson())
        hpyj = json.dumps(hpy.toJson())

        if Factory.fromJson(hnp.toJson()) != Factory.fromJson(hpy.toJson()):
            raise AssertionError("\n numpy: {0}\npython: {1}".format(
                hnpj, hpyj))
        else:
            sys.stderr.write(
                "{0:45s} | numpy: {1:.3f}ms python: {2:.3f}ms = {3:g}X speedup\n"
                .format(name, numpyTime * 1000, pyTime * 1000,
                        self.twosigfigs(pyTime / numpyTime)))

        assert Factory.fromJson((hnp + hnp2).toJson()) == Factory.fromJson(
            (hpy + hpy2).toJson())
        assert Factory.fromJson(hnp3.toJson()) == Factory.fromJson(
            hpy3.toJson())
Code example #40
File: common.py Project: goto40/mdsd
def get_imask(thetype, bfrom, bto):
    return thetype(np.bitwise_not(get_mask(thetype, bfrom, bto)))
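
Illustrative use of the inverse mask, assuming a hypothetical get_mask that sets ones in bits bfrom..bto inclusive: ANDing with the inverted mask clears that bit field while leaving the rest of the word intact.

import numpy as np

def get_mask(thetype, bfrom, bto):
    # hypothetical counterpart: ones in bits bfrom..bto inclusive
    nbits = bto - bfrom + 1
    return thetype(((1 << nbits) - 1) << bfrom)

reg = np.uint8(0b10110110)
imask = np.uint8(np.bitwise_not(get_mask(np.uint8, 2, 4)))  # 0b11100011
cleared = reg & imask  # bits 2..4 zeroed -> 0b10100010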
Code example #41
File: coaddition.py Project: sbailey/desispec
def coadd_fibermap(fibermap):

    log = get_logger()
    log.debug("'coadding' fibermap")

    targets = np.unique(fibermap["TARGETID"])
    ntarget = targets.size

    jj = np.zeros(ntarget, dtype=int)
    for i, tid in enumerate(targets):
        jj[i] = np.where(fibermap["TARGETID"] == tid)[0][0]
    tfmap = fibermap[jj]

    #- initialize NUMEXP=-1 to check that they all got filled later
    tfmap['COADD_NUMEXP'] = np.zeros(len(tfmap), dtype=np.int16) - 1

    # smarter values for some columns
    for k in ['DELTA_X', 'DELTA_Y']:
        if k in fibermap.colnames:
            tfmap.rename_column(k, 'MEAN_' + k)
            xx = Column(np.zeros(ntarget))
            tfmap.add_column(xx, name='RMS_' + k)
    for k in ['NIGHT', 'EXPID', 'TILEID', 'SPECTROID', 'FIBER']:
        if k in fibermap.colnames:
            xx = Column(np.arange(ntarget))
            tfmap.add_column(xx, name='FIRST_' + k)
            xx = Column(np.arange(ntarget))
            tfmap.add_column(xx, name='LAST_' + k)
            xx = Column(np.arange(ntarget))
            tfmap.add_column(xx, name='NUM_' + k)

    for i, tid in enumerate(targets):
        jj = fibermap["TARGETID"] == tid

        #- coadded FIBERSTATUS = bitwise AND of input FIBERSTATUS
        tfmap['FIBERSTATUS'][i] = np.bitwise_and.reduce(
            fibermap['FIBERSTATUS'][jj])

        #- Only FIBERSTATUS=0 were included in the coadd
        fiberstatus_nonamp_bits = get_all_nonamp_fiberbitmask_val()
        fiberstatus_amp_bits = get_justamps_fiberbitmask()
        targ_fibstatuses = fibermap['FIBERSTATUS'][jj]
        nonamp_fiberstatus_flagged = (
            (targ_fibstatuses & fiberstatus_nonamp_bits) > 0)
        allamps_flagged = ((targ_fibstatuses
                            & fiberstatus_amp_bits) == fiberstatus_amp_bits)
        good_coadds = np.bitwise_not(nonamp_fiberstatus_flagged
                                     | allamps_flagged)
        tfmap['COADD_NUMEXP'][i] = np.count_nonzero(good_coadds)

        for k in ['DELTA_X', 'DELTA_Y']:
            if k in fibermap.colnames:
                vals = fibermap[k][jj]
                tfmap['MEAN_' + k][i] = np.mean(vals)
                tfmap['RMS_' + k][i] = np.sqrt(np.mean(
                    vals**2))  # inc. mean offset, not same as std

        for k in ['NIGHT', 'EXPID', 'TILEID', 'SPECTROID', 'FIBER']:
            if k in fibermap.colnames:
                vals = fibermap[k][jj]
                tfmap['FIRST_' + k][i] = np.min(vals)
                tfmap['LAST_' + k][i] = np.max(vals)
                tfmap['NUM_' + k][i] = np.unique(vals).size
        for k in ['FIBERASSIGN_X', 'FIBERASSIGN_Y', 'FIBER_RA', 'FIBER_DEC']:
            if k in fibermap.colnames:
                tfmap[k][i] = np.mean(fibermap[k][jj])
        for k in [
                'FIBER_RA_IVAR', 'FIBER_DEC_IVAR', 'DELTA_X_IVAR',
                'DELTA_Y_IVAR'
        ]:
            if k in fibermap.colnames:
                tfmap[k][i] = np.sum(fibermap[k][jj])

    return tfmap
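
The good-coadd selection above is plain bit arithmetic; a toy check with made-up bit values (not the actual DESI fiber bitmask):

import numpy as np

nonamp_bits = 0b0011   # hypothetical 'non-amp' failure bits
amp_bits = 0b1100      # hypothetical per-amplifier bits
statuses = np.array([0b0000, 0b0001, 0b1100, 0b0100])

coadd_status = np.bitwise_and.reduce(statuses)            # -> 0
nonamp_flagged = (statuses & nonamp_bits) > 0             # [F, T, F, F]
all_amps_flagged = (statuses & amp_bits) == amp_bits      # [F, F, T, F]
good = np.bitwise_not(nonamp_flagged | all_amps_flagged)  # [T, F, F, T]
n_good = np.count_nonzero(good)                           # -> 2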
Code example #42
File: binlog2r.py Project: dawe/scatACC
    binned_data.var_names = [f"{x[0]}:{x[1]}-{x[2]}" for x in binner.values]
    nz = np.sum(binned_data.X != 0, axis=1)
    binned_data.obs['dropout'] = (binned_data.X.shape[1] - nz) / binned_data.X.shape[1] 
    binned_data.obs['n_reads'] = binned_data.X.sum(axis=1)

    binned_data.var['gc_content'] = binner['gc_content'].values
    binned_data.var['mapability'] = binner['mapability'].values

    binned_data.write(f"{options.prefix}_bin_counts.h5ad")
    binned_data.var.fillna(0, inplace=True)

    binned_data.X = np.log2( binned_data.X / np.mean(adata.X, axis=1)[:, None])

    binned_data.X[np.bitwise_not(np.isfinite(binned_data.X))] = -np.ptp(binned_data.X[np.isfinite(binned_data.X)])/2

    binned_data = binned_data.copy().T

    sc.pp.regress_out(binned_data, ['gc_content', 'mapability'])

    sc.pp.scale(binned_data, max_value=4)

    binned_data = binned_data.copy().T
    binned_data.write(f"{options.prefix}_log2r.h5ad")


if __name__ == '__main__':
    main()
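
The non-finite replacement above (zero counts give -inf after the log2 ratio) can be checked on toy data:

import numpy as np

X = np.array([[4., 0., 8.],
              [2., 2., 0.]])
L = np.log2(X / np.mean(X, axis=1)[:, None])  # zeros become -inf
finite = np.isfinite(L)
L[np.bitwise_not(finite)] = -np.ptp(L[finite]) / 2  # fill with -range/2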

Code example #43
def _decode_fixed_length(file_bytes, fields):
    """Decode a fixed length APID.
    
    Parameters
    ----------
    file_bytes : array 
       A NumPy array of uint8 type, holding the bytes of the file to decode.
    fields : list of ccsdspy.interface.PacketField
       A list of fields, including the secondary header but excluding the
       primary header.

    Returns
    -------
    Ordered dictionary mapping field names to NumPy arrays.
    """
    # Setup a dictionary mapping a bit offset to each field. It is assumed
    # that the `fields` array contains entries for the secondary header.
    packet_nbytes = file_bytes[4] * 256 + file_bytes[5] + 7
    body_nbytes = sum(field._bit_length for field in fields) // 8
    counter = (packet_nbytes - body_nbytes) * 8

    bit_offset = {}

    for i, field in enumerate(fields):
        if i == 0 and field._bit_offset is not None:
            # case: using bit_offset to fix the start position
            bit_offset[field._name] = field._bit_offset
            counter = field._bit_offset + field._bit_length
        elif field._bit_offset is None:
            # case: floating start position such that the packet definition fills
            # to the end of the packet. What's missing is assumed to be header at the beginning.
            bit_offset[field._name] = counter
            counter += field._bit_length
        elif field._bit_offset < counter:
            # case: bit_offset specifying to backtrack. This condition
            # seems odd and unlikely. Eg. one or more bits of a packet overlap?
            bit_offset[field._name] = field._bit_offset
            # don't update counter unless the overlap goes past counter
            counter = max(field._bit_offset + field._bit_length, counter)
        elif field._bit_offset >= counter:
            # case: otherwise, bit_offset is ahead of counter and we're skipping
            # definition of 0 or more bits.
            bit_offset[field._name] = field._bit_offset
            counter = field._bit_offset + field._bit_length
        elif field._bit_length - field._tail_offset <= counter:
            # case: counter has reached end of data field
            bit_offset[field._name] = field._bit_offset
            counter = field._bit_length - field._tail_offset
        else:
            raise RuntimeError(
                ("Unexpected case: could not compare"
                 " bit_offset {} with counter {} for field {}").format(
                     field._bit_offset, counter, field._name))

    if all(field._bit_offset is None for field in fields):
        assert counter == packet_nbytes * 8, \
            'Field definition != packet length (off by {n} bits)'.format(
                n=counter - packet_nbytes * 8)
    elif counter > packet_nbytes * 8:
        raise RuntimeError(
            ("Packet definition larger than packet length"
             " by {} bits").format(counter - (packet_nbytes * 8)))

    # Set up metadata for each field, consisting of where to look for the field in
    # the file and how to parse it.
    FieldMeta = namedtuple(
        'Meta', ['nbytes_file', 'start_byte_file', 'nbytes_final', 'np_dtype'])
    field_meta = {}

    for field in fields:
        nbytes_file = np.ceil(field._bit_length / 8.).astype(int)

        if (bit_offset[field._name] % 8
                and bit_offset[field._name] % 8 + field._bit_length > 8):
            nbytes_file += 1

        nbytes_final = {3: 4, 5: 8, 6: 8, 7: 8}.get(nbytes_file, nbytes_file)
        start_byte_file = bit_offset[field._name] // 8

        # byte_order_symbol is only used to control float types here.
        #  - uint and int byte order are handled with byteswap later
        #  - fill is independent of byte order (all 1's)
        #  - byte order is not applicable to str types
        byte_order_symbol = "<" if field._byte_order == "little" else ">"
        np_dtype = {
            'uint': '>u%d' % nbytes_final,
            'int': '>i%d' % nbytes_final,
            'fill': '>u%d' % nbytes_final,
            'float': '%sf%d' % (byte_order_symbol, nbytes_final),
            'str': 'S%d' % nbytes_final,
        }[field._data_type]

        field_meta[field] = FieldMeta(nbytes_file, start_byte_file,
                                      nbytes_final, np_dtype)

    # Read the file and calculate the length of a packet and the number of packets
    # in the file. Trim extra bytes that may have been left by a break in the
    # downlink while a packet was being transferred.
    extra_bytes = file_bytes.size % packet_nbytes

    if extra_bytes > 0:
        file_bytes = file_bytes[:-extra_bytes]

    packet_count = file_bytes.size // packet_nbytes

    # Create byte arrays for each field. At the end of this method they are left
    # as the numpy uint8 type.
    field_bytes = {}

    for field in fields:
        meta = field_meta[field]
        arr = np.zeros(packet_count * meta.nbytes_final, 'u1')
        xbytes = meta.nbytes_final - meta.nbytes_file

        for i in range(xbytes, meta.nbytes_final):
            arr[i::meta.nbytes_final] = (file_bytes[meta.start_byte_file + i -
                                                    xbytes::packet_nbytes])

        field_bytes[field] = arr

    # Switch dtype of byte arrays to the final dtype, and apply masks and shifts
    # to interpret the correct bits.
    field_arrays = OrderedDict()

    for field in fields:
        meta = field_meta[field]
        arr = field_bytes[field]
        arr.dtype = meta.np_dtype

        if field._data_type in ('int', 'uint'):
            xbytes = meta.nbytes_final - meta.nbytes_file

            bitmask_left = (bit_offset[field._name] + 8 * xbytes -
                            8 * meta.start_byte_file)

            bitmask_right = (8 * meta.nbytes_final - bitmask_left -
                             field._bit_length)

            bitmask_left, bitmask_right = (np.array(
                [bitmask_left, bitmask_right]).astype(meta.np_dtype))

            bitmask = np.zeros(arr.shape, meta.np_dtype)
            bitmask |= (1 << int(8 * meta.nbytes_final - bitmask_left)) - 1
            tmp = np.left_shift([1], bitmask_right)
            bitmask &= np.bitwise_not(tmp[0] - 1).astype(meta.np_dtype)

            arr &= bitmask
            arr >>= bitmask_right

            if field._byte_order == 'little':
                arr.byteswap(inplace=True)

        field_arrays[field._name] = arr

    return field_arrays
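
A worked, standalone example of the mask-and-shift logic above, extracting a 3-bit unsigned field that starts at bit 2 (from the left) of a single byte; the values are illustrative:

import numpy as np

bit_length = 3
bitmask_left = 2                               # bits to drop on the left
bitmask_right = 8 - bitmask_left - bit_length  # bits to drop on the right (3)

arr = np.array([0b10110110], dtype='u1')
bitmask = np.zeros(arr.shape, 'u1')
bitmask |= (1 << int(8 - bitmask_left)) - 1           # 0b00111111
tmp = np.left_shift([1], bitmask_right)
bitmask &= np.bitwise_not(tmp[0] - 1).astype('u1')    # -> 0b00111000

value = (arr & bitmask) >> bitmask_right              # -> [6] == [0b110]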