Example No. 1
    def easiest_sqi_interval(self):
        """
        Easiest sequential-quadratic-interpolation NIP, if there
        is any and the estimate is better than xmin - epsilon.

        Note that to use NIP index as interval index, you should check
        if the qxmin[i] is smaller or larger than points[i+1] - the
        NIP covers three points, not just two!
        """
        # Do not take guesses that are too near one of the interval
        # pair boundaries
        if self.axis is None:
            qxmin_suitable = np.logical_and(self.qxmin - self.points > self.tolx,
                                            np.roll(self.points, -2) - self.qxmin > self.tolx)
        else:
            qxmin_suitable = np.logical_and(self.qxmin[:,self.axis] - self.points[:,self.axis] > self.tolx,
                                            np.roll(self.points[:,self.axis], -2) - self.qxmin[:,self.axis] > self.tolx)
        iqfmin = [(i, qfmin) for i, qfmin in enumerate(self.qfmin) if qxmin_suitable[i]]
        if len(iqfmin) == 0:
            # print('stop split')
            return None  # We cannot split further
        i, qfmin = min(iqfmin, key=itemgetter(1))
        if qfmin > self.fmin - self.epsilon and (self.force_Brent == 0 or self.itercnt % self.force_Brent > 0):
            # print('%s > %s' % (qfmin, self.fmin - self.epsilon))
            return None  # Even the best estimate is too high
        return i
def generate_RI_text_fast(N, RI_letters, cluster_sz, ordered, text_name, alph=alphabet):
	text_vector = np.zeros((1, N))
	text = utils.load_text(text_name)
	cluster = ''
	vector = np.ones((1,N))
	for char_num in xrange(len(text)):		
		cluster = cluster + text[char_num]
		if len(cluster) < cluster_sz:
			continue
		elif len(cluster) > cluster_sz:
			prev_letter = cluster[0]
			prev_letter_idx = alphabet.find(prev_letter)
			inverse = np.roll(RI_letters[prev_letter_idx,:], cluster_sz-1)
			vector = np.multiply(vector, inverse)
			vector = np.roll(vector, 1)
			letter = text[char_num]
			letter_idx = alphabet.find(letter)
			vector = np.multiply(vector, RI_letters[letter_idx,:])
			cluster = cluster[1:]
		else: # (len(cluster) == cluster_sz), happens once
			letters = list(cluster)
			for letter in letters:
				vector = np.roll(vector,1)
				letter_idx = alphabet.find(letter)
				vector = np.multiply(vector, RI_letters[letter_idx,:])
		text_vector += vector
	return text_vector
Example No. 3
    def build_loss(self):
        r"""Implements the N-dim version of function
        $$TV^{\beta}(x) = \sum_{whc} \left ( \left ( x(h, w+1, c) - x(h, w, c) \right )^{2} +
        \left ( x(h+1, w, c) - x(h, w, c) \right )^{2} \right )^{\frac{\beta}{2}}$$
        to return total variation for all images in the batch.
        """
        image_dims = K.ndim(self.img) - 2

        # Constructing slice [1:] + [:-1] * (image_dims - 1) and [:-1] * (image_dims)
        start_slice = [slice(1, None, None)] + [slice(None, -1, None) for _ in range(image_dims - 1)]
        end_slice = [slice(None, -1, None) for _ in range(image_dims)]
        samples_channels_slice = [slice(None, None, None), slice(None, None, None)]

        # Compute pixel diffs by rolling slices to the right per image dim.
        tv = None
        for i in range(image_dims):
            ss = tuple(samples_channels_slice + start_slice)
            es = tuple(samples_channels_slice + end_slice)
            diff_square = K.square(self.img[utils.slicer[ss]] - self.img[utils.slicer[es]])
            tv = diff_square if tv is None else tv + diff_square

            # Roll over to next image dim
            start_slice = np.roll(start_slice, 1).tolist()
            end_slice = np.roll(end_slice, 1).tolist()

        tv = K.sum(K.pow(tv, self.beta / 2.))
        return normalize(self.img, tv)
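For intuition, here is a minimal NumPy-only sketch (my own addition, not the Keras implementation above) of the same total-variation formula for a single 2-D, single-channel image; beta plays the role of self.beta.

import numpy as np

def total_variation_2d(x, beta=2.0):
    """Sum over pixels of ((horizontal diff)^2 + (vertical diff)^2)^(beta/2)."""
    dh = x[1:, :-1] - x[:-1, :-1]   # x(h+1, w) - x(h, w)
    dw = x[:-1, 1:] - x[:-1, :-1]   # x(h, w+1) - x(h, w)
    return np.sum((dh ** 2 + dw ** 2) ** (beta / 2.0))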
Example No. 4
def plot_filter(filter_type, alpha, ts, sc, overlap):
    import matplotlib.pyplot as plt
    time_taps = gfdm_filter_taps(filter_type, alpha, ts, sc)
    freq_taps = gfdm_freq_taps(time_taps)
    freq_taps_sparse = gfdm_freq_taps_sparse(freq_taps, ts, overlap)

    fig = plt.figure()
    tp = fig.add_subplot('211')
    t = np.arange(0, ts, 1. / sc)
    time_taps = np.fft.ifftshift(time_taps)
    plt.plot(t, np.abs(time_taps))
    plt.plot(t, np.abs(np.fft.ifftshift(freq_tapered_raised_cosine(t - 1. * ts / 2., alpha))))
    plt.plot(t, np.roll(np.abs(time_taps), sc))
    plt.xlim((0, ts))
    plt.xlabel('timeslot')
    plt.grid()

    fp = fig.add_subplot('212')
    f = np.arange(0, sc, 1. / ts)
    plt.plot(f, np.abs(freq_taps))
    plt.plot(f, np.abs(np.fft.fft(freq_tapered_raised_cosine(t - 1. * ts / 2., alpha))))
    plt.plot(f, np.abs(np.concatenate((freq_taps_sparse[0:len(freq_taps_sparse) // 2],
                                       np.zeros(sc * ts - len(freq_taps_sparse)),
                                       freq_taps_sparse[len(freq_taps_sparse) // 2:]))), linestyle='--')

    plt.plot(f, np.roll(np.abs(freq_taps), ts * (sc // 2)))
    plt.plot(f, np.roll(np.abs(freq_taps), ts * (sc // 2 + 1)))
    plt.xlim((0, sc))
    plt.xlabel('subcarrier')
    plt.grid()

    plt.gcf().suptitle("GFDM filter: type='{}' with M={}, K={}, L={}".format(filter_type.upper(), ts, sc, overlap), fontsize=16)
    plt.show()
Example No. 5
 def _make_segment(self,x,y,threshold=None):
     if threshold is None:
         threshold = self._segment_threshold
     x,y=np.atleast_1d(x),np.atleast_1d(y)
     d2 = np.sqrt((np.roll(x,1)-x)**2+(np.roll(y,1)-y)**2)
     w=np.where(d2 > threshold)[0]
     #w=w[w!=0]
     xx=[]
     yy=[]
     if len(w) == 1:
         x=np.roll(x,-w[0])
         y=np.roll(y,-w[0])
         xx.append(x)
         yy.append(y)
     elif len(w) >= 2:
         xx.append(x[0:w[0]])
         yy.append(y[0:w[0]])
         for i in xrange(len(w)-1):
             xx.append(x[w[i]:w[i+1]])
             yy.append(y[w[i]:w[i+1]])
         xx.append(x[w[-1]:])
         yy.append(y[w[-1]:])
     else:
         xx.append(x)
         yy.append(y)
     return xx,yy
Example No. 6
    def imageCoCenter(self, inst, algo):

        x1, y1, tmp = getCenterAndR_ef(self.image)
        if algo.debugLevel >= 3:
            print('imageCoCenter: (x1,y1)=(%8.2f,%8.2f)\n' % (x1, y1))

        stampCenterx1 = inst.sensorSamples / 2. + 0.5
        stampCentery1 = inst.sensorSamples / 2. + 0.5
        radialShift = 3.5 * algo.upReso * \
            (inst.offset / 1e-3) * (10e-6 / inst.pixelSize)

        radialShift = radialShift * self.fldr / 1.75
        if (self.fldr > 1.75):
            radialShift = 0

        if self.fldr != 0:
            I1c = self.fieldX / self.fldr
            I1s = self.fieldY / self.fldr
        else:
            I1c = 0
            I1s = 0

        stampCenterx1 = stampCenterx1 + radialShift * I1c
        stampCentery1 = stampCentery1 + radialShift * I1s

        self.image = np.roll(self.image, int(
            np.round(stampCentery1 - y1)), axis=0)
        self.image = np.roll(self.image, int(
            np.round(stampCenterx1 - x1)), axis=1)
def laplacian(grid, out):
    np.copyto(out, grid)
    out *= -4
    out += np.roll(grid, +1, 0)
    out += np.roll(grid, -1, 0)
    out += np.roll(grid, +1, 1)
    out += np.roll(grid, -1, 1)
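As a quick sanity check of the stencil above (my own addition): it is the standard periodic 5-point Laplacian, so applying it to a constant grid must give zeros.

import numpy as np

grid = np.ones((4, 4))           # constant field
out = np.empty_like(grid)
laplacian(grid, out)             # periodic 5-point stencil built from np.roll
print(np.allclose(out, 0.0))     # True: the Laplacian of a constant field is zero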
Example No. 8
def preprocessing(signal, param):
    """
    各シグナルの前処理(確認済)
    @param singal: fNIRS信号 1次元array型
    @param param: パラメータモジュール  
    
    @return: 前処理終了後のシグナル値 1次元array型(特徴次元数)
    """            
    ###### データの前処理
    # バンドパスフィルタ
    if param.FILTER_TYPE == "butter":
        preprocessed_signal = prba.butter_bandpass_filter(signal, param.BPF_BAND[0], param.BPF_BAND[1], 
                                                          param.FS, order = param.BUTTER_WORTH_ORDER)
    else: # フィルタ処理無し
        preprocessed_signal = signal 
    
    # 平滑化
    if param.SMOOTHING_TYPE:
        preprocessed_signal = prsm.smoothing(preprocessed_signal, param.SMOOTHING_LENGTH)
    else: # 平滑化処理無し
        pass
    
    # 1次微分
    if param.DIFF >= 1:
        preprocessed_signal_n = np.roll(preprocessed_signal, -1)
        preprocessed_signal = preprocessed_signal_n - preprocessed_signal
    # 2次微分
    elif param.DIFF == 2:
        preprocessed_signal_n = np.roll(preprocessed_signal, -1)
        preprocessed_signal_p = np.roll(preprocessed_signal, 1)
        preprocessed_signal = (preprocessed_signal_n - preprocessed_signal) - (preprocessed_signal - preprocessed_signal_p)
    
    return preprocessed_signal
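A small self-contained illustration (my own, not part of the snippet) of the np.roll-based differencing used above: np.roll(x, -1) - x is the forward difference everywhere except at the last element, where the roll wraps around to x[0].

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])
first_diff = np.roll(x, -1) - x
print(first_diff)   # [ 1.  2.  3. -6.]  <- the last entry wraps around: x[0] - x[-1]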
Example No. 9
def _decorate_contour_segment(data, stride=1, options={}, tomax=True, labelled=False, outline=None, aspect=1):
	default_options = {'scale': 0.2,
			'scale_units': 'dots',
			'headaxislength': 2,
			'headlength': 2,
			'headwidth': 2,
			'minshaft': 1,
			'units': 'dots',
			#'angles': 'xy',
			'edgecolor': outline,
			'linewidth': 0 if outline is None else 0.2
		}
	default_options.update(options)

	x = data[::stride,0]
	y = data[::stride,1]

	sign = 1 if tomax else -1
	dx = -sign*np.diff(y)*aspect
	dy = sign*np.diff(x)
	l = np.sqrt(dx**2+dy**2)
	dx /= l
	dy /= l

	x = 0.5*(x+np.roll(x,-1))
	y = 0.5*(y+np.roll(y,-1))

	if labelled:
		x,y,dx,dy = x[1:-2], y[1:-2], dx[1:-1], dy[1:-1]
	else:
		x,y = x[:-1], y[:-1]

	plt.quiver(x, y, dx, dy, **default_options)
    def apply_channel_deconvolution (self, img, psfsize=10, snrVal=8):    # Based on deconvolution.py in python samples of opencv
        
        img = img.astype('double')/255.0
        img = self.blur_edge(img)
        IMG = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
    
        if (psfsize==0): return img
        
        defocus = True
    
        ang = 0
        d = psfsize
        snr = snrVal
        noise = 10**(-0.1*snr)

        if defocus:
            psf = self.defocus_kernel(d)
        else:
            psf = self.motion_kernel(ang, d)

        psf /= psf.sum()
        psf_pad = np.zeros_like(img)
        kh, kw = psf.shape
        psf_pad[:kh, :kw] = psf
        PSF = cv2.dft(psf_pad, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows = kh)
        PSF2 = (PSF**2).sum(-1)
        iPSF = PSF / (PSF2 + noise)[...,np.newaxis]
        RES = cv2.mulSpectrums(IMG, iPSF, 0)
        res = cv2.idft(RES, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
        res = np.roll(res, -kh//2, 0)
        res = np.roll(res, -kw//2, 1)

        return res
Example No. 11
def make_step(net, step_size=1.5, end=default_layer, jitter=32, clip=True, objective=objective_L2, sigma=0):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]    # the layer targeted by default_layer

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)     # inference of features

    objective(dst)           # set an objective

    net.backward(start=end)  # backpropagate to get the gradient w.r.t. the input

    g = src.diff[0]

    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)

    if sigma:
        src.data[0] = blur(src.data[0], sigma)
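The jitter trick above relies on np.roll being exactly invertible; here is a tiny standalone check (mine, not part of the original DeepDream code):

import numpy as np

img = np.random.rand(3, 32, 32)                          # a fake C x H x W blob
ox, oy = 5, -7
shifted = np.roll(np.roll(img, ox, -1), oy, -2)          # apply jitter shift
restored = np.roll(np.roll(shifted, -ox, -1), -oy, -2)   # undo it
print(np.array_equal(img, restored))                     # True: rolling is lossless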
Example No. 12
    def draw_svd(self,comp='V',nmodes=6,center=False,zoom = False,
                 interp='none'):
        #compute svd is not present . . later

        fig = plt.figure()
        try:
            y = self.svd[comp]
        except:
            y = self.svd['V']

        for i in  xrange(nmodes):
            canvas = fig.add_subplot(2,3,i+1)
            
            # if comp=='V':
            #     y_i = y[i,:,:]
            # else:
            #     y_i = y[:,i]
            y_i = y[i]
        
            nx,ny = y_i.shape
            if center:
                y_i = np.roll(np.roll(y_i,nx/2,axis=0),ny/2,axis=1)
            if zoom and not center:
                y_i = y_i[0:nx//8,0:ny//8]
            if zoom and center:
                y_i = y_i[nx//2-nx//8:nx//2+nx//8,
                          ny//2-ny//16:ny//2+ny//16]
    
            canvas.imshow(np.real(y_i),aspect='auto',interpolation= interp)
        
        plt.close(fig)
        return fig
Example No. 13
def local_maxima(array2d,user_peak,index=False,count=4,floor=0,bug=False):

    from operator import itemgetter, attrgetter
    
    if user_peak == 0:
        where = ((array2d >= np.roll(array2d,1,0)) &
                 (array2d >= np.roll(array2d,-1,0)) &
                 (array2d >= np.roll(array2d,1,1)) &
                 (array2d >= np.roll(array2d,-1,1)) &
                 (array2d >= array2d.max()/5.0) &
                 (array2d > floor*np.ones(array2d.shape)) &
                 (array2d >= array2d.mean()))
    else: #some simpler filter if user indicated some modes
        where = array2d > floor

    #ignore the lesser local maxima, throw out anything with a ZERO
    if bug==True:    
        print array2d,array2d[where.nonzero()],where.nonzero()[0]
    
    peaks = zip(where.nonzero()[0],where.nonzero()[1],array2d[where.nonzero()])
    
    peaks = sorted(peaks,key=itemgetter(2),reverse=True)
   
    if len(peaks) > count and user_peak==0:
        peaks = peaks[0:count]
    
            
    keys = ['y_i','z_i','amp']
    
    peaks = [dict(zip(keys,peaks[x])) for x in range(len(peaks))]
    
    return peaks
	def GetReconstructionWithBoundary(self,N,orientationmap,filename):
		"""
		Decorate the orientation map with boundaries.
		"""
		indexmap = self.GetIndexmap()
		newindexmap = numpy.empty(numpy.array(indexmap.shape)*4)
		for i in range(4):
			for j in range(4):
				newindexmap[i::4,j::4] = indexmap
		neworientationmap = numpy.empty([4*N,4*N,3])
		for k in range(3):
			for i in range(4):
				for j in range(4):
					neworientationmap[i::4,j::4,k] = orientationmap[:,:,k]
		boundarymap =  1-(indexmap==numpy.roll(indexmap, 1,0))*(indexmap==numpy.roll(indexmap,-1,0))*\
					   (indexmap==numpy.roll(indexmap, 1,1))*(indexmap==numpy.roll(indexmap,-1,1))
		xs,ys = boundarymap.nonzero()
		for (i,j) in zip(xs,ys):
			temp = [indexmap[i,j]==indexmap[(i-1+N)%N,j],indexmap[i,j]==indexmap[(i+1)%N,j],\
					indexmap[i,j]==indexmap[i,(j-1+N)%N],indexmap[i,j]==indexmap[i,(j+1)%N]]
			pos = [[(4*i,4*j),(4*i+1,4*j+4)],[(4*i+3,4*j),(4*i+4,4*j+4)],\
				   [(4*i,4*j),(4*i+4,4*j+1)],[(4*i,4*j+3),(4*i+4,4*j+4)]]
			for n in range(4):
				if not temp[n]:
					newindexmap[pos[n][0][0]:pos[n][1][0],pos[n][0][1]:pos[n][1][1]] = -1
		for k in range(3):
			neworientationmap[:,:,k] *= (newindexmap!=-1)
		"""
		Use PIL to plot for larger sizes, since we want to be able to draw pixel by pixel.
		"""
		from PIL import Image
		pilImage = Image.fromarray(neworientationmap.astype('uint8'), 'RGB')
		pilImage.save(filename+'.png')
Example No. 15
    def _process_data_gated(self):
        """
        Processes the raw data from the counting device
        @return:
        """
        # remember the new count data in circular array
        self.countdata[0] = np.average(self.rawdata[0])
        # move the array to the left to make space for the new data
        self.countdata = np.roll(self.countdata, -1)
        # also move the smoothing array
        self.countdata_smoothed = np.roll(self.countdata_smoothed, -1)
        # calculate the median and save it
        self.countdata_smoothed[-int(self._smooth_window_length / 2) - 1:] = np.median(
            self.countdata[-self._smooth_window_length:])

        # save the data if necessary
        if self._saving:
            # if oversampling is necessary
            if self._counting_samples > 1:
                self._sampling_data = np.empty((self._counting_samples, 2))
                self._sampling_data[:, 0] = time.time() - self._saving_start_time
                self._sampling_data[:, 1] = self.rawdata[0]
                self._data_to_save.extend(list(self._sampling_data))
            # if we don't want to use oversampling
            else:
                # append tuple to data stream (timestamp, average counts)
                self._data_to_save.append(np.array((time.time() - self._saving_start_time,
                                                    self.countdata[-1])))
        return
Example No. 16
def imageShiftAndCrop( mage, shiftby ):
    """ imageShiftAndCrop( mage, shiftby )
    This is a relative shift, integer pixel only, pads with zeros to cropped edges
    
    mage = input image
    shiftby = [y,x] pixel shifts    
    """
    
    # Actually best approach is probably to roll and then zero out the parts we don't want
    # The pad function is expensive in comparison

    shiftby = np.array( shiftby, dtype='int' )
    # Shift X
    if(shiftby[1] < 0 ):
        mage = np.roll( mage, shiftby[1], axis=1 )
        mage[:, shiftby[1]+mage.shape[1]:] = 0.0
    elif shiftby[1] == 0:
        pass
    else: # positive shift
        mage = np.roll( mage, shiftby[1], axis=1 )
        mage[:, :shiftby[1]] = 0.0
    # Shift Y
    if( shiftby[0] < 0 ):
        mage = np.roll( mage, shiftby[0], axis=0 )
        mage[shiftby[0]+mage.shape[0]:,:] = 0.0
    elif shiftby[0] == 0:
        pass
    else:  # positive shift
        mage = np.roll( mage, shiftby[0], axis=0 )
        mage[:shiftby[0],:] = 0.0
    return mage
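A small usage example (my own, assuming imageShiftAndCrop from above is in scope) showing how the rolled-in rows/columns are zeroed instead of wrapping around:

import numpy as np

mage = np.arange(16, dtype=float).reshape(4, 4)
shifted = imageShiftAndCrop(mage, [1, -2])   # down 1 row, left 2 columns
print(shifted)   # row 0 and the last two columns are zero-padded, not wrapped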
Example No. 17
    def polyhedra(self, wm):
        '''Iterates through the polyhedra that make up the closest volume to a certain vertex'''
        for p, facerow in enumerate(self.connected):
            faces = facerow.indices
            pts, polys = _ptset(), _quadset()
            if len(faces) > 0:
                poly = np.roll(self.polys[faces[0]], -np.nonzero(self.polys[faces[0]] == p)[0][0])
                assert pts[wm[p]] == 0
                assert pts[self.pts[p]] == 1
                pts[wm[poly[[0, 1]]].mean(0)]
                pts[self.pts[poly[[0, 1]]].mean(0)]

                for face in faces:
                    poly = np.roll(self.polys[face], -np.nonzero(self.polys[face] == p)[0][0])
                    a = pts[wm[poly].mean(0)]
                    b = pts[self.pts[poly].mean(0)]
                    c = pts[wm[poly[[0, 2]]].mean(0)]
                    d = pts[self.pts[poly[[0, 2]]].mean(0)]
                    e = pts[wm[poly[[0, 1]]].mean(0)]
                    f = pts[self.pts[poly[[0, 1]]].mean(0)]

                    polys((0, c, a, e))
                    polys((1, f, b, d))
                    polys((1, d, c, 0))
                    polys((1, 0, e, f))
                    polys((f, e, a, b))
                    polys((d, b, a, c))

            yield pts.points, np.array(list(polys.triangles))
Example No. 18
  def add_new( self, new_timestamp, new_data ):
    """
    ---------------------------------------------------------------------------
    Add a new data item to the buffer
    """    

    if np.ndim( new_data )==1:
      if np.shape( new_data )[0] != self._dimensions:
        return False
    else:
      # make sure incoming data has correct shape
      if np.shape( new_data ) != self._dimensions:
        return False
    
    self._lock.acquire()

    # roll the arrays forward by one
    self._data = np.roll( self._data, 1 , 0 )
    self._timestamps = np.roll( self._timestamps, 1 , 0 )
    
    # insert the new data at the head 
    self._data[0] = copy.deepcopy( new_data )
    self._timestamps[0] = copy.deepcopy( new_timestamp ) 

    # increment the rollcounter
    self._rollcount += 1

    # release the lock    
    self._lock.release()
    
    return True
Example No. 19
def sphericalpolygon_area(lons, lats, R=6371000.):
	"""
	USAGE
	-----
	area = sphericalpolygon_area(lons, lats, R=6371000.)

	Calculates the area of a polygon on the surface of a sphere of
	radius R using Girard's Theorem, which states that the area of
	a polygon of great circles is R**2 times the sum of the angles
	between the polygons minus (N-2)*pi, where N is number of corners.
	R = 6371000 m (6371 km, default) is a typical value for the mean
	radius of the Earth.

	Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python
	"""
	lons, lats = map(np.asanyarray, (lons, lats))
	N = lons.size

	angles = np.empty(N)
	for i in xrange(N):

	    phiB1, phiA, phiB2 = np.roll(lats, i)[:3]
	    LB1, LA, LB2 = np.roll(lons, i)[:3]

	    # calculate angle with north (eastward)
	    beta1 = greatCircleBearing(LA, phiA, LB1, phiB1)
	    beta2 = greatCircleBearing(LA, phiA, LB2, phiB2)

	    # calculate angle between the polygons and add to angle array
	    angles[i] = np.arccos(np.cos(-beta1)*np.cos(-beta2) + np.sin(-beta1)*np.sin(-beta2))

	return (np.sum(angles) - (N-2)*np.pi)*R**2
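A quick sanity check of Girard's theorem as used above (my own note, not part of the snippet): a spherical triangle covering one octant of the sphere has three right angles, so its area is R**2 * (3*pi/2 - pi) = pi*R**2/2, exactly one eighth of the full sphere area 4*pi*R**2.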
Example No. 20
def find_max_cov(fwd_vals, rev_vals):    
    max_cov = 0.0
    max_cov_shift = 0

    for i in range(MIN_SHIFT, MAX_SHIFT):
        # shift fwd values ahead by i bp
        # shift rev values back by i bp
        matrix = np.vstack((np.roll(fwd_vals, i),
                            np.roll(rev_vals, -i)))

        # compute covariance between fwd and rev
        cov = np.cov(matrix)[0,1]

        max_str = ""
        if cov > max_cov:
            # this is the highest covariance we've seen yet
            max_cov = cov
            max_cov_shift = i
            max_str = " *"

        last_max = i - max_cov_shift

        if last_max > MAX_SHIFT_MAX_DIST:
            # we seem to be well past the peak covariance
            break

        sys.stderr.write("shift: %d, cov: %g%s\n" % (i, cov, max_str))
        
    sys.stderr.write("shift: %d, cov: %g\n" % (max_cov_shift, max_cov))

    return max_cov_shift
Example No. 21
def make_step2(net, step_size=1.5, end='inception_4c/output',
              jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    if jitter==0:
        ox=0
        oy=0
    else:
        ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    if not np.abs(g).mean()==0:
        src.data[:] += step_size/np.abs(g).mean() * g
        src.data[:] -= 0.005*step_size*src.data[:]

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
Example No. 22
def get_corr_map_photon(coo1, coo2, skypos, skyrange, sec, bound, bandwidth):
  imsz = imagetools.deg2pix(skypos, skyrange, 0.0001)
  count = np.zeros(imsz)
  print(imsz)
  co_rel = np.array([[0,0]])
  len1 = coo1.shape[0]
  len2 = coo2.shape[0]
  print(len1,len2)
  wcs = imagetools.define_wcs(skypos,skyrange,width=False,height=False,verbose=0,pixsz=0.0001)

  if len2<=80 or len1<=80:
    return count, np.array([0.0, 0.0])
  if len2>len1:
    for i in range(len2):
      #print(i)
      tmp_co = np.roll(coo2, i, axis=0)[0:len1,:]-coo1
      tmp_co = tmp_co[np.absolute(tmp_co[:,0])<=bound,:]
      tmp_co = tmp_co[np.absolute(tmp_co[:,1])<=bound,:]
      co_rel = np.concatenate((co_rel, tmp_co), axis = 0)

  else:
    for i in range(len1):
      #print(i)
      tmp_co = coo2-np.roll(coo1, i, axis=0)[0:len2,:]
      tmp_co = tmp_co[np.absolute(tmp_co[:,0])<=bound,:]
      tmp_co = tmp_co[np.absolute(tmp_co[:,1])<=bound,:]
      co_rel = np.concatenate((co_rel, tmp_co), axis = 0)

  print(co_rel.shape)
  if co_rel.shape[0]>1000:
    centroid = ck.find_centroid(co_rel[1:], bandwidth, 11, bound)
  else:
    return count, np.array([0.0, 0.0])

  return count, centroid
 def GetPerimeter(self,sites=None):
     """
     Find out the associated perimeter of this cluster.
     """
     if sites is None:
         indices = self.sites.get_indices()
     else:
         indices = sites.get_indices()
     area = len(indices)
     if area <= 3:
         return 2.*(area+1.)
     else:
         cluster = numpy.zeros((self.N,self.N))
         cluster[indices[:,0],indices[:,1]] = 1.
         mask = (cluster>0)
         newcluster = numpy.copy(cluster)
         newcluster += 1.*(cluster==numpy.roll(cluster,1,0))*mask
         newcluster += 1.*(cluster==numpy.roll(cluster,1,1))*mask
         newcluster += 1.*(cluster==numpy.roll(cluster,-1,0))*mask
         newcluster += 1.*(cluster==numpy.roll(cluster,-1,1))*mask
         """
         Any site with n (1<=n<=4) neighbors will contribute (4-n) to the perimeter;
         """
         perimeter = numpy.sum(newcluster==2.)*3.+numpy.sum(newcluster==3.)*2.+numpy.sum(newcluster==4.)
         return perimeter 
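A worked example of the counting rule above (my own note): for a 2x2 square cluster, every site has exactly two like neighbours, so every occupied entry of newcluster equals 3 and the sum evaluates to 4 * 2 = 8, matching the geometric perimeter of a 2x2 block.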
Example No. 24
	def __init__(self,*arg,**kw):
		super(build_model, self).__init__(*arg, **kw)
		self['nlayers'] = 5
		self['nx'] = 500
		fault_throw = 20
		
		self['dz'] = np.array([40, 80, 40, 200, 400, ])
		self['vp'] = np.array([800., 2200., 1800., 2400., 4500., ])
		self['vs'] = self['vp']/2.
		self['rho'] = np.array([1500., 2500., 1400., 2700., 4500., ])
		self['depths'] = np.cumsum(self['dz'])
		
		self['model'] = {}
		for model in ['vp', 'vs', 'rho']:
			layer_list = []
			for index in range(self['nlayers']):
				layer = np.ones((self['nx'], self['dz'][index]), 'f')
				layer *= self[model][index]
				layer_list.append(layer)
			self['model'][model] = np.hstack(layer_list)
			self['model'][model][250:500,120:160] = self[model][1]
			self['model'][model][250:500,120+fault_throw:160+fault_throw] = self[model][2]
		
		self['model']['z'] = self['model']['vp'] * self['model']['rho']
		self['model']['R'] = (np.roll(self['model']['z'], shift=-1) - self['model']['z'])/(np.roll(self['model']['z'], shift=-1) + self['model']['z'])
		self['model']['R'][:,0] *= 0
		self['model']['R'][:,-1:] *= 0
		self['model']['R'][:,:self['dz'][0]+2] = 0
Example No. 25
def normals_numpy(depth, rect=((0,0),(640,480)), win=7, mat=None):
    assert depth.dtype == np.float32
    from scipy.ndimage.filters import uniform_filter
    (l,t),(r,b) = rect
    v,u = np.mgrid[t:b,l:r]
    depth = depth[v,u]
    depth[depth==0] = -1e8  # 2047
    depth = calibkinect.recip_depth_openni(depth)
    depth = uniform_filter(depth, win)
    global duniform
    duniform = depth

    dx = (np.roll(depth,-1,1) - np.roll(depth,1,1))/2
    dy = (np.roll(depth,-1,0) - np.roll(depth,1,0))/2
    #dx,dy = np.array(depth),np.array(depth)
    #speedup.gradient(depth.ctypes.data, dx.ctypes.data,
    # dy.ctypes.data, depth.shape[0], depth.shape[1])

    X,Y,Z,W = -dx, -dy, 0*dy+1, -(-dx*u + -dy*v + depth).astype(np.float32)

    mat = calibkinect.projection().astype('f').transpose()
    mat = np.ascontiguousarray(mat)
    x = X*mat[0,0] + Y*mat[0,1] + Z*mat[0,2] + W*mat[0,3]
    y = X*mat[1,0] + Y*mat[1,1] + Z*mat[1,2] + W*mat[1,3]
    z = X*mat[2,0] + Y*mat[2,1] + Z*mat[2,2] + W*mat[2,3]
    w = np.sqrt(x*x + y*y + z*z)
    w[z<0] *= -1
    weights = z*0+1
    weights[depth<-1000] = 0
    weights[(z/w)<.1] = 0
    #return x/w, y/w, z/w
    return np.dstack((x/w,y/w,z/w)), weights
Example No. 26
def face_ibug_68_mirrored_to_face_ibug_68(pcloud):
    r"""
    Apply the IBUG 68-point semantic labels, on a pointcloud that has been
    mirrored around the vertical axis (flipped around the Y-axis). Thus, on
    the flipped image the jaw etc would be the wrong way around. This
    rectifies that and returns a new PointCloud whereby all the points
    are oriented correctly.

    The semantic labels applied are as follows:

      - jaw
      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    new_pcloud, old_map = face_ibug_68_to_face_ibug_68(pcloud,
                                                       return_mapping=True)
    lms_map = np.hstack([old_map['jaw'][::-1],
                         old_map['right_eyebrow'][::-1],
                         old_map['left_eyebrow'][::-1],
                         old_map['nose'][:4],
                         old_map['nose'][4:][::-1],
                         np.roll(old_map['right_eye'][::-1], 4),
                         np.roll(old_map['left_eye'][::-1], 4),
                         np.roll(old_map['mouth'][:12][::-1], 7),
                         np.roll(old_map['mouth'][12:][::-1], 5)])
    return new_pcloud.from_vector(pcloud.points[lms_map]), old_map
Example No. 27
def test_shift():

    amp = 1
    v0 = 0 * u.m / u.s
    sigma = 8
    spectral_axis = np.arange(-50, 51) * u.m / u.s

    true_spectrum = gaussian(spectral_axis.value,
                             amp, v0.value, sigma)

    # Shift is an integer, so rolling is equivalent
    rolled_spectrum = np.roll(true_spectrum, 10)

    shift_spectrum = fourier_shift(true_spectrum, 10)

    np.testing.assert_allclose(shift_spectrum,
                               rolled_spectrum,
                               rtol=1e-4)

    # With part masked
    masked_spectrum = true_spectrum.copy()
    mask = np.abs(spectral_axis.value) <= 30
    masked_spectrum[~mask] = np.NaN

    rolled_mask = np.roll(mask, 10)
    rolled_masked_spectrum = rolled_spectrum.copy()
    rolled_masked_spectrum[~rolled_mask] = np.NaN

    shift_spectrum = fourier_shift(masked_spectrum, 10)

    np.testing.assert_allclose(shift_spectrum,
                               rolled_masked_spectrum,
                               rtol=1e-4)
Example No. 28
	def gradient_ascent_step(net, step_size=1.5, end="inception_4c/output",
		jitter=32, clip=True, objective_fn=None, **objective_params):
		# if the objective function is None, initialize it as
		# the standard L2 objective
		if objective_fn is None:
			objective_fn = BatCountry.L2_objective

		# input image is stored in Net's 'data' blob
		src = net.blobs["data"]
		dst = net.blobs[end]

		# apply jitter shift
		ox, oy = np.random.randint(-jitter, jitter + 1, 2)
		src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)

		net.forward(end=end)
		objective_fn(dst, **objective_params)
		net.backward(start=end)
		g = src.diff[0]

		# apply normalized ascent step to the input image
		src.data[:] += step_size / np.abs(g).mean() * g

		# unshift image
		src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)

		# clip pixel values to the valid range
		if clip:
			bias = net.transformer.mean["data"]
			src.data[:] = np.clip(src.data, -bias, 255 - bias)
Example No. 29
    def _calculate_series(self, attr):
        mi = getattr(self, '{}_min'.format(attr))
        amp = getattr(self, '{}_max'.format(attr)) - mi

        funcname = getattr(self, '{}_func'.format(attr))
        period = getattr(self, '{}_period'.format(attr))
        offset = getattr(self, '{}_offset'.format(attr))
        speriod = getattr(self, '{}_sample'.format(attr))
        duty = getattr(self, '{}_duty'.format(attr))
        duration = getattr(self, '{}_duration'.format(attr))

        x, y, sx, sy = [], [], [], []
        if funcname != NULL_STR:
            if self.xy_pattern_enabled and getattr(self, '{}_use_transit_time'.format(attr)):
                t = self.calculate_transit_time()
            else:
                t = duration

            t = t or 1
            x = linspace(0, t, 500)

            speriod = speriod or 1
            sx = arange(0, t, speriod)
            if sx[-1] < t:
                sx = append(sx, t)

            if funcname == 'Sine':
                def func(xx):
                    return (mi + amp) + amp * sin(period * xx + offset)

                y = func(x)

            elif funcname == 'Square':
                def func(xx):
                    return 0.5 * amp * (signal.square(period * xx * 2 * pi + offset, duty=duty / 100.) + 1) + mi

                y = func(x)

                bx = asarray(diff(y), dtype=bool)
                bx = roll(bx, 1)

                sx = x[bx]
                sx = append(asarray([0]), sx)
                sx = append(sx, asarray([t]))

            elif funcname == 'Saw':
                def func(xx):
                    return 0.5 * amp * (signal.sawtooth(period * xx * 2 * pi + offset) + 1) + mi

                y = func(x)

                asign = sign(gradient(y))
                signchange = ((roll(asign, 1) - asign) != 0).astype(bool)
                signchange[0] = False
                nsx = x[signchange]
                sx = hstack((sx, nsx))

            sy = func(sx)

        return x, y, sx, sy
Example No. 30
def main():
    args = parse_args()

    gdb = genome.db.GenomeDB(assembly=args.assembly)

    fwd_track = gdb.open_track(args.fwd_track)
    rev_track = gdb.open_track(args.rev_track)
    combined_track = gdb.create_track(args.combined_track)
    
    shift = find_strand_shift(gdb, fwd_track, rev_track)

    sys.stderr.write("shifting fwd/rev by +%d/-%d bp\n" % (shift, shift))

    for chrom in gdb.get_chromosomes():
        sys.stderr.write("%s\n" % chrom.name)
        carray = create_carray(combined_track, chrom, args.dtype)

        fwd_vals = fwd_track.get_nparray(chrom)
        rev_vals = rev_track.get_nparray(chrom)

        # shift fwd / rev values by the offset that gave
        # the maximum covariance2
        fwd_vals = np.roll(fwd_vals, shift)
        rev_vals = np.roll(rev_vals, -shift)
        fwd_vals[:shift] = 0
        rev_vals[-shift:] = 0

        carray[:] = fwd_vals + rev_vals
        
    fwd_track.close()
    rev_track.close()
    combined_track.close()
Example No. 31
def _coil_trans_to_loc(coil_trans):
    """Convert coil_trans to loc."""
    coil_trans = coil_trans.astype(np.float64)
    return np.roll(coil_trans.T[:, :3], 1, 0).flatten()
        def run_noPlt(self, inits, nstepmax = 10):
                import numpy as np
                import time 
                from scipy.interpolate import interp1d
                #import matplotlib.pyplot as plt
                inits_x = inits[0]
                inits_y = inits[1]                    
                # max number of iterations
                
                # frequency of plotting
                nstepplot = 1e1
                # plot string every nstepplot if flag1 = 1 
                flag1 = 1

                # temperature ###?!!!
                mu=9
                mu = 3
                #mu = 4
                # parameter used as stopping criterion
                tol1 = 1e-7

                # number of images during prerelaxation
                n2 = 1e1;
                # number of images along the string (try from  n1 = 3 up to n1 = 1e4)
                n1 = 25
                n1 = len(inits_x)
                # time-step (limited by the ODE step on line 83 & 84 but independent of n1)
                h = 1e-4
                #h = 3e-5
                
                # end points of the initial string
                # notice that they do NOT have to be at minima of V -- the method finds
                # those automatically

                # initialization
                
                x = np.array(inits_x)
                y = np.array(inits_y)
                dx = x-np.roll(x, 1)
                dy = y-np.roll(y, 1)
                dx[0] = 0
                dy[0] = 0
                xi = x
                yi = y

                # parameters in Mueller potential

                aa = [-0.3] # inverse radius in x
                bb = [0] # radius in xy
                cc = [-0.3] # inverse radius in y
                AA = 300*[-200] # strength

                XX = [0] # center_x
                YY = [0] # center_y

                zxx = np.mgrid[-2.5:2.51:0.01]
                zyy = np.mgrid[-2.5:2.51:0.01]
                xx, yy = np.meshgrid(zxx, zyy)


                V1 = AA[0]*np.exp(aa[0] * np.square(xx-XX[0]) + bb[0] * (xx-XX[0]) * (yy-YY[0]) +cc[0]*np.square(yy-YY[0]))

##### Main loop

                trj_x = []
                trj_y = []
                for nstep in range(int(nstepmax)):
                        
                        # calculation of the x and y-components of the force, dVx and dVy respectively
                        ee = AA[0]*np.exp(aa[0]*np.square(x-XX[0])+bb[0]*(x-XX[0])*(y-YY[0])+cc[0]*np.square(y-YY[0]))
                        dVx = (aa[0]*(x-XX[0])+bb[0]*(y-YY[0]))*ee
                        dVy = (bb[0]*(x-XX[0])+cc[0]*(y-YY[0]))*ee

                        x0 = x
                        y0 = y
                        x = x - h*dVx + np.sqrt(h*mu)*np.random.randn(1,n1)
                        y = y - h*dVy + np.sqrt(h*mu)*np.random.randn(1,n1) 
                        trj_x.append(x) 
                        trj_y.append(y)
                return trj_x, trj_y          
Example No. 33
def render_lights(size=MAP_WINDOW_SIZE, show_weather=True):
	if not SETTINGS['draw lights']:
		return False

	reset_lights(size=size)
	_weather_light = weather.get_lighting()
	
	#Not entirely my code. Made some changes to someone's code from libtcod's Python forum.
	RGB_LIGHT_BUFFER[0] = numpy.add(RGB_LIGHT_BUFFER[0], _weather_light[0])
	RGB_LIGHT_BUFFER[1] = numpy.add(RGB_LIGHT_BUFFER[1], _weather_light[1])
	RGB_LIGHT_BUFFER[2] = numpy.add(RGB_LIGHT_BUFFER[2], _weather_light[2])
	(x, y) = SETTINGS['light mesh grid']
	
	if show_weather:
		weather.generate_effects(size)

	_remove_lights = []
	for light in WORLD_INFO['lights']:
		_x_range = light['pos'][0]-CAMERA_POS[0]
		_y_range = light['pos'][1]-CAMERA_POS[1]
		
		if _x_range <= -20 or _x_range>=size[0]+20:
			continue
		
		if _y_range <= -20 or _y_range>=size[1]+20:
			continue
		
		if not 'old_pos' in light:
			light['old_pos'] = (0, 0, -2)
		else:
			light['old_pos'] = light['pos'][:]
		
		if 'follow_item' in light:
			if not light['follow_item'] in ITEMS:
				_remove_lights.append(light)
				continue
				
			light['pos'] = items.get_pos(light['follow_item'])[:]
		
		_render_x = light['pos'][0]-CAMERA_POS[0]
		_render_y = light['pos'][1]-CAMERA_POS[1]
		_x = numbers.clip(light['pos'][0]-(size[0]/2),0,MAP_SIZE[0])
		_y = numbers.clip(light['pos'][1]-(size[1]/2),0,MAP_SIZE[1])
		_top_left = (_x,_y,light['pos'][2])
		
		#TODO: Render only on move
		if not tuple(light['pos']) == tuple(light['old_pos']):
			light['los'] = cython_render_los.render_los((light['pos'][0],light['pos'][1]), light['brightness']*2, view_size=size, top_left=_top_left)
		
		los = light['los'].copy()
		
		_x_scroll = _x-CAMERA_POS[0]
		_x_scroll_over = 0
		_y_scroll = _y-CAMERA_POS[1]
		_y_scroll_over = 0
		
		if _x_scroll<0:
			_x_scroll_over = _x_scroll
			_x_scroll = los.shape[1]+_x_scroll
		
		if _y_scroll<0:
			_y_scroll_over = _y_scroll
			_y_scroll = los.shape[0]+_y_scroll
		
		los = numpy.roll(los, _y_scroll, axis=0)
		los = numpy.roll(los, _x_scroll, axis=1)
		los[_y_scroll_over:_y_scroll,] = 1
		los[:,_x_scroll_over:_x_scroll] = 1
		
		if SETTINGS['diffuse light']:
			_y, _x = diffuse_light((y, x))
			(x, y) = numpy.meshgrid(_x, _y)
		
		sqr_distance = (x - (_render_x))**2.0 + (y - (_render_y))**2.0
		
		brightness = numbers.clip(random.uniform(light['brightness']*light['shake'], light['brightness']), 0.01, 50) / sqr_distance
		brightness *= los
		#brightness *= LOS_BUFFER[0]
		
		#_mod = (abs((WORLD_INFO['length_of_day']/2)-WORLD_INFO['real_time_of_day'])/float(WORLD_INFO['length_of_day']))*5.0	
		#_mod = numbers.clip(_mod-1, 0, 1)
		#(255*_mod, 165*_mod, 0*_mod)
		#print brightness
		#light['brightness'] = 25
		#light['color'][0] = 255*(light['brightness']/255.0)
		#light['color'][1] = (light['brightness']/255.0)
		#light['color'][2] = 255*(light['brightness']/255.0)
		RGB_LIGHT_BUFFER[0] -= (brightness.clip(0, 2)*(light['color'][0]))#numpy.subtract(RGB_LIGHT_BUFFER[0], light['color'][0]).clip(0, 255)
		RGB_LIGHT_BUFFER[1] -= (brightness.clip(0, 2)*(light['color'][1]))#numpy.subtract(RGB_LIGHT_BUFFER[1], light['color'][1]).clip(0, 255)
		RGB_LIGHT_BUFFER[2] -= (brightness.clip(0, 2)*(light['color'][2]))#numpy.subtract(RGB_LIGHT_BUFFER[2], light['color'][2]).clip(0, 255)
Example No. 34
def test_axis_diff_and_interp_nonperiodic_2d(all_2d, boundary, axis_name,
                                             varname, this, to):
    ds, periodic, expected = all_2d

    try:
        ax_periodic = axis_name in periodic
    except TypeError:
        ax_periodic = periodic

    boundary_arg = boundary if not ax_periodic else None
    axis = Axis(ds, axis_name, periodic=ax_periodic, boundary=boundary_arg)
    da = ds[varname]

    # everything is left shift
    data = ds[varname].data

    axis_num = da.get_axis_num(axis.coords[this])

    # lookups for numpy.pad
    numpy_pad_arg = {"extend": "edge", "fill": "constant"}
    # args for numpy.pad
    pad_left = (1, 0)
    pad_right = (0, 1)
    pad_none = (0, 0)

    if this == "center":
        if ax_periodic:
            data_left = np.roll(data, 1, axis=axis_num)
            data_right = data
        else:
            pad_width = [
                pad_left if i == axis_num else pad_none
                for i in range(data.ndim)
            ]
            the_slice = tuple([
                slice(0, -1) if i == axis_num else slice(None)
                for i in range(data.ndim)
            ])
            data_left = np.pad(data, pad_width,
                               numpy_pad_arg[boundary])[the_slice]
            data_right = data
    elif this == "left":
        if ax_periodic:
            data_left = data
            data_right = np.roll(data, -1, axis=axis_num)
        else:
            pad_width = [
                pad_right if i == axis_num else pad_none
                for i in range(data.ndim)
            ]
            the_slice = tuple([
                slice(1, None) if i == axis_num else slice(None)
                for i in range(data.ndim)
            ])
            data_right = np.pad(data, pad_width,
                                numpy_pad_arg[boundary])[the_slice]
            data_left = data

    data_interp = 0.5 * (data_left + data_right)
    data_diff = data_right - data_left

    # determine new dims
    dims = list(da.dims)
    dims[axis_num] = axis.coords[to]
    coords = {dim: ds[dim] for dim in dims}

    da_interp_expected = xr.DataArray(data_interp, dims=dims, coords=coords)
    da_diff_expected = xr.DataArray(data_diff, dims=dims, coords=coords)

    da_interp = axis.interp(da, to)
    da_diff = axis.diff(da, to)

    assert da_interp_expected.equals(da_interp)
    assert da_diff_expected.equals(da_diff)

    if boundary_arg is not None:
        if boundary == "extend":
            bad_boundary = "fill"
        elif boundary == "fill":
            bad_boundary = "extend"

        da_interp_wrong = axis.interp(da, to, boundary=bad_boundary)
        assert not da_interp_expected.equals(da_interp_wrong)
        da_diff_wrong = axis.diff(da, to, boundary=bad_boundary)
        assert not da_diff_expected.equals(da_diff_wrong)
Example No. 35
def shift(audio, shift_sec, fs):
    """Shifts audio.
    """
    shift_count = int(shift_sec * fs)
    return np.roll(audio, shift_count)
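For example (illustrative values of my own): at fs = 16000 Hz a shift_sec of 0.5 rolls the waveform by 8000 samples, with the samples pushed off the end wrapping around to the front.

import numpy as np

audio = np.arange(10, dtype=float)
print(shift(audio, shift_sec=0.5, fs=10))   # rolls by 5 samples: [5. 6. 7. 8. 9. 0. 1. 2. 3. 4.]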
Example No. 36
 def random_sample(self,
                   inputs,
                   n,
                   topk=None,
                   topp=None,
                   states=None,
                   temperature=1,
                   min_ends=1):
     """随机采样n个结果
     说明:非None的topk表示每一步只从概率最高的topk个中采样;而非None的topp
          表示每一步只从概率最高的且概率之和刚好达到topp的若干个token中采样。
     返回:n个解码序列组成的list。
     """
     inputs = [np.array([i]) for i in inputs]
     output_ids = self.first_output_ids
     results = []
     for step in range(self.maxlen):
         probas, states = self.predict(inputs, output_ids, states,
                                       temperature, 'probas')  # 计算当前概率
         probas /= probas.sum(axis=1, keepdims=True)  # 确保归一化
         if step == 0:  # 第1步预测后将结果重复n次
             probas = np.repeat(probas, n, axis=0)
             inputs = [np.repeat(i, n, axis=0) for i in inputs]
             output_ids = np.repeat(output_ids, n, axis=0)
         if topk is not None:
             k_indices = probas.argpartition(-topk,
                                             axis=1)[:, -topk:]  # 仅保留topk
             probas = np.take_along_axis(probas, k_indices,
                                         axis=1)  # topk概率
             probas /= probas.sum(axis=1, keepdims=True)  # 重新归一化
         if topp is not None:
             p_indices = probas.argsort(axis=1)[:, ::-1]  # 从高到低排序
             probas = np.take_along_axis(probas, p_indices, axis=1)  # 排序概率
             cumsum_probas = np.cumsum(probas, axis=1)  # 累积概率
             flag = np.roll(cumsum_probas >= topp, 1, axis=1)  # 标记超过topp的部分
             flag[:, 0] = False  # 结合上面的np.roll,实现平移一位的效果
             probas[flag] = 0  # 后面的全部置零
             probas /= probas.sum(axis=1, keepdims=True)  # 重新归一化
         sample_func = lambda p: np.random.choice(len(p), p=p)  # 按概率采样函数
         sample_ids = np.apply_along_axis(sample_func, 1, probas)  # 执行采样
         sample_ids = sample_ids.reshape((-1, 1))  # 对齐形状
         if topp is not None:
             sample_ids = np.take_along_axis(p_indices, sample_ids,
                                             axis=1)  # 对齐原id
         if topk is not None:
             sample_ids = np.take_along_axis(k_indices, sample_ids,
                                             axis=1)  # 对齐原id
         output_ids = np.concatenate([output_ids, sample_ids], 1)  # 更新输出
         end_counts = (output_ids == self.end_id).sum(1)  # 统计出现的end标记
         if output_ids.shape[1] >= self.minlen:  # 最短长度判断
             flag = (end_counts == min_ends)  # 标记已完成序列
             if flag.any():  # 如果有已完成的
                 for ids in output_ids[flag]:  # 存好已完成序列
                     results.append(ids)
                 flag = (flag == False)  # 标记未完成序列
                 inputs = [i[flag] for i in inputs]  # 只保留未完成部分输入
                 output_ids = output_ids[flag]  # 只保留未完成部分候选集
                 end_counts = end_counts[flag]  # 只保留未完成部分end计数
                 if len(output_ids) == 0:
                     break
     # 如果还有未完成序列,直接放入结果
     for ids in output_ids:
         results.append(ids)
     # 返回结果
     return results
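The np.roll trick used for the topp cutoff above is worth a standalone look; a minimal sketch of my own with a single sorted distribution:

import numpy as np

probas = np.array([[0.5, 0.3, 0.15, 0.05]])          # already sorted high to low
cumsum_probas = np.cumsum(probas, axis=1)            # [[0.5, 0.8, 0.95, 1.0]]
topp = 0.9
flag = np.roll(cumsum_probas >= topp, 1, axis=1)     # shift the cutoff mask right by one
flag[:, 0] = False                                   # the most probable token is always kept
probas[flag] = 0                                     # [[0.5, 0.3, 0.15, 0.0]]
probas /= probas.sum(axis=1, keepdims=True)          # renormalize over the kept tokens
print(probas)

Shifting the mask right by one keeps the first token whose cumulative probability crosses topp and zeroes out everything after it.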
Example No. 37
def nearest_value_and_distance(refpixels, domain, nodata):
    """
    Returns distance in pixels !
    """

    height, width = domain.shape
    midpoints = 0.5*(refpixels + np.roll(refpixels, 1, axis=0))
    midpoints_valid = (refpixels[:-1, 3] == refpixels[1:, 3])
    # midpoints = midpoints[1:, :2][midpoints_valid]
    midpoints = midpoints[1:, :2]
    midpoints[~midpoints_valid, 0] = -999999.0
    midpoints[~midpoints_valid, 1] = -999999.0

    midpoints_index = cKDTree(midpoints, balanced_tree=True)
    distance = np.zeros_like(domain)
    values = np.copy(distance)
    nearest_axes = np.zeros_like(domain, dtype='uint32')

    # semi-vectorized code, easier to understand

    # for i in range(height):

    #     js = np.arange(width)
    #     row = np.column_stack([np.full_like(js, i), js])
    #     valid = domain[row[:, 0], row[:, 1]] != nodata
    #     query_pixels = row[valid]
    #     nearest_dist, nearest_idx = midpoints_index.query(query_pixels, k=1, jobs=4)
    #     nearest_a = np.take(refpixels, nearest_idx, axis=0, mode='wrap')
    #     nearest_b = np.take(refpixels, nearest_idx+1, axis=0, mode='wrap')
    #     nearest_m = np.take(midpoints[:, 2], nearest_idx+1, axis=0, mode='wrap')
    #     # same as
    #     # nearest_value = 0.5*(nearest_a[:, 2] + nearest_b[:, 2])
    #     dist, signed_dist, pos = ta.signed_distance(
    #         np.float32(nearest_a),
    #         np.float32(nearest_b),
    #         np.float32(query_pixels))

    # faster fully-vectorized code

    pixi, pixj = np.meshgrid(np.arange(height), np.arange(width), indexing='ij')
    valid = domain != nodata
    query_pixels = np.column_stack([pixi[valid], pixj[valid]])

    del pixi
    del pixj
    del valid

    _, nearest_idx = midpoints_index.query(query_pixels, k=1)

    # nearest_a = np.take(refpixels, nearest_idx, axis=0, mode='wrap')
    # nearest_b = np.take(refpixels, nearest_idx+1, axis=0, mode='wrap')

    nearest_p = np.take(refpixels, np.column_stack([nearest_idx, nearest_idx+1]), axis=0, mode='wrap')
    nearest_a = nearest_p[:, 0, :]
    nearest_b = nearest_p[:, 1, :]

    dist, signed_dist, pos = ta.signed_distance(
        np.float32(nearest_a),
        np.float32(nearest_b),
        np.float32(query_pixels))

    # interpolate between points A and B
    nearest_value = nearest_a[:, 2] + pos*(nearest_b[:, 2] - nearest_a[:, 2])

    nearest_axis = np.copy(nearest_a[:, 3])
    nearest_axis[nearest_axis != nearest_b[:, 3]] = 0

    # almost same as
    # nearest_m = 0.5*(nearest_a[:, 2] + nearest_b[:, 2])
    # same as
    # nearest_m = np.take(midpoints[:, 2], nearest_idx+1, axis=0, mode='wrap')

    distance[query_pixels[:, 0], query_pixels[:, 1]] = dist * np.sign(signed_dist)
    values[query_pixels[:, 0], query_pixels[:, 1]] = nearest_value
    nearest_axes[query_pixels[:, 0], query_pixels[:, 1]] = nearest_axis

    return nearest_axes, values, distance
Example No. 38
    if RunMode == 1:
        cuda = torch.device("cuda")
        CRSM = torch.zeros((BASELINE, 2, CHOUT, 2), dtype=torch.float32, device=cuda)
    elif RunMode == 2:
        CRSM = cp.zeros((BASELINE, 2, CHOUT), dtype=cp.complex64)
    #### Read sub File ####
    with open(str(FILEN1),'rb') as fn1:
        for t in range(INTNUM):
            fn1.seek(HEADSIZE+t*BLOCKSIZE)
            RAW1 = np.fromfile(fn1, dtype=np.int8, count = BLOCKSIZE).reshape(
                    SMPNUM//INTNUM*ANTNUM*2*SAMPLE//FINECH, FINECH, 2)
            # Raw to Complex #
            if RunMode == 0:
                FFT1 = RAW1[:, 0] + 1j*RAW1[:, 1]
                # FFT #
                FFT1 = np.roll(np.fft.fft(FFT1, n=FINECH, norm = "ortho")[:, OUT_STR: OUT_END].reshape(
                        SMPNUM//INTNUM, ANTNUM, 2, SAMPLE//FINECH, CHOUT), FINECH//2, axis=4)
                # print("FFT %.2f sec"%(time.time() - tstart))
                # Cross Correlate #
                ss = 0
                for i in range(ANTNUM):
                    for ii in range(i, ANTNUM):
                        CRSMCPU[ss, 0, t, :] = (FFT1[:, i, 0, :, :] * np.conj(FFT1[:, ii, 0, :, :])).mean(axis = (0, 1))
                        CRSMCPU[ss, 1, t, :] = (FFT1[:, i, 1, :, :] * np.conj(FFT1[:, ii, 1, :, :])).mean(axis = (0, 1))
                        ss += 1

            elif RunMode == 1:
                FFT1 = torch.from_numpy(RAW1).float().cuda()
                # FFT #
                FFT1 = torch.roll(torch.fft(FFT1, 1, normalized = True)[:, OUT_STR: OUT_END, :].view(
                        SMPNUM//INTNUM, ANTNUM, 2, SAMPLE//FINECH, CHOUT, 2), FINECH//2, dims=4)
                # print("FFT %.2f sec"%(time.time() - tstart))
def poly_area(x, y):
    """ Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
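A quick check of the shoelace formula above (my own example): the unit square has area 1.

import numpy as np

x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
print(poly_area(x, y))   # 1.0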
Example No. 40
def count_datacube(datacube,
                   counted_shape,
                   sigmathresh=4,
                   nsamples=40,
                   upperlimit=10,
                   drwidth=100,
                   sub_pixel=True,
                   plot_histogram=False,
                   plot_electrons=False):
    data = datacube.data4D

    #Get dimensions
    y, x, z, t = [datacube.Q_Ny, datacube.Q_Nx, datacube.R_Ny, datacube.R_Nx]

    print('Getting dark current reference')
    #Remove dark background
    dr, mean, stddev = get_dark_reference(data,
                                          ndrsamples=nsamples,
                                          upperlimit=upperlimit,
                                          drwidth=drwidth)

    print('Calculating threshhold')
    #Get threshhold
    thresh = calculate_counting_threshhold(data,
                                           dr,
                                           sigmathresh=sigmathresh,
                                           nsamples=nsamples,
                                           upper_limit=upperlimit,
                                           plot_histogram=plot_histogram)

    counted = np.zeros(counted_shape + (z, t), dtype=np.uint16)

    #S
    total_electrons = 0
    e_list = None

    for tt in range(t):
        printProgressBar(tt,
                         t - 1,
                         prefix='Counting:',
                         suffix='Complete',
                         length=50)
        for zz in range(z):

            #Background subtract and remove X-rays\hot pixels
            workingarray = data[:, :, zz, tt] - dr[:, np.newaxis]
            workingarray[workingarray > mean + upperlimit * stddev] = 0

            #Create a map of events (pixels with value greater than
            #the threshhold value)

            events = np.greater(workingarray, thresh)

            #Now find local maxima by circular shift and comparison to neighbours
            #if we want diagonal neighbours to be considered seperate electron
            #counts then we would add a second for loop to do these extra
            #comparisons
            for i in range(4):
                events = np.logical_and(
                    np.greater(
                        workingarray,
                        np.roll(workingarray, i % 2 * 2 - 1, axis=i // 2)),
                    events)
            events[0, :] = False
            events[-1, :] = False
            events[:, 0] = False
            events[:, -1] = False

            electron_posn = np.asarray(np.argwhere(events), dtype=np.float)
            num_electrons = np.shape(electron_posn)[0]

            if (sub_pixel):
                #Now do center of mass in local region 3x3 region to refine position
                # estimate
                for i in range(num_electrons):
                    event = electron_posn[i, :]
                    electron_posn[i, :] += center_of_mass(
                        workingarray[int(event[0] - 1):int(event[0] + 2),
                                     int(event[1] - 1):int(event[1] + 2)])
                electron_posn -= np.asarray([1, 1])[np.newaxis, :]

            if (plot_electrons):
                if (not os.path.exists('count_plots')): os.mkdir('count_plots')
                figsize = max(np.shape(data[:, :, zz, tt])[:2]) // 200
                fig = plt.figure(figsize=(figsize, figsize))
                ax = fig.add_subplot(111)
                ax.imshow(data[:, :, zz, tt], origin='lower', vmax=2 * thresh)
                ax.plot(electron_posn[:, 1], electron_posn[:, 0], 'rx')
                ax.set_title('Found {0} electrons'.format(num_electrons))
                plt.show()
                fig.savefig('count_plots/Countplot_{0}_{1}.pdf'.format(tt, zz))
            #Update total number of electrons
            total_electrons += num_electrons
            #Put the electron_posn in fractional coordinates
            #where the positions are fractions of the original array
            electron_posn /= np.asarray([max(y, x), max(y, x)])[np.newaxis, :]
            electron_posn = np.hstack(
                (electron_posn, np.asarray([tt, zz],
                                           dtype=np.float32)[np.newaxis, :]))
            if (e_list is None): e_list = electron_posn
            else: e_list = np.vstack((e_list, electron_posn))
    return e_list
Example No. 41
def ZOGY(R, N, Pr, Pn, sr, sn, fr, fn, Vr, Vn, dx, dy):
    """
    Optimal image subtraction in a pythonic layout! 
    Where the magic happens. Algorithm from
    Zackay et al. (2016)
    """
    R_hat = fft.fft2(R)

    N_hat = fft.fft2(N)

    Pn_hat = fft.fft2(Pn)

    Pn_hat2_abs = np.abs(Pn_hat**2)

    Pr_hat = fft.fft2(Pr)

    Pr_hat2_abs = np.abs(Pr_hat**2)

    sn2 = sn**2

    sr2 = sr**2

    fn2 = fn**2

    fr2 = fr**2

    fD = fr * fn / np.sqrt(sn2 * fr2 + sr2 * fn2)

    denom = sn2 * fr2 * Pr_hat2_abs + sr2 * fn2 * Pn_hat2_abs
    if np.any(denom == 0):
        print('There are zeros!')

    D_hat = (fr * Pr_hat * N_hat - fn * Pn_hat * R_hat) / np.sqrt(denom)
    D = np.real(fft.ifft2(D_hat)) / fD

    P_D_hat = (fr * fn / fD) * (Pr_hat * Pn_hat) / np.sqrt(denom)

    S_hat = fD * D_hat * np.conj(P_D_hat)
    S = np.real(fft.ifft2(S_hat))

    kr_hat = fr * fn2 * np.conj(Pr_hat) * Pn_hat2_abs / denom
    kr = np.real(fft.ifft2(kr_hat))
    kr2 = kr**2
    kr2_hat = fft.fft2(kr2)

    kn_hat = fn * fr2 * np.conj(Pn_hat) * Pr_hat2_abs / denom
    kn = np.real(fft.ifft2(kn_hat))
    kn2 = kn**2
    kn2_hat = fft.fft2(kn2)

    Vr_hat = fft.fft2(Vr)
    Vn_hat = fft.fft2(Vn)

    VSr = np.real(fft.ifft2(Vr_hat * kr2_hat))
    VSn = np.real(fft.ifft2(Vn_hat * kn2_hat))

    dx2 = dx**2
    dy2 = dy**2
    # and calculate astrometric variance
    Sn = np.real(fft.ifft2(kn_hat * N_hat))
    dSndy = Sn - np.roll(Sn, 1, axis=0)
    dSndx = Sn - np.roll(Sn, 1, axis=1)
    VSn_ast = dx2 * dSndx**2 + dy2 * dSndy**2

    Sr = np.real(fft.ifft2(kr_hat * R_hat))
    dSrdy = Sr - np.roll(Sr, 1, axis=0)
    dSrdx = Sr - np.roll(Sr, 1, axis=1)
    VSr_ast = dx2 * dSrdx**2 + dy2 * dSrdy**2

    # and finally S_corr
    V_S = VSr + VSn
    V_ast = VSr_ast + VSn_ast
    V = V_S + V_ast

    # make sure there's no division by zero
    S_corr = np.copy(S)
    S_corr[V > 0] /= np.sqrt(V[V > 0])

    F_S = np.sum((fn2 * Pn_hat2_abs * fr2 * Pr_hat2_abs) / denom)
    F_S /= R.size

    alpha = S / F_S
    alpha_std = np.zeros(alpha.shape)
    alpha_std[V_S > 0] = np.sqrt(V_S[V_S > 0]) / F_S

    return (D, S, S_corr, alpha, alpha_std)
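A hypothetical call of the ZOGY routine above on synthetic data (assuming `fft` refers to `numpy.fft`, as in the function body); the image size, PSF widths, noise levels and zero points below are made-up illustration values, not recommended settings.

import numpy as np
from numpy import fft

size = 64
rng = np.random.default_rng(0)
yy, xx = np.mgrid[:size, :size]

def gaussian_psf(sigma):
    g = np.exp(-((yy - size // 2) ** 2 + (xx - size // 2) ** 2) / (2 * sigma ** 2))
    return fft.ifftshift(g / g.sum())        # unit-sum PSF centred at pixel (0, 0)

Pr, Pn = gaussian_psf(1.5), gaussian_psf(2.0)
sr, sn = 5.0, 6.0                            # background noise, reference / new
fr, fn = 1.0, 1.0                            # flux zero points
R = rng.normal(100.0, sr, (size, size))      # reference image
N = rng.normal(100.0, sn, (size, size))      # new image
Vr, Vn = np.full_like(R, sr ** 2), np.full_like(N, sn ** 2)

D, S, S_corr, alpha, alpha_std = ZOGY(R, N, Pr, Pn, sr, sn, fr, fn, Vr, Vn, 0.1, 0.1)
print(D.shape, round(float(S_corr.std()), 2))   # S_corr should be roughly unit-variance noise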
def is_clockwise(p):
    x = p[:, 0]
    y = p[:, 1]
    return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0
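A quick check of the shoelace-style orientation test above (assuming `is_clockwise` from this example is in scope): a unit square listed counter-clockwise should give False and its reversal True.

import numpy as np

square_ccw = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(is_clockwise(square_ccw))        # False: vertices run counter-clockwise
print(is_clockwise(square_ccw[::-1]))  # True: reversed order is clockwise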
Example #43
def L_BFGS_nls(x0,
               d0,
               fdf,
               qlist,
               glist,
               fdf0=None,
               big_step=100,
               tol=1.0e-6,
               itmax=100,
               init_step=1.0e-3,
               m=0,
               k=0):
    """L-BFGS minimization without line search
    Does one step.
        Arguments:
            fdf: function and gradient
            fdf0: initial function and gradient value
            d0: initial direction for line minimization
            x0: initial point
            qlist: list of previous positions used for reduced inverse Hessian construction
            glist: list of previous gradients used for reduced inverse Hessian construction
            m: number of corrections to store and use
            k: iteration (MD step) number
            big_step: limit on step length
            tol: convergence tolerance
            itmax: maximum number of allowed iterations
            init_step: initial step size
    """

    # Original function value, gradient, other initializations
    zeps = 1.0e-10
    if fdf0 is None: fdf0 = fdf(x0)
    f0, df0 = fdf0
    n = len(x0.flatten())
    dg = np.zeros(n)
    g = df0
    x = np.zeros(n)
    linesum = np.dot(x0.flatten(), x0.flatten())
    alpha = np.zeros(m)
    beta = np.zeros(m)
    rho = np.zeros(m)
    q = np.zeros(n)

    # Initial line direction
    xi = d0
    dg = df0

    # Step size
    stepsize = np.sqrt(np.dot(d0.flatten(), d0.flatten()))

    # First iteration; use initial step
    if k == 0:
        scale = 1.0
        while np.sqrt(np.dot(g.flatten(), g.flatten())) >= np.sqrt(np.dot(df0.flatten(), df0.flatten()))\
                or np.isnan(np.sqrt(np.dot(g.flatten(), g.flatten())))\
                or np.isinf(np.sqrt(np.dot(g.flatten(), g.flatten()))):
            x = np.add(x0, (scale * init_step * d0 /
                            np.sqrt(np.dot(d0.flatten(), d0.flatten()))))
            scale *= 0.1
            fx, g = fdf(x)
    else:

        # Scale if attempted step is too large
        if stepsize > big_step:
            d0 = big_step * d0 / np.sqrt(np.dot(d0.flatten(), d0.flatten()))
            info(" @MINIMIZE: Scaled step size", verbosity.debug)

        x = np.add(x0, d0)
        print "step size:", np.sqrt(np.dot(d0.flatten(), d0.flatten()))
        fx, g = fdf(x)

    info(" @MINIMIZE: Started L-BFGS", verbosity.debug)
    info(" @MINIMIZE: Updated gradient", verbosity.debug)

    # Update line direction (xi) and current point (x0)
    xi = np.subtract(x, x0)

    # Build list of previous positions
    if k < m:
        qlist[k] = xi.flatten()
    else:
        qlist = np.roll(qlist, -1, axis=0)
        qlist[m - 1] = xi.flatten()

    # Update current point
    x0 = x

    # Compute difference of gradients
    q = g.flatten()
    dg = np.subtract(g, dg)

    # Build list of previous gradients
    if k < m:
        glist[k] = dg.flatten()
    else:
        glist = np.roll(glist, -1, axis=0)
        glist[m - 1] = dg.flatten()

    # Determine bounds for L-BFGS 'two loop recursion'
    if k < (m - 1):
        bound1 = k
        bound2 = k + 1
    else:
        bound1 = m - 1
        bound2 = m

    # Begin two loop recursion:
    # First loop
    for j in range(bound1, -1, -1):
        rho[j] = 1.0 / np.dot(glist[j], qlist[j])
        alpha[j] = rho[j] * np.dot(qlist[j], q)
        q = q - alpha[j] * glist[j]

    info(" @MINIMIZE: First L-BFGS loop recursion completed", verbosity.debug)

    # Two possiblities for scaling: using first or most recent
    # members of the gradient and position lists
    hk = np.dot(glist[bound1], qlist[bound1]) / np.dot(glist[bound1],
                                                       glist[bound1])
    #hk = np.dot(glist[0], qlist[0]) / np.dot(glist[0], glist[0])
    xi = hk * q

    # Second loop
    for j in range(0, bound2, 1):
        beta[j] = rho[j] * np.dot(glist[j], xi)
        xi = xi + qlist[j] * (alpha[j] - beta[j])

    # Update direction xi
    xi = -xi.reshape(d0.shape)

    info(" @MINIMIZE: Second L-BFGS loop recursion completed", verbosity.debug)
    info(" @MINIMIZE: Updated search direction", verbosity.debug)

    return (x, fx, xi, qlist, glist)
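For clarity, a generic textbook sketch of the 'two loop recursion' that the function above applies to its qlist/glist buffers; this is a standalone illustration on a toy quadratic, not the i-PI routine itself.

import numpy as np

def two_loop_recursion(g, s_list, y_list):
    """Apply the implicit L-BFGS inverse Hessian built from stored position
    differences s_i and gradient differences y_i to a gradient g."""
    q = g.copy()
    alpha = []
    for s, y in zip(reversed(s_list), reversed(y_list)):   # first loop, newest pair first
        a = np.dot(s, q) / np.dot(y, s)
        q -= a * y
        alpha.append(a)
    gamma = np.dot(s_list[-1], y_list[-1]) / np.dot(y_list[-1], y_list[-1])
    z = gamma * q                                          # initial Hessian scaling
    for (s, y), a in zip(zip(s_list, y_list), reversed(alpha)):  # second loop, oldest first
        b = np.dot(y, z) / np.dot(y, s)
        z += s * (a - b)
    return -z                                              # descent direction

A = np.diag([1.0, 10.0])                 # toy quadratic f(x) = 0.5 x^T A x
x = np.array([1.0, 1.0])
s_list = [np.array([0.1, 0.0]), np.array([0.0, 0.05])]
y_list = [A @ s for s in s_list]
print(two_loop_recursion(A @ x, s_list, y_list))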
Example #44
def boundary_info(p,bars):
    """
    If Bflag = 1, it returns a 1D array of nodes ('boundary_nodes') and
    a 2D array of bars ('boundary').
    The first element in 'boundary_nodes' is the node (identified by its
    number) with the minimum x-coordinate. If several boundary nodes share
    the minimum x, the one with the minimum y-coordinate is chosen. This is
    the 'reference node' of the set of boundary nodes, easy to spot in the
    boundary plot. Starting from this reference node, the elements of the
    'boundary_nodes' array appear in the same order as they appear on the
    boundary plot when the perimeter is traversed in the ccw direction,
    defined as the direction in which all the nodes inside the mesh are
    on our left.
    Likewise, the first element of the 2D array 'boundary' defines the
    first bar on the boundary (as a pair of nodes). The bars follow the
    same order as the 'boundary_nodes' array.

    If Bflag = 2, it returns two 1D arrays of nodes ('ext_bound_nodes' and
    'int_bound_nodes') and two 2D arrays of bars ('ext_bound' and
    'int_bound'). The first pair, 'ext_bound_nodes' and 'ext_bound', refers
    to the external boundary and the second pair, 'int_bound_nodes' and
    'int_bound', refers to the internal boundary. In both cases the nodes
    and bars are ordered so that, travelling along the boundary, the
    internal nodes of the mesh are on our left (the 'ccw' convention).
    """
    bars = bars.tolist()
    
    Bflag = 1  # boundary flag default, indicating only 1 boundary
    bound = [bars[0]]
    del bars[0]

    while bound[0][0] != bound[-1][1]:
        for bar in bars:
            if bar[0] == bound[-1][1]:
                to_remove = bar
                b = [bar[0],bar[1]]       
                bound.append(b)
                break
            if bar[1] == bound[-1][1]:
                to_remove = bar
                b = [bar[1],bar[0]]
                bound.append(b)
                break
        bars.remove(to_remove)

    if len(bars) > 0:
        Bflag = 2  # two boundaries, nodes between boundaries(external flow)
        next_bound = [bars[0]]
        del bars[0]

        while next_bound[0][0] != next_bound[-1][1]:
            for bar in bars:
                if bar[0] == next_bound[-1][1]:
                    to_remove = bar
                    b = [bar[0],bar[1]]       
                    next_bound.append(b)
                    break
                if bar[1] == next_bound[-1][1]:
                    to_remove = bar
                    b = [bar[1],bar[0]]
                    next_bound.append(b)
                    break
            bars.remove(to_remove)
    if len(bars) > 0:
        print ('Error: there are more than 2 boundaries')
        print ('       number of bars left out = %4d' % len(bars))

    # ---------------------------------------------------------------------
    # If there is more than one boundary find out which one is external and
    # which one is internal
    # ---------------------------------------------------------------------
    p = np.asarray(p)
    if Bflag == 1:
        bound = np.asarray(bound)
    if Bflag == 2:
        bound = np.asarray(bound)
        next_bound = np.asarray(next_bound)

    # find the node(s) with minimum x-coordinate in bound
    # find the node(s) with minimum x-coordinate in next_bound
    # the one with the minimum x-coord is the external boundary

    # since the nodes in bound appear twice each (once to the left of a bar,
    # the second time to the right of a bar) all the nodes are represented
    # either in p[bound[:,0]] or p[bound[:,1]]


    px_min_bound,py_min_bound = np.amin(p[bound[:,0]],axis=0)
    # px_min_bound = -3.0; py_min_bound = -1.0

    if Bflag == 1:
        boundary = np.copy(bound)
        px_min_boundary = np.copy(px_min_bound)

    if Bflag == 2:
        px_min_next_bound, py_min_next_bound =  \
                           np.amin(p[next_bound[:,0]],axis=0)
        if px_min_bound < px_min_next_bound:
            ext_bound = np.copy(bound)
            int_bound = np.copy(next_bound)
            px_min_ext = px_min_bound
            px_min_int = px_min_next_bound
        else:
            ext_bound = np.copy(next_bound)
            int_bound = np.copy(bound)
            px_min_ext = np.copy(px_min_next_bound)
            px_min_int = np.copy(px_min_bound)

    # -------------------------------------------------------------
    # put the nodes making the boundary in a separate array(s)
    # -------------------------------------------------------------
    if Bflag == 1:
        bpoints = np.reshape(boundary[0::2],2*len(boundary[0::2]))
        if len(boundary) % 2 == 0:   # even
            boundary_nodes = bpoints
        else:
            boundary_nodes = bpoints[0:-1]
    if Bflag == 2:
        bpoints = np.reshape(ext_bound[0::2],2*len(ext_bound[0::2]))
        if len(ext_bound) % 2 == 0:  # even
            ext_bound_nodes = bpoints
        else:
            ext_bound_nodes = bpoints[0:-1]
        bpoints = np.reshape(int_bound[0::2],2*len(int_bound[0::2]))
        if len(int_bound) % 2 == 0:   # even
            int_bound_nodes = bpoints
        else:
            int_bound_nodes = bpoints[0:-1]

    # Note ext_bound_nodes and int_bound_nodes contain the indexes
    # of the nodes on the boundary. To find their coordinates, do
    #   p[ext_bound_nodes] and p[int_bound_nodes]

    # -------------------------------------------------------------
    # Find the reference node (index) for each array of boundary nodes
    # -------------------------------------------------------------
    eps = 0.0001
    if Bflag == 1:
        ind_min_x = np.where(p[boundary_nodes,0] < px_min_boundary + eps)
        nn = np.argmin(p[boundary_nodes[ind_min_x],1])
        ref_ind = ind_min_x[0][nn] 

    if Bflag == 2:
        ind_min_x = np.where(p[ext_bound_nodes,0] < px_min_ext + eps)
        nn = np.argmin(p[ext_bound_nodes[ind_min_x],1])
        ext_ref_ind = ind_min_x[0][nn]
        
        ind_min_x = np.where(p[int_bound_nodes,0] < px_min_int + eps)
        nn = np.argmin(p[int_bound_nodes[ind_min_x],1])
        int_ref_ind = ind_min_x[0][nn]
        
    # --------------------------------------------------------------
    #  roll the boundaries so the ref point is the first point
    #---------------------------------------------------------------
    if Bflag == 1:
        boundary = np.roll(boundary, len(boundary) - ref_ind, axis=0)
        boundary_nodes = np.roll(boundary_nodes, len(boundary_nodes) - ref_ind)
    if Bflag == 2:
        ext_bound = np.roll(ext_bound, len(ext_bound) - ext_ref_ind, axis=0)
        int_bound = np.roll(int_bound, len(int_bound) - int_ref_ind, axis=0)
        ext_bound_nodes = np.roll(ext_bound_nodes,len(ext_bound_nodes) - ext_ref_ind)
        int_bound_nodes = np.roll(int_bound_nodes,len(int_bound_nodes) - int_ref_ind)
        
    # --------------------------------------------------------------
    #  Order the boundary(s) such that as we travel along the boundary(s)
    #  we always see the nodes of the grid to our left ('ccw')
    #  Test of ccw:
    #  For the boundary (Bflag=1) or ext_bound Bflag=2), as we traverse the
    #  periphery of the boundary increasing the index of the node, the total
    #  area enclosed in a complete turn must be negative
    #  For the int_bound the total area enclosed in a complete turn along the
    #  boundary must be positive

    # to change from ccw to cw (or viceversa):
    # 1) swap the columns of the array
    # 2) reverse the order of the elements in the array: 1st becomes last,
    #    2nd becomes next to last, and so on
    # --------------------------------------------------------------
    def kintegrate(y,x):
        area = 0.0
        for i in range(len(x)-1):
            dx = x[i+1] - x[i]
            h = 0.5*(y[i+1] + y[i])
            area += h*dx
        return area

    if Bflag == 1:
        y = np.concatenate((p[boundary_nodes,1],[p[boundary_nodes[0],1]]))
        x = np.concatenate((p[boundary_nodes,0],[p[boundary_nodes[0],0]]))
        area = kintegrate(y,x)
        if area > 0:
            # invert the order
            boundary_nodes = boundary_nodes[::-1]
            boundary_nodes = np.roll(boundary_nodes,1)
            boundary[:,[0,1]] = boundary[:,[1,0]]
            boundary = boundary[::-1]
        return boundary_nodes, boundary
    if Bflag == 2:
        y_ext = np.concatenate((p[ext_bound_nodes,1],[p[ext_bound_nodes[0],1]]))
        x_ext = np.concatenate((p[ext_bound_nodes,0],[p[ext_bound_nodes[0],0]]))
        y_int = np.concatenate((p[int_bound_nodes,1],[p[int_bound_nodes[0],1]]))
        x_int = np.concatenate((p[int_bound_nodes,0],[p[int_bound_nodes[0],0]]))
        area_ext = kintegrate(y_ext,x_ext)
        area_int = kintegrate(y_int,x_int)
        if area_ext > 0:
            # invert the order
            ext_bound_nodes = ext_bound_nodes[::-1]
            ext_bound_nodes = np.roll(ext_bound_nodes,1)
            ext_bound[:,[0,1]] = ext_bound[:,[1,0]]
            ext_bound = ext_bound[::-1]
            
        if area_int < 0:
            # invert the order
            int_bound_nodes = int_bound_nodes[::-1]
            int_bound_nodes = np.roll(int_bound_nodes,1)
            int_bound[:,[0,1]] = int_bound[:,[1,0]]
            int_bound = int_bound[::-1]
        return ext_bound_nodes,ext_bound,int_bound_nodes,int_bound
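A hypothetical usage of boundary_info above (assuming it is in scope) on the simplest single-boundary case, a square of four nodes joined by four bars; the coordinates are made up.

import numpy as np

p = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
bars = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])
boundary_nodes, boundary = boundary_info(p, bars)
print(boundary_nodes)   # starts at the node with minimum x (then minimum y)
print(boundary)         # bars ordered ccw along the perimeter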
Example #45
    def _pad_signal(self, data, pad_len, mode='symm', center=False):
        '''
        inputs:
        -------
        - data: rank 3 ndarray with shape (n_data, n_channels, data_len)
        - pad_len: int type, length after padding
        - mode: string type either symm, per, zero.
        - center: bool type indicating whether to bring the padded signal to the center

        outputs:
        --------
        - data: ndarray type padded data shaped (n_data, n_channels, pad_len)
        '''
        n_data = data.shape[0]
        n_channels = data.shape[1]
        data_len = data.shape[2]
        has_imag = np.linalg.norm(np.imag(data)) > 0 # bool type that checks if any of the elements has imaginary component

        if mode == 'symm':
            idx0 = np.concatenate([np.arange(data_len), np.arange(data_len, 0, -1) - 1], axis=0)
            conjugate0 = np.concatenate([np.zeros(data_len), np.ones(data_len)], axis=0)
        elif mode == 'per' or mode == 'zero':
            idx0 = np.arange(data_len)
            conjugate0 = np.zeros(data_len)
        else:
            raise ValueError('Invalid boundary conditions!')

        if mode != 'zero':
            idx = np.zeros(pad_len)
            conjugate = np.zeros(pad_len)
            idx[:data_len] = np.arange(data_len)
            conjugate[:data_len] = np.zeros(data_len)
            src = np.arange(data_len, data_len + np.floor((pad_len-data_len) / 2), dtype=int) % len(idx0)
            dst = np.arange(data_len, data_len + np.floor((pad_len - data_len) / 2), dtype=int)

            idx[dst] = idx0[src]
            conjugate[dst] = conjugate0[src]
            src = (len(idx0) - np.arange(1, np.ceil((pad_len - data_len) / 2) + 1, dtype=int)) % len(idx0)
            dst = np.arange(pad_len - 1, data_len + np.floor((pad_len - data_len) / 2 ) - 1, -1, dtype=int)
            idx[dst] = idx0[src]
            conjugate[dst] = conjugate0[src]
            # conjugate is shaped (pad_len,)
        else:
            idx = np.arange(data_len)
            conjugate = np.zeros(data_len)
            # conjugate is shaped (data_len,)

        idx = idx.astype(int)
        # idx, idx0, conjugate, conjugate0, src, dst are all rank 1 ndarrays
        data = data[:, :, idx] # data shape: (n_data, n_channels, data_len or pad_len)
        conjugate = conjugate[np.newaxis, np.newaxis, :] # conjugate shape: (1, 1, data_len or pad_len)
        if has_imag:
            data = data - 2j * np.imag(data) * conjugate
        # data is shaped (n_data, n_channels, data_len or pad_len)

        if mode == 'zero':
            data = np.concatenate([data, np.zeros((n_data, n_channels, pad_len - data_len))], axis=2)

        if center: # if center is nonzero (negative values are allowed, too)
            margin = int(np.floor((pad_len - data_len) / 2))
            data = np.roll(data, margin, axis=2)

        return data
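A simplified standalone sketch of the 'symm' index construction used above: the source index pattern is the signal followed by its mirror image, and the padded signal reads from that pattern periodically. The real method also splits the padding between head and tail and tracks conjugation for complex inputs; this shows only the core idea.

import numpy as np

x = np.arange(5.0)                                   # [0, 1, 2, 3, 4]
data_len, pad_len = len(x), 12
idx0 = np.concatenate([np.arange(data_len), np.arange(data_len, 0, -1) - 1])
idx = idx0[np.arange(pad_len) % len(idx0)]           # periodic read of the mirrored pattern
print(x[idx])                                        # [0. 1. 2. 3. 4. 4. 3. 2. 1. 0. 0. 1.]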
Example #46
def L_BFGS(x0, d0, fdf, qlist, glist, fdf0, big_step, tol, itmax, m, scale, k):
    """L-BFGS minimization. Uses approximate line minimizations.
    Does one step.
        Arguments:
            fdf = function and gradient
            fdf0 = initial function and gradient value
            d0 = initial direction for line minimization
            x0 = initial point
            qlist = list of previous positions used for reduced inverse Hessian construction
            glist = list of previous gradients used for reduced inverse Hessian construction
            m = number of corrections to store and use
            k = iteration (MD step) number
            big_step = limit on step length
            tol = convergence tolerance
            itmax = maximum number of allowed iterations
    """

    zeps = 1.0e-10
    n = len(x0.flatten())
    alpha = np.zeros(m)
    beta = np.zeros(m)
    rho = np.zeros(m)

    u0, g0 = fdf0

    # Maximum step size
    linesum = np.dot(x0.flatten(), x0.flatten())
    big_step = big_step * max(np.sqrt(linesum), n)

    # Perform approximate line minimization in direction d0
    x, u, g = min_approx(fdf, x0, fdf0, d0, big_step, tol, itmax)

    # Compute difference of positions (gradients)
    # Build list of previous 'd_positions (d_gradients)'

    d_x = np.subtract(x, x0)
    if k < m:
        qlist[k] = d_x.flatten()
    else:
        qlist_aux = np.roll(qlist, -1, axis=0)
        qlist[:] = qlist_aux
        qlist[m - 1] = d_x.flatten()

    d_g = np.subtract(g, g0)
    if k < m:
        glist[k] = d_g.flatten()
    else:
        glist_aux = np.roll(glist, -1, axis=0)
        glist[:] = glist_aux
        glist[m - 1] = d_g.flatten()

    # Update direction.
    # 1_Determine bounds for L-BFGS 'two loop recursion'
    if k < (m - 1):
        bound1 = k
        bound2 = k + 1
    else:
        bound1 = m - 1
        bound2 = m

    # 2
    q = g.flatten()

    # 3_Loops
    fac = np.dot(d_g.flatten(), d_x.flatten())
    sumdg = np.dot(d_g.flatten(), d_g.flatten())
    sumdx = np.dot(d_x.flatten(), d_x.flatten())

    # Skip update if not 'fac' sufficiently positive
    if fac > np.sqrt(zeps * sumdg * sumdx):

        # Begin two loop recursion:
        # First loop
        for j in range(bound1, -1, -1):
            rho[j] = 1.0 / np.dot(glist[j], qlist[j])
            alpha[j] = rho[j] * np.dot(qlist[j], q)
            q = q - alpha[j] * glist[j]

        info(" @MINIMIZE: First L-BFGS loop recursion completed",
             verbosity.debug)

        if scale == 0:
            hk = 1.0
        elif scale == 1:
            hk = np.dot(glist[0], qlist[0]) / np.dot(glist[0], glist[0])
        elif scale == 2:
            hk = np.dot(glist[bound1], qlist[bound1]) / np.dot(
                glist[bound1], glist[bound1])

        d = hk * q

        # Second loop
        for j in range(0, bound2, 1):
            beta[j] = rho[j] * np.dot(glist[j], d)
            d = d + qlist[j] * (alpha[j] - beta[j])

        info(" @MINIMIZE: Second L-BFGS loop recursion completed",
             verbosity.debug)
        d = -1.0 * d.reshape(d0.shape)

    else:
        info(
            " @MINIMIZE: Skipped direction update; direction * gradient insufficient",
            verbosity.debug)
        #d = d0
        d = -1.0 * d_x

    d0[:] = d
    info(" @MINIMIZE: Updated search direction", verbosity.debug)
Example #47
    def process(self):  # parse the buffered data
        while self.current > 129:  # while the queue holds more than two packets of data
            if self.buf[0] == 170 and self.buf[1] == 85 and self.buf[
                    2] == 1 and self.buf[3] == 54:
                # if self.debug:
                #     print("frame header detected, function byte is " + str(self.buf[2]))

                quat_w = self.hex2signedint(6) / 10000.0
                quat_x = self.hex2signedint(8) / 10000.0
                quat_y = self.hex2signedint(10) / 10000.0
                quat_z = self.hex2signedint(12) / 10000.0

                # quat_x=(self.buf[9]*256+self.buf[8])/10000.0
                # quat_y=(self.buf[11]*256+self.buf[10])/10000.0
                # quat_z=(self.buf[13]*256+self.buf[12])/10000.0

                gyro_x = self.hex2signedint(14) / 50.0
                gyro_y = self.hex2signedint(16) / 50.0
                gyro_z = self.hex2signedint(18) / 50.0

                acc_x = self.hex2signedint(20) / 4000.0
                acc_y = self.hex2signedint(22) / 4000.0
                acc_z = self.hex2signedint(24) / 4000.0

                header = Header(stamp=rospy.Time.now())
                header.frame_id = 'pandar'
                imu = Imu()
                imu.header = header
                # quat = Rotation.from_euler('ZYX', [heading, pitch, roll], degrees=True).as_quat()
                imu.orientation.w = quat_w
                imu.orientation.x = quat_x
                imu.orientation.y = quat_y
                imu.orientation.z = quat_z

                imu.angular_velocity.x = gyro_x
                imu.angular_velocity.y = gyro_y
                imu.angular_velocity.z = gyro_z

                imu.linear_acceleration.x = acc_x
                imu.linear_acceleration.y = acc_y
                imu.linear_acceleration.z = acc_z
                # print 0
                # str1=str(rospy.Time.now())+','+str1
                pub.publish(imu)

                print(quat_w, quat_x, quat_y, quat_z, gyro_x, gyro_y, gyro_z, acc_x, acc_y, acc_z)
                # print quat_w,quat_x,quat_y,quat_z

                # datalength = self.buf[3]  # payload length
                framelength = self.buf[4] + 2  # frame length
                # datasum = np.sum(self.buf[0:framelength - 1]) % 256
                # if datasum == self.buf[framelength - 1]:  # checksum passed
                #     self.data_signal = self.buf[4:4 + datalength]
                #     self.data_signal = np.array(
                #         self.data_signal, dtype='uint8')
                #     self.data_signal = self.data_signal.reshape(-1, 2)
                #     self.data_signal = self.data_signal[:, 0] * 256 +   \
                #         self.data_signal[:, 1]
                #     self.newdata = True
                #
                #     if self.debug:
                #         print(self.data_signal)

                self.buf = np.roll(self.buf, -framelength)
                self.current -= framelength
                if self.debug:
                    print("解析到一帧数据")
                # else:  # 校验失败
                #     if self.debug:
                #         print("校验和错误")
                #
                #     if 170 in self.buf[2:self.current]:  # 帧头对,但是校验和错误
                #         temparray = self.buf[2:self.current]
                #         if not isinstance(temparray, list):
                #             temparray = temparray.tolist()
                #         offset = temparray.index(170)
                #
                #         self.buf = np.roll(self.buf, -offset)
                #         self.current -= offset
            # if no frame can be parsed, discard the leading data until data[0] == 170
            elif 170 in self.buf[0:self.current]:
                if self.debug:
                    print("received invalid data")

                # temparray = self.buf[0:self.current]
                # if not isinstance(temparray, list):
                #     temparray = temparray.tolist()
                # offset = temparray.index(170)
                for i in range(len(self.buf)):
                    if self.buf[i + 0] == 170 and self.buf[
                            i + 1] == 85 and self.buf[i + 2] == 1 and self.buf[
                                i + 3] == 54:

                        self.buf = np.roll(self.buf, -i)
                        self.current -= i
                        break
Example #48
    def _truncate_filter(self, filter, thresh=1e-3):
        '''
        truncates the given fourier transform of the filter. this filter will have values that are high in
        only a small region. in this case, one can make a similar filter which is a hard-thresholded version
        of the given filter. this can significantly speed up the computation as the convolution in the fourier
        domain is a multiplication, so only the nonzero values have to be considered.

        first, the smallest region that contains all the values that are above a certain threshold is identified.
        this region is extended slightly so that the region length is N/2^m, where N is the length of the given
        filter and m is some integer. during this process, the adjusted region's start and end point might be
        beyond the given filter. In this case, the filter is wrapped around (periodically extend the given filter
        and then take the adjusted region).
        FIXME: what is the basis of the idea of wrapping around?
        
        in order to use this filter and later reconstruct to the correct original size, keys named
        'start' and 'filter_len' are stored in the output dictionary

        inputs:
        -------
        - filter: rank 1 ndarray which is the fourier representation of the filter
        - thresh: threshold relative to the maximum value of the given filter's absolute values in fourier domain, between 0 and 1

        outputs:
        --------
        - filter_truncated: dict type object containing the following keys:
        coef: the truncated filter
        start: starting index of the fourier domain support
        filter_len: original length of the filter
        '''
        # print(filter)
        filter_len = len(filter)
        
        filter_truncated = {'filter_len':filter_len}

        # FIXME: for consistency the max() function in matlab is implemented below.
        # after running tests, consider replacing with idx_max = np.argmax(filter)
        maxabs = np.abs(filter).max()
        maxangle = np.angle(filter[np.abs(filter) == maxabs]).max()
        idx_max = np.where(np.logical_and(np.abs(filter) == maxabs, np.angle(filter) == maxangle))[0][0]

        filter = np.roll(filter, int(filter_len / 2) - (idx_max + 1))
        # np.where()'s return type is a tuple and so access the 0th index
        idx = np.where(np.abs(filter) > (np.abs(filter).max() * thresh))[0] 
        
        idx1 = idx[0]
        idx2 = idx[-1]

        nonzero_len = idx2 - idx1 + 1
        nonzero_len = int(np.round(filter_len / 2**(np.floor(np.log2(filter_len / nonzero_len)))))

        # before np.round(), add small amount since in np.round(), halfway values are 
        # rounded to the nearest even value, i.e., np.round(2.5) gives 2.0, NOT 3
        # if the amount is too small (1e-17, for example), does not work
        idx1 = int(np.round(np.round((idx1 + idx2) / 2 + 1e-6) - nonzero_len / 2 + 1e-6))
        idx2 = idx1 + int(nonzero_len) - 1
        
        filter = filter[np.arange(idx1, idx2 + 1) % filter_len]

        filter_truncated['coef'] = filter
        filter_truncated['start'] = int(idx1 - (filter_len / 2 - idx_max) + 1)

        return filter_truncated
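A rough standalone sketch of the truncation idea (not the method above, which additionally centres the peak with np.roll and places the kept support symmetrically around it): threshold the Fourier-domain filter, round the kept support up to a length filter_len / 2**m, and remember where it starts. The filter shape below is made up.

import numpy as np

filter_len = 256
freqs = np.fft.fftfreq(filter_len)
filt = np.exp(-0.5 * ((freqs - 0.1) / 0.02) ** 2)    # a made-up band-limited filter
thresh = 1e-3

idx = np.where(np.abs(filt) > thresh * np.abs(filt).max())[0]
nonzero_len = idx[-1] - idx[0] + 1
trunc_len = int(round(filter_len / 2 ** np.floor(np.log2(filter_len / nonzero_len))))
start = idx[0]
coef = filt[np.arange(start, start + trunc_len) % filter_len]   # wrap around if needed
print(nonzero_len, trunc_len, len(coef))             # support rounded to filter_len / 2**m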
def t3_preprocess_data(train, test, structures, contributions, mulliken_charges):

    train = pd.merge(train, contributions, how='left',
                    left_on=['molecule_name', 'atom_index_0', 'atom_index_1', 'type'],
                    right_on=['molecule_name', 'atom_index_0', 'atom_index_1', 'type'])

    # train = pd.merge(train, potential_energy, how='left',
    #                 left_on=['molecule_name'],
    #                 right_on=['molecule_name'])

    train = pd.merge(train, mulliken_charges, how='left',
                    left_on=['molecule_name', 'atom_index_0'],
                    right_on=['molecule_name', 'atom_index'])
    train.drop('atom_index', axis=1, inplace=True)
    # train.rename(inplace=True, columns={'mulliken_charge': 'mulliken_charge_0'})

    # train = pd.merge(train, mulliken_charges, how='left',
    #                 left_on=['molecule_name', 'atom_index_1'],
    #                 right_on=['molecule_name', 'atom_index'])
    # train.drop('atom_index', axis=1, inplace=True)
    # train.rename(inplace=True, columns={'mulliken_charge': 'mulliken_charge_1'})

    # electronegativity and atomic_radius
    # https://www.kaggle.com/vaishvik25/1-r-3-hyperpar-tuning

    # from tqdm import tqdm_notebook as tqdm
    atomic_radius = {'H': 0.38, 'C': 0.77, 'N': 0.75, 'O': 0.73, 'F': 0.71}  # Without fudge factor

    fudge_factor = 0.05
    atomic_radius = {k: v + fudge_factor for k, v in atomic_radius.items()}
    # print(atomic_radius)

    electronegativity = {'H': 2.2, 'C': 2.55, 'N': 3.04, 'O': 3.44, 'F': 3.98}

    # structures = pd.read_csv(structures, dtype={'atom_index':np.int8})

    atoms = structures['atom'].values
    atoms_en = [electronegativity[x] for x in atoms]
    atoms_rad = [atomic_radius[x] for x in atoms]

    structures['EN'] = atoms_en
    structures['rad'] = atoms_rad

    # print(structures.head())

    i_atom = structures['atom_index'].values
    p = structures[['x', 'y', 'z']].values
    p_compare = p
    m = structures['molecule_name'].values
    m_compare = m
    r = structures['rad'].values
    r_compare = r

    source_row = np.arange(len(structures))
    max_atoms = 28

    bonds = np.zeros((len(structures) + 1, max_atoms + 1), dtype=np.int8)
    bond_dists = np.zeros((len(structures) + 1, max_atoms + 1), dtype=np.float32)

    print('Calculating bonds')

    for i in range(max_atoms - 1):
        p_compare = np.roll(p_compare, -1, axis=0)
        m_compare = np.roll(m_compare, -1, axis=0)
        r_compare = np.roll(r_compare, -1, axis=0)

        mask = np.where(m == m_compare, 1, 0)  # Are we still comparing atoms in the same molecule?
        dists = np.linalg.norm(p - p_compare, axis=1) * mask
        r_bond = r + r_compare

        bond = np.where(np.logical_and(dists > 0.0001, dists < r_bond), 1, 0)

        source_row = source_row
        target_row = source_row + i + 1  # Note: Will be out of bounds of bonds array for some values of i
        target_row = np.where(np.logical_or(target_row > len(structures), mask == 0), len(structures),
                              target_row)  # If invalid target, write to dummy row

        source_atom = i_atom
        target_atom = i_atom + i + 1  # Note: Will be out of bounds of bonds array for some values of i
        target_atom = np.where(np.logical_or(target_atom > max_atoms, mask == 0), max_atoms,
                               target_atom)  # If invalid target, write to dummy col

        bonds[(source_row, target_atom)] = bond
        bonds[(target_row, source_atom)] = bond
        bond_dists[(source_row, target_atom)] = dists
        bond_dists[(target_row, source_atom)] = dists

    bonds = np.delete(bonds, axis=0, obj=-1)  # Delete dummy row
    bonds = np.delete(bonds, axis=1, obj=-1)  # Delete dummy col
    bond_dists = np.delete(bond_dists, axis=0, obj=-1)  # Delete dummy row
    bond_dists = np.delete(bond_dists, axis=1, obj=-1)  # Delete dummy col

    print('Counting and condensing bonds')

    bonds_numeric = [[i for i, x in enumerate(row) if x] for row in bonds]
    bond_lengths = [[dist for i, dist in enumerate(row) if i in bonds_numeric[j]] for j, row in
                    enumerate(bond_dists)]
    bond_lengths_mean = [np.mean(x) for x in bond_lengths]
    bond_lengths_median = [np.median(x) for x in bond_lengths]
    bond_lengths_std = [np.std(x) for x in bond_lengths]
    n_bonds = [len(x) for x in bonds_numeric]

    # bond_data = {'bond_' + str(i):col for i, col in enumerate(np.transpose(bonds))}
    # bond_data.update({'bonds_numeric':bonds_numeric, 'n_bonds':n_bonds})

    bond_data = {'n_bonds': n_bonds, 'bond_lengths_mean': bond_lengths_mean,
                 'bond_lengths_std': bond_lengths_std, 'bond_lengths_median': bond_lengths_median}
    bond_df = pd.DataFrame(bond_data)
    structures = structures.join(bond_df)
    # print(structures.head(20))


    train = map_atom_info(train, 0, structures)
    train = map_atom_info(train, 1, structures)

    test = map_atom_info(test, 0, structures)
    test = map_atom_info(test, 1, structures)

    train_p_0 = train[['x_0', 'y_0', 'z_0']].values
    train_p_1 = train[['x_1', 'y_1', 'z_1']].values
    test_p_0 = test[['x_0', 'y_0', 'z_0']].values
    test_p_1 = test[['x_1', 'y_1', 'z_1']].values

    train['dist'] = np.linalg.norm(train_p_0 - train_p_1, axis=1)
    test['dist'] = np.linalg.norm(test_p_0 - test_p_1, axis=1)
    train['dist'] = 1 / (train['dist'] ** 3) # https://www.kaggle.com/vaishvik25/1-r-3-hyperpar-tuning
    test['dist'] = 1 / (test['dist'] ** 3)
    train['dist_x'] = (train['x_0'] - train['x_1']) ** 2
    test['dist_x'] = (test['x_0'] - test['x_1']) ** 2
    train['dist_y'] = (train['y_0'] - train['y_1']) ** 2
    test['dist_y'] = (test['y_0'] - test['y_1']) ** 2
    train['dist_z'] = (train['z_0'] - train['z_1']) ** 2
    test['dist_z'] = (test['z_0'] - test['z_1']) ** 2

    train['type_0'] = train['type'].apply(lambda x: x[0])
    test['type_0'] = test['type'].apply(lambda x: x[0])

    return train, test, structures
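A toy illustration of the rolling-comparison trick used in the bond loop above: rolling the coordinate, molecule-name and radius arrays by one more position per iteration compares every atom with the atom `shift` rows further down, and the molecule mask discards cross-molecule (and wrap-around) pairs. Molecule names and radii below are invented.

import numpy as np

p = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0],     # two 2-atom "molecules"
              [5.0, 5.0, 5.0], [5.0, 5.0, 6.2]])
m = np.array(['mol_a', 'mol_a', 'mol_b', 'mol_b'])
r = np.array([0.4, 0.8, 0.8, 0.8])                  # made-up bonding radii
max_atoms = 2                                       # largest molecule in this toy set

for shift in range(1, max_atoms):
    p_cmp = np.roll(p, -shift, axis=0)
    m_cmp = np.roll(m, -shift)
    r_cmp = np.roll(r, -shift)
    same_mol = m == m_cmp                           # mask out cross-molecule pairs
    dists = np.linalg.norm(p - p_cmp, axis=1)
    bonded = same_mol & (dists > 1e-4) & (dists < r + r_cmp)
    print(shift, bonded)                            # 1 [ True False  True False]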
Example #50
    def _conv_sub_1d(self, data, filter, ds):
        '''
        performs 1d convolution followed by downsampling in real space, given data and filters in frequency domain.
        This corresponds to multiplication followed by 
        periodization (when downsampling) or zeropadding (when upsampling). the returned signal is in real space.
        
        inputs:
        -------
        - data: ndarray with shape (n_data, n_channels, data_len). data to be convolved given in the frequency domain
        - filter: dict or ndarray type given in the frequency domain.
        if filter_format is fourier_multires:
        filter is dict type with the following keys: 'coef', 'filter_len'  
        filter['coef'] is assumed to be a rank 1 list of filters where each filter is rank 1
        From the data length and filter['filter_len'], the ndarray with the same size with that of the data is found and convolved.
        This dict type object filter is an output of _periodize_filter().

        if filter_format is fourier_truncated
        filter is dict type with the following keys: 'coef', 'filter_len', 'start'
        filter['coef'] is assumed to be a rank 1 ndarray
        
        - ds: downsampling factor exponent when represented in power of 2

        outputs:
        --------
        - y_ds: convolved signal in real space followed by downsampling having shape (n_data, n_channels, data_output_len)
        
        FIXME: understand the idea of wrapping around, and other questions in the comments
        '''

        n_data = data.shape[0]
        n_channels = data.shape[1]
        data_len = data.shape[2]

        filter_format = self._filter_format
        if isinstance(filter, dict):
            if filter_format == 'fourier_multires':
                # _periodize_filter() generates a set of filters whose lengths are filter_len/2^0, filter_len/2^1, filter_len/2^2, ...
                # these filters are grouped into a rank 1 list. therefore, given the original size of the filter filter_len,
                # and the length of the data, the length of the filter can be determined which can be found from the list
                coef = filter['coef'][int(np.round(np.log2(filter['filter_len'] / data_len)))]
                # make coef into rank 3 ndarray sized (1, 1, filter_len) for broadcasting
                coef = coef[np.newaxis, np.newaxis, :] 
                yf = data * coef
            elif filter_format == 'fourier_truncated':
                # in this case, filter['coef'] is an ndarray
                start = filter['start']
                coef = filter['coef']
                n_coef = len(coef)
                if n_coef > data_len:
                    # if filter is larger than the given data, create a lowpass filter and periodize it
                    # FIXME: what is the basis of this idea?
                    start0 = start % filter['filter_len']
                    if (start0 + n_coef) <= filter['filter_len']:
                        rng = np.arange(start0, n_coef - 1).astype(int)
                    else:
                        rng = np.concatenate([np.arange(start0, filter['filter_len']), np.arange(n_coef + start0 - filter['filter_len'])], axis=0).astype(int)

                    lowpass = np.zeros(n_coef)
                    lowpass[rng < int(data_len / 2)] = 1
                    lowpass[rng == int(data_len / 2)] = 1/2
                    lowpass[rng == int(filter['filter_len'] - data_len / 2)] = 1/2
                    lowpass[rng > int(filter['filter_len'] - data_len / 2)] = 1

                    # filter and periodize
                    coef = np.reshape(coef * lowpass, [int(n_coef / data_len), data_len]).sum(axis=0)
                # coef is rank 1
                n_coef = len(coef)
                coef = coef[np.newaxis, np.newaxis, :]
                j = int(np.round(np.log2(n_coef / data_len)))
                start = start % data_len

                if start + n_coef <= data_len:
                    # filter support contained in one period, no wrap-around
                    yf = data[:, :, start:n_coef+start] * coef
                else:
                    # filter support wraps around, extract both parts
                    yf = np.concatenate([data[:, :, start:], data[:, :, :n_coef + start - data_len]], axis=2) * coef

        else:
            # filter is ndarray type. perform fourier transform.
            # filter_j is a fraction taken from filter to match length with data_len.
            # if data_len is [10,11,12,13,14,15] and filter being range(100), 
            # filter_j would be [0, 1, 2, (3 + 98)/2, 99, 100].
            # REVIEW: figure out why the shifting is done before multiplying. 
            # Perhaps related to fftshift?
            # take only part of filter and shift it 
            filter_j = np.concatenate([filter[:int(data_len/2)],
                [filter[int(data_len / 2)] / 2 + filter[int(-data_len / 2)] / 2],
                filter[int(-data_len / 2 + 1):]], axis=0)
            filter_j = filter_j[np.newaxis, np.newaxis, :] # shaped (1, 1, data_len)
            yf = data * filter_j
        
        # calculate the downsampling factor with respect to yf
        dsj = ds + np.round(np.log2(yf.shape[2] / data_len))
        assert(float(dsj).is_integer()), "dsj should be an integer"

        if dsj > 0:
            # downsample (periodize in Fourier)
            # REVIEW: don't understand why reshape and sum things. Why is this downsampling in real space? (review 6.003 notes)
            # I tested and see that this is correct. try running the following in a notebook:
            # a = np.sin(np.linspace(0,100,10000)) + np.sin(np.linspace(0,300,10000)); af = np.fft.fft(a[np.newaxis, :]); af2 = np.reshape(af, [4,2500]).sum(axis=0); a2 = np.fft.ifft(af2)
            # fig1,ax1 = plt.subplots(); fig2,ax2 = plt.subplots(); ax1.plot(a); ax2.plot(np.real(a2))
            yf_ds = np.reshape(yf, [n_data, n_channels, int(2**dsj), int(np.round(yf.shape[2]/2**dsj))]).sum(axis=2)
        elif dsj < 0:
            # upsample (zero-pad in Fourier)
            # note that this only happens for fourier_truncated filters, since otherwise
            # filter sizes are always the same as the signal size
            # also, we have to do one-sided padding since otherwise we might break 
            # continuity of Fourier transform
            yf_ds = np.concatenate(
                [yf, np.zeros((n_data, n_channels, int((2**(-dsj) - 1) * yf.shape[2])))],
                axis=2)
        else:
            yf_ds = yf
        if isinstance(filter, dict):
            if filter_format == 'fourier_truncated':
                # result has been shifted in frequency so that the zero frequency is actually at -filter.start+1

                # always recenter if fourier_truncated
                yf_ds = np.roll(yf_ds, filter['start'], axis=2)

        y_ds = np.fft.ifft(yf_ds, axis=2) / 2**(ds/2)
        # the 2**(ds/2) factor seems like normalization

        return y_ds
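The reshape-and-sum in the downsampling branch is the DFT identity "decimation in time equals periodization (aliasing) in frequency". A minimal standalone check, independent of the class above:

import numpy as np

x = np.sin(np.linspace(0.0, 20.0 * np.pi, 64)) + 0.3 * np.cos(np.linspace(0.0, 6.0 * np.pi, 64))
X = np.fft.fft(x)
d = 4                                                # downsampling factor

X_ds = X.reshape(d, x.size // d).sum(axis=0)         # periodize the spectrum
x_ds = np.real(np.fft.ifft(X_ds)) / d                # back to real space
print(np.allclose(x_ds, x[::d]))                     # True: identical to plain decimation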
Example #51
    async def process(self, json: JSONType) -> JSONType:
        total_rebuffer = float(json['rebuffer']) / M_IN_K

        # compute data
        rebuffer_time = total_rebuffer - self.last_rebuffer_time
        last_quality = self.last_quality
        last_bit_rate = self.last_bit_rate

        reward = (get_video_bit_rate(self.video, last_quality) / M_IN_K 
                - REBUF_PENALTY * rebuffer_time / M_IN_K 
                - SMOOTH_PENALTY * np.abs(get_video_bit_rate(self.video, last_quality) 
                                          - last_bit_rate) / M_IN_K)

        # retrieve previous state
        if len(self.s_batch) == 0:
            state = [np.zeros((S_INFO, S_LEN))]
        else:
            state = np.array(self.s_batch[-1], copy=True)

        # compute bandwidth measurement
        bandwidth = max(float(json['bandwidth']), MIN_BW_EST_MBPS * M_IN_K)
        chunk_fetch_time = float(json['last_fetch_time']) 

        # compute number of video chunks left
        video_chunk_remain = get_video_chunks(self.video) - self.video_chunk_count
        self.video_chunk_count += 1

        # dequeue history record
        state = np.roll(state, -1, axis=1)
        next_video_chunk_sizes = []
        for i in range(self.adim):
            next_video_chunk_sizes.append(get_chunk_size(
                self.video, i, self.video_chunk_count
            ))

        total_buffer = float(json['buffer'])
        # this should be S_INFO number of terms
        try:
            state[0, -1] = (
                get_video_bit_rate(self.video, last_quality) / 
                get_max_video_bit_rate(self.video)
            )
            state[1, -1] = total_buffer / M_IN_K / BUFFER_NORM_FACTOR # s
            state[2, -1] = bandwidth / M_IN_K / 8 # k byte / ms
            state[3, -1] = float(chunk_fetch_time) / M_IN_K / BUFFER_NORM_FACTOR # 10 s 
            state[4, :self.adim] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # m byte  
            state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
            print(state[:, -1])
        except ZeroDivisionError:
            if len(self.s_batch) == 0:
                state = [np.zeros((S_INFO, S_LEN))]
            else:
                state = np.array(self.s_batch[-1], copy=True)

        action_prob = self.actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
        action_cumsum = np.cumsum(action_prob)
        bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
        
        self.s_batch.append(state)
        print(f"[Pensieve] > bit rate {bit_rate}")

        self.last_rebuffer_time = total_rebuffer
        self.last_bit_rate = get_video_bit_rate(self.video, last_quality)
        self.last_quality = bit_rate
        return {
            'decision' : float(bit_rate),
        }
    def process(self, sweep):
        mode = self.sensor_config.mode

        if self.sweep == 0:
            self.x_mm = utils.get_range_depths(self.sensor_config, self.session_info) * 1000
            if mode == Mode.SPARSE:
                self.num_sensors, point_repeats, self.data_len = sweep.shape
                self.hist_env = np.zeros((self.num_sensors, self.data_len, self.image_buffer))
            else:
                self.data_len = sweep.size
                self.num_sensors = 1
                if len(sweep.shape) > 1:
                    self.num_sensors, self.data_len = sweep.shape
                self.hist_env = np.zeros(
                    (self.num_sensors, self.data_len, self.image_buffer)
                )

        sweep_data = sweep.copy()

        env = None
        if mode == Mode.SPARSE:
            env = sweep.mean(axis=1)
        else:
            env = np.abs(sweep_data)

        for s in range(self.num_sensors):
            self.hist_env[s, :, :] = np.roll(self.hist_env[s, :, :], 1, axis=1)
            self.hist_env[s, :, 0] = env[s, :]

        plot_data = {
            "sweep_data": sweep_data,
            "env_ampl": env,
            "hist_env": self.hist_env,
            "sensor_config": self.sensor_config,
            "x_mm": self.x_mm,
            "sweep": self.sweep,
            "num_sensors": self.num_sensors,
            "ml_plotting": True,
            "ml_frame_data": None,
            "prediction": None,
            "prediction_hist": None,
            "session_info": self.session_info,
        }

        plot_data["ml_frame_data"] = self.feature_process.feature_extraction(plot_data)

        feature_map = plot_data["ml_frame_data"]["current_frame"]["feature_map"]
        complete = plot_data["ml_frame_data"]["current_frame"]["frame_complete"]
        if complete and self.evaluate and feature_map is not None:
            if plot_data["ml_frame_data"]["frame_info"].get("time_series", 1) > 1:
                feature_map = ml_helper.convert_time_series(
                    plot_data["ml_frame_data"]["current_frame"]["feature_map"],
                    plot_data["ml_frame_data"]["frame_info"]
                )
            try:
                plot_data["prediction"] = self.evaluate(feature_map)
            except Exception as e:
                print(e)
                plot_data["ml_frame_data"]["current_frame"]["frame_complete"] = False
                return plot_data

            prediction_label = plot_data["prediction"]["prediction"]
            plot_data["ml_frame_data"]["frame_list"][-1]["label"] = prediction_label

            if self.prediction_hist is None:
                self.prediction_hist = np.zeros((plot_data["prediction"]["number_labels"],
                                                 self.hist_len))
            predictions = plot_data["prediction"]["label_predictions"]
            self.prediction_hist = np.roll(self.prediction_hist, 1, axis=1)
            for key in predictions:
                pred, idx = predictions[key]
                self.prediction_hist[idx, 0] = pred
            plot_data["prediction_hist"] = self.prediction_hist

        self.sweep += 1

        return plot_data
Example #53
def generate(
    c,
    fs,
    r,
    s,
    L,
    beta=None,
    reverberation_time=None,
    nsample=None,
    mtype=mtype.omnidirectional,
    order=-1,
    dim=3,
    orientation=None,
    hp_filter=True,
):
    """Generate room impulse response.

    Parameters
    ----------
    c : float
        Sound velocity in m/s. Usually between 340 and 350.
    fs : float
        Sampling frequency in Hz.
    r : array_like
        1D or 2D array of floats, specifying the :code:`(x, y, z)` coordinates of the receiver(s)
        in m. Must be of shape :code:`(3,)` or :code:`(x, 3)` where :code:`x`
        is the number of receivers.
    s : array_like
        1D array of floats specifying the :code:`(x, y, z)` coordinates of the source in m.
    L : array_like
        1D array of floats specifying the room dimensions :code:`(x, y, z)` in m.
    beta : array_like, optional
        1D array of floats specifying the reflection coefficients

        .. code-block::

            [beta_x1, beta_x2, beta_y1, beta_y2, beta_z1, beta_z2]

        or

        .. code-block::

            [(beta_x1, beta_x2), (beta_y1, beta_y2), (beta_z1, beta_z2)]

        Must be of shape :code:`(6,)` or :code:`(3, 2)`.

        You must define **exactly one** of :attr:`beta` or
        :attr:`reverberation_time`.
    reverberation_time : float, optional
        Reverberation time (T_60) in seconds.

        You must define **exactly one** of :attr:`beta` or
        :attr:`reverberation_time`.
    nsample : int, optional
        number of samples to calculate, default is :code:`T_60 * fs`.
    mtype : mtype, optional
        Microphone type, one of :class:`mtype`.
        Defaults to :class:`mtype.omnidirectional`.
    order : int, optional
        Reflection order, default is :code:`-1`, i.e. maximum order.
    dim : int, optional
        Room dimension (:code:`2` or :code:`3`), default is :code:`3`.
    orientation : array_like, optional
        1D array direction in which the microphones are pointed, specified
        using azimuth and elevation angles (in radians), default is
        :code:`[0, 0]`.
    hp_filter : boolean, optional
        Enable high-pass filter, the high-pass filter is enabled by default.

    Returns
    -------
    h : array_like
        The room impulse response, shaped `(nsample, len(r))`

    Example
    -------

    >>> import rir_generator
    >>> h = rir_generator.generate(
    ...     c=340,
    ...     fs=16000,
    ...     r=[
    ...       [2, 1.5, 2],
    ...       [2, 1.5, 3]
    ...     ],
    ...     s=[2, 3.5, 2],
    ...     L=[5, 4, 6],
    ...     reverberation_time=0.4,
    ...     nsample=4096,
    ...     mtype=rir_generator.mtype.omnidirectional,
    ... )


    """
    r = np.atleast_2d(np.asarray(r, dtype=np.double)).T.copy()
    assert r.shape[0] == 3

    L = np.asarray(L, dtype=np.double)
    assert L.shape == (3,)

    s = np.asarray(s, dtype=np.double)
    assert s.shape == (3,)

    if beta is not None:
        beta = np.asarray(beta, dtype=np.double)
        assert beta.shape == (6,) or beta.shape == (3, 2)
        beta = beta.reshape(3, 2)

    if (r > L[:, None]).any() or (r < 0).any():
        raise ValueError("r is outside the room")

    if (s > L).any() or (s < 0).any():
        raise ValueError("s is outside the room")

    # Make sure orientation is a 2-element array, even if passed a single value
    if orientation is None:
        orientation = np.zeros(2, dtype=np.double)
    orientation = np.atleast_1d(np.asarray(orientation, dtype=np.double))
    if orientation.shape == (1,):
        orientation = np.pad(orientation, (0, 1), "constant")
    assert orientation.shape == (2,)

    assert order >= -1
    assert dim in (2, 3)

    # Volume of room
    V = np.prod(L)
    # Surface area of walls
    A = L[::-1] * np.roll(L[::-1], 1)

    if beta is not None:
        alpha = np.sum(np.sum(1 - beta ** 2, axis=1) * np.sum(A))

        reverberation_time = max(
            24 * np.log(10.0) * V / (c * alpha),
            0.128,
        )

    elif reverberation_time is not None:
        if reverberation_time != 0:
            S = 2 * np.sum(A)

            alpha = 24 * np.log(10.0) * V / (c * S * reverberation_time)

            if alpha > 1:
                raise ValueError(
                    "Error: The reflection coefficients cannot be "
                    "calculated using the current room parameters, "
                    "i.e. room size and reverberation time. Please "
                    "specify the reflection coefficients or change the "
                    "room parameters."
                )

            beta = np.full((3, 2), fill_value=np.sqrt(1 - alpha), dtype=np.double)
        else:
            beta = np.zeros((3, 2), dtype=np.double)
    else:
        raise ValueError(
            "Error: Specify either RT60 (ex: reverberation_time=0.4) or "
            "reflection coefficients (beta=[0.3,0.2,0.5,0.1,0.1,0.1])"
        )

    if nsample is None:
        nsample = int(reverberation_time * fs)

    if dim == 2:
        beta[-1, :] = 0

    numMics = r.shape[1]

    imp = np.zeros((nsample, numMics), dtype=np.double)

    p_imp = rir.ffi.cast("double*", rir.ffi.from_buffer(imp))
    p_r = rir.ffi.cast("double*", rir.ffi.from_buffer(r))
    p_s = rir.ffi.cast("double*", rir.ffi.from_buffer(s))
    p_L = rir.ffi.cast("double*", rir.ffi.from_buffer(L))
    p_beta = rir.ffi.cast("double*", rir.ffi.from_buffer(beta))
    p_orientation = rir.ffi.cast("double*", rir.ffi.from_buffer(orientation))

    rir.lib.computeRIR(
        p_imp,
        float(c),
        float(fs),
        p_r,
        numMics,
        nsample,
        p_s,
        p_L,
        p_beta,
        mtype.value,
        order,
        p_orientation,
        1 if hp_filter else 0,
    )
    return imp
Example #54
def find_peaks_noise(win, t, x):
    """
  Find peaks in the data via moving polynomial regression  

  Parameters :
  - win : Float. The time window over which to regress.
  - t : Float array. The time series.
  - x : Float 2D array. The dependent variables.
  Returns :
  - t_peaks : the times of the peaks
  - x_peaks : the values of the peaks
  - lra : the likelihood-ratio statistics for each window
  - lows, highs : indices bracketing each selected region
  - maxi : indices of the peak maxima
  - sel : boolean selection flags per window

  """
    # Fit a 2nd degree polynomial on a window of like 1 month
    # Actually we want to do all this only on the I data right ?
    # So we're only passing the I data in x
    n = np.shape(t)[0]
    #k = np.shape(x)[1]
    k = 1
    x.shape = (n, k)
    t_peaks = list()
    x_peaks = list()
    ends = np.zeros([n, k])
    lra = np.zeros([n, k])
    sel = np.zeros([n, k])
    l01a = np.zeros([n, 2, k])
    tl = list()
    x0l = list()
    x1l = list()

    for i, ti in enumerate(t):
        start = i
        try:
            end = np.where(
                t > ti + win)[0][0]  # first index that passes the window
        except IndexError:
            # we've reached the last window
            break
        ends[i] = end
        twin, xwin = t[start:end], x[start:end]
        p0, res0, _, _, _ = np.polyfit(twin, xwin, 1, full=True)
        p1, res1, _, _, _ = np.polyfit(twin, xwin, 2, full=True)

        sig0 = np.sqrt(res0 / (end - start))
        sig1 = np.sqrt(res1 / (end - start))

        # need to repeat t so that dimensions are ok
        twin_r = np.repeat(twin[:, np.newaxis], k, axis=1)
        l0a = 1 / (np.sqrt(2 * np.pi) * sig0) \
              * np.exp(- (xwin - p0[0] * twin_r - p0[1]) ** 2 / (2 * sig0 ** 2))
        l1a = 1 / (np.sqrt(2 * np.pi) * sig1) \
              * np.exp(- (xwin - p1[0] * twin_r ** 2 - p1[1] * twin_r - p1[2]) ** 2 / (2 * sig1 ** 2))   # likelihood ratio array
        # compute the statistic

        l0 = np.prod(l0a, axis=0)
        l1 = np.prod(l1a, axis=0)
        lr = -2 * np.sum(np.log(l0a) - np.log(l1a), axis=0)
        # with a type 1 error of 1 % : compare to 6.64
        #maxi = np.argmax(xwin, axis=0)
        #tmax = twin_r[maxi]
        #xmax = xwin[maxi]
        # the square poly fits much better and is concave
        #sel_polyfit = np.logical_and(lr > 6.64, p1[0] < 0)
        sel_polyfit = np.logical_and(lr > 100, p1[0] < 0)
        lra[i] = lr
        sel[i] = sel_polyfit
        l01a[i, 0] = l0
        l01a[i, 1] = l1
        tl.append(twin)
        x0l.append(p0[0] * twin + p0[1])
        x1l.append(p1[0] * twin**2 + p1[1] * twin + p1[2])
    lows = np.where(
        np.logical_and(np.logical_not(sel[:-1]),
                       np.roll(sel, shift=-1, axis=0)[:-1]))
    highs = np.where(
        np.logical_and(sel[:-1],
                       np.logical_not(np.roll(sel, shift=-1, axis=0)[:-1])))
    highs = (ends[highs].astype(int), highs[1])
    # problem : has to work if k > 1

    if t[highs[0][0]] < t[lows[0][0]] + win:
        highs = (highs[0][1:], highs[1][1:])
    if t[lows[0][-1]] > t[highs[0][-1]]:
        lows = (lows[0][:-1], lows[1][:-1])

    maxi = np.array([
        l0 + np.argmax(x[l0:h0, l1], axis=0)
        for (l0, h0, l1, h1) in zip(lows[0], highs[0], lows[1], highs[1])
        if (l1 == h1)  #and (t[h0] - t[l0] > win) 
    ])

    t_peaks = t[maxi]
    x_peaks = x[maxi]
    #t_peaks = [ np.append(l, tmax[j]) if selection[j] else l for j, l in enumerate(t_peaks) ]
    #x_peaks = [ np.append(l, xmax[j]) if selection[j] else l for j, l in enumerate(x_peaks) ]
    # Not bad, but it would be better to rely on successive points that pass the
    # fit and keep only one point among them?

    return t_peaks, x_peaks, lra, lows, highs, maxi, sel  #, l01a, tl, x0l, x1l
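
# A minimal sketch (hypothetical data) of the per-window likelihood-ratio test
# used in find_peaks_noise above: fit degree-1 and degree-2 polynomials on one
# window and compare -2*log(L0/L1) against 6.64 (roughly a 1 % type-1 error).
import numpy as np

rng = np.random.default_rng(0)
twin = np.linspace(0.0, 1.0, 50)
xwin = -4.0 * (twin - 0.5) ** 2 + 0.05 * rng.standard_normal(twin.size)  # concave bump plus noise

p0, res0, *_ = np.polyfit(twin, xwin, 1, full=True)
p1, res1, *_ = np.polyfit(twin, xwin, 2, full=True)
sig0 = np.sqrt(res0[0] / twin.size)
sig1 = np.sqrt(res1[0] / twin.size)

l0 = np.exp(-(xwin - np.polyval(p0, twin)) ** 2 / (2 * sig0 ** 2)) / (np.sqrt(2 * np.pi) * sig0)
l1 = np.exp(-(xwin - np.polyval(p1, twin)) ** 2 / (2 * sig1 ** 2)) / (np.sqrt(2 * np.pi) * sig1)
lr = -2 * np.sum(np.log(l0) - np.log(l1))
print(lr, lr > 6.64 and p1[0] < 0)  # large statistic and concave fit -> candidate peak window
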
    def generator(self, signal):
        """Generator that yields correlation results each hyperplane of the
signal, for each basis function.

signal: An iterable of hyperplanes of the signal (arrays of shape `shape[1:]`
(or `shape[1:]+(n_fields,)`)), or an iterator over such an iterable.

---
Yield: An hyperplane of the correlation results perpendicular to the slowest
varrying dimension (e.g. YX plane of a ZYX image) + a last dimension of size B
that contains one coefficient per basis function, in the same order as the basis.
"""
        N = len(self.shape)
        if np.iterable(signal):
            it_signal = iter(signal)
        else:
            it_signal = signal
        # if self.n_fields is None:
        #     assert signal.shape == self.shape
        # else:
        #     assert signal.shape == self.shape + (self.n_fields,)
        thickness = len(self.applicability[0])
        halfth = thickness // 2

        #ensures the storage is zero everywhere
        for value in self._res.values():
            value[:] = 0

        #t_hyperplane = 0
        #t_out = 0

        for z in range(self.shape[0] + thickness):
            # Internally, a new hyperplane overwrites the hyperplane that was
            # input thickness-of-the-band planes ago.
            rollingZ = z % thickness

            #t_hp = time.time()
            if z < self.shape[0]:
                # Store the hyperplane in the band
                self._res[(0, ) * N][rollingZ] = next(it_signal)

                # Perform correlation on all the dimensions of the hyperplane,
                # fastest varying dimension first
                for operation in self.planeops:
                    self._perform_an_hyperplane_correlation(
                        rollingZ, **operation)
            else:
                # if close to the edge, just erase the current hyperplane everywhere
                for value in self._res.values():
                    value[rollingZ] = 0
            #t_hyperplane += time.time() - t_hp
            if z >= halfth and z - halfth < self.shape[0]:
                #t_o = time.time()
                # Prepare output
                if self.n_fields is None:
                    out = np.empty(self.shape[1:] + self.basis.shape[1:],
                                   dtype=self.dtype)
                else:
                    out = np.empty(self.shape[1:] + (self.n_fields, ) +
                                   self.basis.shape[1:],
                                   dtype=self.dtype)
                #roll monomial and applicability to be in phase with the current plane
                rollshift = z + 1
                X = np.ascontiguousarray(np.roll(self.X[0].ravel(), rollshift))
                app = np.ascontiguousarray(
                    np.roll(self.applicability[0].ravel(), rollshift))
                # Perform correlation in the slowest varying dimension (along the
                # thickness of the band), in the order of the basis
                for b, index in enumerate(self.basis.T):
                    prior = np.copy(index)
                    prior[0] = 0
                    prior = tuple(prior.tolist())
                    kernel = (app * X**index[0]).reshape(
                        self.X[0].shape).astype(self.dtype)
                    #kernel = (app * X**index[0]).astype(self.dtype)
                    out[..., b] = (self._res[prior] * kernel).sum(axis=0)
                    #out[...,b] = np.dot(np.moveaxis(self._res[prior], 0, -1), kernel)
                #t_out += time.time() - t_o
                yield out
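
# A tiny standalone sketch (hypothetical sizes and weights) of the circular-band
# bookkeeping used by the generator above: a band of `thickness` hyperplanes is
# reused cyclically via z % thickness, and a 1-D kernel is rolled by z + 1, as in
# the generator, to stay in phase with the circular buffer.
import numpy as np

thickness, n_planes = 5, 12
band = np.zeros((thickness, 4, 4))
weights = np.arange(thickness, dtype=float)
for z in range(n_planes):
    rollingZ = z % thickness                 # slot overwritten by plane z
    band[rollingZ] = z                       # stand-in for the incoming hyperplane
    kernel = np.roll(weights, z + 1)         # same rollshift convention as above
    out = (band * kernel[:, None, None]).sum(axis=0)  # correlate along the band
print(out.shape)  # (4, 4): one result hyperplane per step
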
Exemplo n.º 56
0
def neighbors(arr, x, y, n):
    arr = np.roll(np.roll(arr, shift=-x + n // 2, axis=0),
                  shift=-y + n // 2,
                  axis=1)
    return arr[:n, :n]
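
# A quick check (hypothetical input) of the wrap-around behaviour of neighbors
# above: the returned n x n block starts n//2 rows/columns before (x, y), and
# np.roll makes it wrap at the array edges.
import numpy as np

a = np.arange(25).reshape(5, 5)
print(neighbors(a, 0, 0, 3))
# [[24 20 21]
#  [ 4  0  1]
#  [ 9  5  6]]
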
Exemplo n.º 57
0
def test_play():
    f2 = open(path2, 'a')
    total_reward = 0
    for j in range(3):
        obs4steps = np.zeros((4, 84, 84), dtype=np.float32)
        obs = env.reset()
        obs = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)
        obs = obs[0:170, 0:160]
        obs = cv2.resize(obs, (obs.shape[1] * 2, obs.shape[0] * 2))
        # Saliency detection: return the coordinates with the highest saliency
        max_loc = saliency(obs)
        y1 = 0 if max_loc[1] - h < 0 else max_loc[1] - h
        y2 = max_loc[1] + h if max_loc[1] + h < obs.shape[
            0] else obs.shape[0] - 1
        x1 = 0 if max_loc[0] - w < 0 else max_loc[0] - w
        x2 = max_loc[0] + w if max_loc[0] + w < obs.shape[
            1] else obs.shape[1] - 1
        #print y1,y2,x1,x2
        # Crop the salient region
        center = obs[y1:y2, x1:x2]
        # Apply a gradient blur around it
        obs = blur(x1, y1, x2, y2, 50, 0, obs)
        #print obs.shape
        #print center.shape
        # Paste the cropped region back
        obs[y1:y2, x1:x2] = center
        obs = obs[:, :, 0]
        obs = (misc.imresize(obs, (110, 84)))[110 - 84 - 8:110 - 8, :]
        obs4steps[0] = obs
        r = 0
        done = False
        R = 0
        t = 0
        while not done:
            env.render()
            action = agent.act(obs4steps)
            obs, r, done, _ = env.step(action)
            obs = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)
            obs = obs[0:170, 0:160]
            obs = cv2.resize(obs, (obs.shape[1] * 2, obs.shape[0] * 2))
            # Saliency detection: return the coordinates with the highest saliency
            max_loc = saliency(obs)
            y1 = 0 if max_loc[1] - h < 0 else max_loc[1] - h
            y2 = max_loc[1] + h if max_loc[1] + h < obs.shape[
                0] else obs.shape[0] - 1
            x1 = 0 if max_loc[0] - w < 0 else max_loc[0] - w
            x2 = max_loc[0] + w if max_loc[0] + w < obs.shape[
                1] else obs.shape[1] - 1
            # Crop the salient region
            center = obs[y1:y2, x1:x2]
            # Apply a gradient blur around it
            obs = blur(x1, y1, x2, y2, 50, 0, obs)
            # Paste the cropped region back
            obs[y1:y2, x1:x2] = center
            obs = obs[:, :, 0]
            obs = (misc.imresize(obs, (110, 84)))[110 - 84 - 8:110 - 8, :]
            obs4steps = np.roll(obs4steps, 1, axis=0)
            obs4steps[0] = obs
            R += r
            t += 1
        print('test play:', j, 'R:', R)
        total_reward += R
    f2.write(str(total_reward / 3) + "\n")
    f2.close()
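
# A minimal sketch (synthetic frames) of the 4-frame stacking pattern used in
# test_play above: np.roll shifts the stack by one slot along axis 0 and the
# newest frame overwrites slot 0, so slots 1-3 hold the three previous frames.
import numpy as np

obs4steps = np.zeros((4, 84, 84), dtype=np.float32)
for step in range(6):
    frame = np.full((84, 84), step, dtype=np.float32)  # stand-in for a preprocessed observation
    obs4steps = np.roll(obs4steps, 1, axis=0)
    obs4steps[0] = frame
print(obs4steps[:, 0, 0])  # [5. 4. 3. 2.] -> newest first, oldest frame dropped
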
def prepare_displacement_matrices_homogeneous(A1,
                                              b1,
                                              A2,
                                              b2,
                                              displacement=None):
    """Compute matrices used for displacement estimation as defined by equations
(7.32) and (7.33) in Gunnar Farnebäck's thesis "Polynomial Expansion for
Orientation and Motion Estimation". Here we suppose an homogenous translation.

A1,b1: Local polynomial expension coefficients at time 1. A1 is a N+2
dimensional array, where the first N indices indicates the position in the
signal and the last two contains the matrix for each point. In the same way, b1
is a N+1 dimensional array. Such arrays can be obtained via QuadraticToAbc.

A2,b2: Local polynomial expension coefficients at time 2.

displacement: The global translation vector from time 1 to time 2.

----
Returns

A: Advected average of A1 and A2 matrices (Eq. 7.32)

Delta_b: advected difference of b2 and b1 (Eq. 7.33)
"""
    assert A1.shape == A2.shape
    assert A1.shape[:-1] == b1.shape
    assert A1.shape[-1] == b1.shape[-1]
    assert b1.shape == b2.shape
    shape = A1.shape[:-2]
    # N is the dimensionality of the signal we consider here (it might be
    # a hyperplane of the original signal), not the rank of the matrices and
    # vectors.
    N = len(shape)
    if displacement is None:
        displacement = np.zeros(N, dtype=A1.dtype)
    assert displacement.shape == (N, )
    # Integral part of the backward displacement vector
    displ = -np.rint(displacement).astype(np.int64)
    # Advect back A2 and b2 by rolling
    A = np.roll(A2, displ, axis=tuple(range(N)))
    Delta_b = -0.5 * np.roll(b2, displ, axis=tuple(range(N)))
    # Handle the margins: where A2 or b2 is not available after the roll, fall
    # back to A1 and b1 (the commented-out lines instead repeat the last
    # available element of b2).
    for dim, d in enumerate(displ):
        if d >= 0:
            # Use only A1 where A2 is not available
            A[(slice(None, None), ) * dim +
              (slice(0, d), )] = A1[(slice(None, None), ) * dim +
                                    (slice(0, d), )]
            # Use only b1 where b2 is not available (aims for a displacement equal to the homogeneous input)
            Delta_b[(slice(None, None), ) * dim +
                    (slice(0, d), )] = -0.5 * b1[(slice(None, None), ) * dim +
                                                 (slice(0, d), )]
            # Use the last available element of b2
            #Delta_b[(slice(None,None),)*dim + (slice(0,d),)] = -b2[(slice(None,None),)*dim + (slice(0,1),)]
        else:
            # Use only A1 where A2 is not available
            A[(slice(None, None), ) * dim +
              (slice(d, None), )] = A1[(slice(None, None), ) * dim +
                                       (slice(d, None), )]
            # Use only b1 where b2 is not available (aims for a displacement equal to the homogeneous input)
            Delta_b[(slice(None, None), ) * dim +
                    (slice(d, None), )] = -0.5 * b1[
                        (slice(None, None), ) * dim + (slice(d, None), )]
            # Use the last available element of b2
            #Delta_b[(slice(None,None),)*dim + (slice(-d,None),)] = -0.5*b2[(slice(None,None),)*dim + (slice(-1,None),)]
    #Advected average for A1 and A2
    A += A1
    A *= 0.5
    # Advected difference for b1 and b2, to which we add back the forward
    # rounded a priori displacement. Here we have to expand the displacement
    # vector to the same rank as the original signal dimension.
    df = np.zeros(A1.shape[-1], A1.dtype)
    df[-N:] = -displ  #displacement
    Delta_b += 0.5 * b1 + A @ df
    return A, Delta_b
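
# A small usage sketch (hypothetical quadratic-expansion coefficients) showing
# the expected shapes for a 2-D signal: A1/A2 hold one 2x2 matrix per pixel,
# b1/b2 one 2-vector per pixel, plus a global displacement guess of one pixel
# along the second axis.
import numpy as np

H, W = 8, 8
rng = np.random.default_rng(1)
A1 = rng.standard_normal((H, W, 2, 2))
b1 = rng.standard_normal((H, W, 2))
A2 = np.roll(A1, 1, axis=1)   # pretend the whole field moved by +1 column
b2 = np.roll(b1, 1, axis=1)
A, Delta_b = prepare_displacement_matrices_homogeneous(
    A1, b1, A2, b2, displacement=np.array([0.0, 1.0]))
print(A.shape, Delta_b.shape)  # (8, 8, 2, 2) (8, 8, 2)
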
Exemplo n.º 59
0
            y1 = 0 if max_loc[1] - h < 0 else max_loc[1] - h
            y2 = max_loc[1] + h if max_loc[1] + h < obs.shape[
                0] else obs.shape[0] - 1
            x1 = 0 if max_loc[0] - w < 0 else max_loc[0] - w
            x2 = max_loc[0] + w if max_loc[0] + w < obs.shape[
                1] else obs.shape[1] - 1
            # Crop the salient region
            center = obs[y1:y2, x1:x2]
            # Apply a gradient blur around it
            obs = blur(x1, y1, x2, y2, 30, 0, obs)
            # Paste the cropped region back
            obs[y1:y2, x1:x2] = center
            cv2.imwrite(path4 + str(j) + ".png", obs)
            obs = obs[:, :, 0]
            obs = (misc.imresize(obs, (110, 84)))[110 - 84 - 8:110 - 8, :]
            obs4steps = np.roll(obs4steps, 1, axis=0)
            obs4steps[0] = obs
            R += reward
            t += 1
            frame += 1
        print('episode:', i, 'R:', R)
        print('time:', time.time() - start)
        print('frame:', frame)
        agent.stop_episode_and_train(obs4steps, reward, done)
        f.write(str(R) + "\n")
        f.close()
        #if(i % 50 == 0):
        #    agent.save('saliency-MsPacman-4')

    print('Finished')
    #agent.save('saliency-MsPacman-4')
Exemplo n.º 60
0
def update():
    global lines_prog, data, t, frame_num, colors, color_perc_list, \
     show_imgs_for_colors, p, pfft, data_fft, axis_fft, RATE, phist_colors

    # read frame
    img, frame2 = read_frame(0.5)

    # calculate color percentage
    color_prec_frame = []  # list of color percentages for the current frame
    imgs = img  # save the original image
    index = 0
    for color, threshold in colors:
        color_pers_i, img = percent_color_singel(img, color=color, threshold=threshold, disp= str(color))
        color_prec_frame.append(color_pers_i)
        imgs = np.hstack((imgs,img))
        index += 1

    if show_imgs_for_colors == 1:
        #images_per_row = 2
        #cv2.imshow("ALL_1", np.hstack(imgs))
        cv2.imshow("ALL_1", imgs)

    # for x in enumerate(pq_images):
    #     #pimg.image(x)
    #     x.setImage(imgs[i])

        #x.setData(imgs[i])

    # append this frame's color percentages to the rolling history list
    color_perc_list = np.roll(color_perc_list, -1, axis=1)
    color_perc_list[:,-1] = color_prec_frame

    # update data for the line progression plots
    for i, x in enumerate(lines_prog):
        #print(color_perc_list[i,:])
        x.setData(color_perc_list[i,:])

#    map(lambda x,y: x.setData(y), lines_prog, color_perc_list.tolist())

    # update RGB color space histogram and set plot data
    for i, x in enumerate(phist):
        histr = cv2.calcHist([frame2], [i], None, [256], [0, 256])
        histr = cv2.normalize(histr,histr)
        #print(np.shape(histr))
        x.setData(np.reshape(histr, np.shape(histr)[0]))

    # update fft and set data for plot
    for i, x in enumerate(pfft):

        # calc fft
        data_fft = color_perc_list[i,:]
        fft_data = FFT_AMP(data_fft)
        axis_fft = np.fft.fftfreq(len(data_fft), d=1.0/RATE)

        #plot data
        x.setData(x=np.abs(axis_fft), y=fft_data)

    # update Lab colorspace histogram and set data for plot
    frame2Lab = cv2.cvtColor(frame2, cv2.COLOR_BGR2LAB)
    for i, x in enumerate(phist_lab):
        histr = cv2.calcHist([frame2Lab], [i], None, [256], [0, 256])
        histr = cv2.normalize(histr,histr)
        #print(np.shape(histr))
        x.setData(np.reshape(histr, np.shape(histr)[0]))


    #  calc frame rate
    if frame_num%10 == 0:
        elapsed = time.time() - t
        print("fps: " + str(10/elapsed))
        t = time.time()

    app.processEvents()  ## force complete redraw for every plot
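
# A minimal sketch (synthetic percentages) of the rolling history buffer used in
# update above: np.roll shifts every row one slot to the left and the newest
# frame's values overwrite the last column, keeping a fixed-length time series.
import numpy as np

n_colors, history = 3, 5
color_perc_list = np.zeros((n_colors, history))
for frame in range(1, 8):
    color_prec_frame = np.array([frame, frame * 10, frame * 100])  # stand-in percentages
    color_perc_list = np.roll(color_perc_list, -1, axis=1)
    color_perc_list[:, -1] = color_prec_frame
print(color_perc_list[0])  # [3. 4. 5. 6. 7.] -> only the 5 most recent frames remain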