Example 1
def centroid(xarr, yarr, kern=default_kernal, mask=None, mode="same"):
    """Find the centroid of a line following a similar algorithm as
        the center1d algorithm in IRAF.   xarr and yarr should be an area
        around the desired feature to be centroided.  The default kernal
        is used if the user does not specific one.

        The algorithm solves for the solution to the equation

        .. math:: \int (I-I_0) f(x-x_0) \, dx = 0

        returns xc
    """
    if len(yarr) < len(kern):
        raise SpectrographError("Array has to be larger than kernel")

    if mask is not None:
        # at the edges the mask may leave too few points for a reliable
        # convolution, so fall back to interpolating the masked arrays
        if mask.sum() < len(default_kernal):
            warr = np.convolve(yarr, kern, mode=mode)
            xc = np.interp(0, warr[mask], xarr[mask])
            return xc
        else:
            yarr = yarr[mask]
            xarr = xarr[mask]

    # convolve the input array with the kernel
    warr = np.convolve(yarr, kern, mode=mode)

    # interpolate the results
    xc = np.interp(0, warr, xarr)

    return xc
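
A minimal usage sketch. default_kernal is not shown in this snippet; an antisymmetric difference kernel is assumed here so that the convolution crosses zero at the line centre:

import numpy as np

default_kernal = np.array([-1.0, 0.0, 1.0])      # assumed derivative-like kernel
xarr = np.arange(5.0, 14.0)                      # tight window around the feature
yarr = np.exp(-0.5 * ((xarr - 9.3) / 1.5) ** 2)  # synthetic emission line
xc = centroid(xarr, yarr, kern=default_kernal)   # lands close to 9.3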
Example 2
def high_pass_filter(x, y, L):
    # Subtract an L-point moving average from y (a simple high-pass filter).
    y_filter = -np.ones(L) / L
    y_filter[L // 2] += 1
    # A unit impulse at the same offset trims x to the matching 'valid' length.
    x_filter = np.zeros(L)
    x_filter[L // 2] = 1
    return np.convolve(x, x_filter, 'valid'), np.convolve(y, y_filter, 'valid')
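
A quick usage sketch with a synthetic signal (my own example; L should be odd so the delta filter is centred):

import numpy as np

t = np.linspace(0.0, 10.0, 1001)
sig = np.sin(2 * np.pi * 5 * t) + 0.5 * t   # fast tone plus slow drift
t_v, hp = high_pass_filter(t, sig, L=101)   # the drift is largely removed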
Example 3
    def function1D(self, t):
        A  = self.getParamValue(0)
        B  = self.getParamValue(1)
        R  = self.getParamValue(2)
        T0 = self.getParamValue(3)
        Scale = self.getParamValue(4)
        HatWidth  = self.getParamValue(5)
        KConv  = self.getParamValue(6)

        # A/2 Scale factor has been removed to make A and Scale independent
        f_int = Scale*((1-R)*np.power((A*(t-T0)),2)*
                       np.exp(-A*(t-T0))+2*R*A**2*B/np.power((A-B),3) *
                       (np.exp(-B*(t-T0))-np.exp(-A*(t-T0))*(1+(A-B)*(t-T0)+0.5*np.power((A-B),2)*np.power((t-T0),2))))
        f_int[t<T0] = 0

        mid_point_hat = len(f_int)//2
        ppd = np.zeros_like(f_int)
        lowIDX  = int(np.floor(np.max([mid_point_hat-np.abs(HatWidth),0])))
        highIDX = int(np.ceil(np.min([mid_point_hat+np.abs(HatWidth),len(f_int)])))

        ppd[lowIDX:highIDX] = 1.0
        ppd = ppd/sum(ppd)

        gc_x = np.arange(len(f_int), dtype=float)
        gc_x = 2*(gc_x-np.min(gc_x))/(np.max(gc_x)-np.min(gc_x))-1
        gc_f = np.exp(-KConv*np.power(gc_x,2))
        gc_f = gc_f/np.sum(gc_f)

        npad = len(f_int) - 1
        first = npad - npad//2
        f_int = np.convolve(f_int,ppd,'full')[first:first+len(f_int)]
        f_int = np.convolve(f_int,gc_f,'full')[first:first+len(f_int)]

        return f_int
Example 4
def filter(dat, bin_freq, type='exp', window=300, prm=0.2):
    """
        turn psth into firing rate estimate. window size is in ms
    """
    if dat is None:
        return None, None
    window = int(window / (1. / bin_freq * 1000))
    r = np.arange(-window, window + 1)
    kern = np.exp(r * -prm)
    kern[: len(kern) // 2] = 0          # causal half of the exponential kernel
    kern = kern / kern.sum()
    if len(dat.shape) > 1:
        fr = np.zeros_like(dat, dtype=float)
        for d in range(len(dat)):
            fr[d] = np.convolve(dat[d], kern, 'same')
    else:
        fr = np.convolve(dat, kern, 'same')

    return fr, len(kern) // 2
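
A hedged usage sketch (my own toy input; bin_freq is taken to be the bin rate in Hz, so 1./bin_freq*1000 is the bin width in ms):

import numpy as np

spikes = (np.random.rand(2000) < 0.05).astype(float)  # toy PSTH at 1 kHz bins
rate, half_kern = filter(spikes, bin_freq=1000, window=100, prm=0.05)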
Example 5
    def richardson_lucy_deconvolution(self,  psf, iterations=15, 
                                      mask=None):
        """1D Richardson-Lucy Poissonian deconvolution of 
        the spectrum by the given kernel.
    
        Parameters
        ----------
        iterations: int
            Number of iterations of the deconvolution. Note that 
            increasing the value will increase the noise amplification.
        psf: EELSSpectrum
            It must have the same signal dimension as the current 
            spectrum and a spatial dimension of 0 or the same as the 
            current spectrum.
            
        Notes
        -----
        For details on the algorithm see Gloter, A., A. Douiri, 
        M. Tence, and C. Colliex. “Improving Energy Resolution of 
        EELS Spectra: An Alternative to the Monochromator Solution.” 
        Ultramicroscopy 96, no. 3–4 (September 2003): 385–400.
        
        """
        self._check_signal_dimension_equals_one()
        ds = self.deepcopy()
        ds.data = ds.data.copy()
        ds.mapped_parameters.title += (
            ' after Richardson-Lucy deconvolution %i iterations' % 
                iterations)
        if ds.tmp_parameters.has_item('filename'):
            ds.tmp_parameters.filename += (
                '_after_R-L_deconvolution_%iiter' % iterations)
        psf_size = psf.axes_manager.signal_axes[0].size
        kernel = psf()
        imax = kernel.argmax()
        j = 0
        maxval = self.axes_manager.navigation_size
        if maxval > 0:
            pbar = progressbar(maxval=maxval)
        for D in self:
            D = D.data.copy()
            if psf.axes_manager.navigation_dimension != 0:
                kernel = psf(axes_manager=self.axes_manager)
                imax = kernel.argmax()

            s = ds(axes_manager=self.axes_manager)
            mimax = psf_size -1 - imax
            O = D.copy()
            for i in range(iterations):
                first = np.convolve(kernel, O)[imax: imax + psf_size]
                O = O * (np.convolve(kernel[::-1], 
                         D / first)[mimax: mimax + psf_size])
            s[:] = O
            j += 1
            if maxval > 0:
                pbar.update(j)
        if maxval > 0:
            pbar.finish()
        
        return ds
Example 6
def noiseFilter(data_in):

	N = int(np.ceil((4 / b)))  # b, fH, fL are assumed module-level globals
	if not N % 2: N += 1  # Make sure that N is odd.
	n = np.arange(N)
	 
	# Compute a low-pass filter with cutoff frequency fH.
	hlpf = np.sinc(2 * fH * (n - (N - 1) / 2.))
	hlpf *= np.blackman(N)
	hlpf = hlpf / np.sum(hlpf)
	 
	# Compute a high-pass filter with cutoff frequency fL.
	hhpf = np.sinc(2 * fL * (n - (N - 1) / 2.))
	hhpf *= np.blackman(N)
	hhpf = hhpf / np.sum(hhpf)
	hhpf = -hhpf
	hhpf[(N - 1) // 2] += 1
	 
	# Convolve both filters.
	h = np.convolve(hlpf, hhpf)
	s = np.convolve(data_in, hlpf)

	fig, ax = plt.subplots()
	ax.plot(data_in)
	plt.show()
	fig1, ax1 = plt.subplots()
	ax1.plot(s)
	plt.show()
	return s
Example 7
def auto_guess(f, data):
    """
    Use the linewidth and the transmission ratio on and off resonance
    to guess the initial Q values.  Estimate the linewidth by
    smoothing then looking for the extrema of the first
    derivative. This may fail if the resonance is very close to the
    edge of the data.
    """
    p = Parameters()
    bw = f.max() - f.min()
    # Allow f_0 to vary by +/- the bandwidth over which we have data
    p.add('f_0', value = f[np.argmin(abs(data))],
          min = f.min() - bw, max = f.max() + bw)
    off = np.mean((np.abs(data[0]), np.abs(data[-1])))
    p.add('A_mag', value = off,
          min = 0, max = 1e6)
    p.add('A_phase', value = np.mean(np.angle(data)),
          min = -np.pi, max = np.pi)
    width = int(f.size / 10)
    gaussian = np.exp(-np.linspace(-4, 4, width)**2)
    gaussian /= np.sum(gaussian) # not necessary
    smoothed = np.convolve(gaussian, abs(data), mode='same')
    derivative = np.convolve(np.array([1, -1]), smoothed, mode='same')
    # Exclude the edges, which are affected by zero padding.
    linewidth = (f[np.argmax(derivative[width:-width])] -
                 f[np.argmin(derivative[width:-width])])
    p.add('Q', value = p['f_0'].value / linewidth,
          min = 1, max = 1e7) # This seems to stop an occasional failure mode.
    p.add('Q_e_real', value = (p['Q'].value /
                               (1 - np.min(np.abs(data)) / off)),
          min = 1, max = 1e6) # As above.
    p.add('Q_e_imag', value = 0, min = -1e6, max = 1e6)
    return p
Example 8
def gauss_filter(dat, bin_freq, window=300, sigma=100):
    """
        turn psth into firing rate estimate. window size is in ms
    """
    if dat is None:
        return None, None
    window = int(1. / bin_freq * window)
    sigma = int(1. / bin_freq * sigma)
    r = range(-int(window / 2), int(window / 2) + 1)
    gaus = [1 / (sigma * np.sqrt(2 * np.pi)) *
            np.exp(-float(x) ** 2 / (2 * sigma ** 2)) for x in r]
    if len(dat.shape) > 1:
        fr = np.zeros_like(dat, dtype=float)
        for d in range(len(dat)):
            fr[d] = np.convolve(dat[d], gaus, 'same')
    else:
        fr = np.convolve(dat, gaus, 'same')

    return fr, len(gaus) // 2
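
A hedged usage sketch (my own toy input; with bin_freq=1 the window and sigma are interpreted directly in bins):

import numpy as np

psth = np.random.poisson(0.2, size=(4, 1000)).astype(float)  # 4 trials
fr, half_kern = gauss_filter(psth, bin_freq=1, window=300, sigma=100)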
Example 9
def runExperiment3():
  """
  Change task at iteration=500, test continuous learning
  :return:
  """
  Nelements = 20
  noiseLevel = 0.0
  encoder = initializeEncoder(Nelements, seed=1)
  cla, sdrClassifier = initializeClassifiers(Nelements, encoder)
  (negLLTrack,
   accuracyTrack) = runSimulation(encoder, cla, sdrClassifier,
                                  noiseLevel, changeTaskAfter=500)

  plt.figure(5)
  negLLTrack = numpy.array(negLLTrack)
  v = numpy.ones((5,)) / 5
  plt.subplot(2, 2, 1)
  plt.plot(numpy.convolve(negLLTrack[:, 1], v, 'valid'))
  plt.plot(numpy.convolve(negLLTrack[:, 0], v, 'valid'))
  plt.ylim([-4, .1])
  plt.ylabel(' Log-Likelihood')
  plt.xlabel(' Iteration ')
  plt.title(' Noise Level: ' + str(noiseLevel))
  plt.legend(['SDR Classifier', 'CLA Classifier'], loc=4)
  if not os.path.exists('results'):
    os.makedirs('results')
  plt.savefig(os.path.join('results', 'LLvsTraining_ChangeAt500.pdf'))
Example 10
def find_strand_shift(gdb, fwd_track, rev_track):
    # use a single chromosome to find shift between strands
    # that gives max covariance.
    if gdb.assembly == "dm3":
        chrom = gdb.get_chromosome("chr2L")
    else:
        chrom = gdb.get_chromosome("chr21")

    sys.stderr.write("retrieving values\n")
    fwd_vals = fwd_track.get_nparray(chrom)
    rev_vals = rev_track.get_nparray(chrom)

    # smooth values using sliding window
    sys.stderr.write("smoothing values\n")
    win = np.ones(SMOOTH_WIN_SIZE)
    fwd_vals = np.convolve(fwd_vals, win, mode="same")
    rev_vals = np.convolve(rev_vals, win, mode="same")

    # only use regions with high density of sites so all of contribution
    # to covariance comes from these
    fwd_cutoff = scipy.stats.scoreatpercentile(fwd_vals, 95)
    rev_cutoff = scipy.stats.scoreatpercentile(rev_vals, 95)
    fwd_vals[fwd_vals < fwd_cutoff] = 0
    rev_vals[rev_vals < rev_cutoff] = 0

    # find the shift that yields the max covariance
    max_cov_shift = find_max_cov(fwd_vals, rev_vals)

    return max_cov_shift
Example 11
 def _jacobian(self, param, y, weights=None):
     if weights is None:
         weights = 1.
     if self.convolved is True:
         counter = 0
         grad = np.zeros(len(self.axis.axis))
         for component in self:  # Cut the parameters list
             if component.active:
                 component.fetch_values_from_array(
                     param[
                         counter:counter +
                         component._nfree_param],
                     onlyfree=True)
                 if component.convolved:
                     for parameter in component.free_parameters:
                         par_grad = np.convolve(
                             parameter.grad(self.convolution_axis),
                             self.low_loss(self.axes_manager),
                             mode="valid")
                         if parameter._twins:
                             for par in parameter._twins:
                                 np.add(par_grad, np.convolve(
                                     par.grad(
                                         self.convolution_axis),
                                     self.low_loss(self.axes_manager),
                                     mode="valid"), par_grad)
                         grad = np.vstack((grad, par_grad))
                 else:
                     for parameter in component.free_parameters:
                         par_grad = parameter.grad(self.axis.axis)
                         if parameter._twins:
                             for par in parameter._twins:
                                 np.add(par_grad, par.grad(
                                     self.axis.axis), par_grad)
                         grad = np.vstack((grad, par_grad))
                 counter += component._nfree_param
         to_return = grad[1:, self.channel_switches] * weights
     else:
         axis = self.axis.axis[self.channel_switches]
         counter = 0
         grad = axis
         for component in self:  # Cut the parameters list
             if component.active:
                 component.fetch_values_from_array(
                     param[
                         counter:counter +
                         component._nfree_param],
                     onlyfree=True)
                 for parameter in component.free_parameters:
                     par_grad = parameter.grad(axis)
                     if parameter._twins:
                         for par in parameter._twins:
                             np.add(par_grad, par.grad(
                                 axis), par_grad)
                     grad = np.vstack((grad, par_grad))
                 counter += component._nfree_param
         to_return = grad[1:, :] * weights
     if self.signal.metadata.Signal.binned is True:
         to_return *= self.signal.axes_manager[-1].scale
     return to_return
Example 12
    def output(self):
        """The natural output for this analyzer is the analytic signal"""
        data = self.input.data
        sampling_rate = self.input.sampling_rate
        
        a_signal = ts.TimeSeries(
            data=np.zeros(self.freqs.shape + data.shape, dtype='D'),
            sampling_rate=sampling_rate)
        if self.freqs.ndim == 0:
            w = self.wavelet(self.freqs,self.sd,
                             sampling_rate=sampling_rate,ns=5,
                                                     normed='area')

            nd = (w.shape[0] - 1) // 2
            a_signal.data[...] = (np.convolve(data, np.real(w), mode='same') +
                                  1j*np.convolve(data, np.imag(w), mode='same'))
        else:    
            for i,(f,sd) in enumerate(zip(self.freqs,self.sd)):
                w = self.wavelet(f,sd,sampling_rate=sampling_rate,
                                 ns=5,normed='area')

                nd = (w.shape[0] - 1) // 2
                a_signal.data[i, ...] = (np.convolve(data, np.real(w), mode='same') +
                                         1j*np.convolve(data, np.imag(w), mode='same'))
                
        return a_signal
Example 13
 def smooth(self, signal, pad=True):
     """
     Returns smoothed signal (or it's n-th derivative).
     
     Parameters
     ---------- 
     y : array_like, shape (N,)
         the values of the time history of the signal.
     pad : bool
        pad first and last values to lessen the end effects.
         
     Returns
     -------
     ys : ndarray, shape (N)
         the smoothed signal (or it's n-th derivative).
     """
     coeff = self._coeff
     n = (np.size(coeff) - 1) // 2
     y = np.squeeze(signal)
     if pad:
         first_vals = y[0] - abs(y[n:0:-1] - y[0])
         last_vals = y[-1] + abs(y[-2:-n - 2:-1] - y[-1]) 
         y = np.concatenate((first_vals, y, last_vals))
         n *= 2 
     d = y.ndim
     if d > 1:
         y1 = y.reshape(y.shape[0], -1)
         res = []
         for i in range(y1.shape[1]):
             res.append(np.convolve(y1[:, i], coeff)[n:-n])
         res = np.asarray(res).T
     else:
         res = np.convolve(y, coeff)[n:-n]
     return res
Example 14
 def apply_window_function(cls, input_array, window_size, output_array):
     sum_filter = np.ones((window_size,), dtype=np.float32)/window_size
     squares = np.square(input_array)
     sum_of_squares = np.convolve(squares, sum_filter, mode='valid')
     sums = np.convolve(input_array, sum_filter, mode='valid')
     squares_of_sums = np.square(sums)
     output_array[:] = squares_of_sums/sum_of_squares
Example 15
def find_blinks_using_edge(data, plot = False, **kwargs):
    """Find location of blinks in data"""
    global window_size_
    records = OrderedDict()
    window = np.ones(window_size_)/window_size_
    t, y = data[:,0], data[:,1]
    # Smooth out the vectors.
    yvec = np.convolve(y, window, 'same')
    records['smooth'] = (t, yvec)
    newY = 0.5*yvec.mean() - yvec
    newY = newY + np.fabs(newY)
    window = np.ones(window_size_)/(window_size_)
    yy = np.convolve(newY, window, 'same')
    blinks = []
    # NOTE: get_blink is expected to zero out the detected region of yy in
    # place; otherwise this loop would never terminate.
    while yy.max() > 10:
        i = np.argmax(yy)
        isBlink, a = get_blink(i, yy)
        if isBlink:
            blinks.append((i, a))

    xvec, yvec = [], []
    for i, x in sorted(blinks):
        xvec.append(t[i])
        yvec.append(x)
    return xvec, yvec
Example 16
def slidingWindowV(P,inner=3,outer=64,maxM=50,minM=7,maxT=59,norm=True):
	""" Enhance the constrast vertically (along frequency dimension)

		Cut off extreme values and demean the image
		Utilize numpy convolve to get the mean at a given pixel
		Remove local mean with inner exclusion region

		Args:
			P: 2-d numpy array image
			inner: inner exclusion region 
			outer: length of the window
			maxM: size of the output image in the y-dimension
			minM: lower frequency cutoff; rows below it are set to the mean
			maxT: number of time columns used for statistics and processing
			norm: boolean to cut off extreme values

		Returns:
			Q: 2-d numpy array, contrast enhanced vertically
	"""
	Q = P.copy()
	m, n = Q.shape
		
	if norm:
		mval, sval = np.mean(Q[minM:maxM,:maxT]), np.std(Q[minM:maxM,:maxT])
		fact_ = 1.5
		Q[Q > mval + fact_*sval] = mval + fact_*sval
		Q[Q < mval - fact_*sval] = mval - fact_*sval
		Q[:minM,:] = mval
	wInner = np.ones(inner)
	wOuter = np.ones(outer)
	for i in range(maxT):
		Q[:,i] = Q[:,i] - (np.convolve(Q[:,i],wOuter,'same') - np.convolve(Q[:,i],wInner,'same'))/(outer - inner)
	Q[Q < 0] = 0.

	return Q[:maxM,:]
Example 17
def find_center_by_convolution(IM, **kwargs):
    """ Center the image by convolution of two projections along each axis.
        Code from the ``linbasex`` juptyer notebook

    Parameters
    ----------
    IM: numpy 2D array
        image data

    Returns
    -------
    center: tuple
        (row-center, col-center)

    """
    # projection along axis=0 of image (rows)
    QL_raw0 = IM.sum(axis=1)
    # projection along axis=1 of image (cols)
    QL_raw1 = IM.sum(axis=0)

    # self-convolve the projections; each peaks at twice the centre coordinate
    conv_0 = np.convolve(QL_raw0, QL_raw0, mode='full')
    conv_1 = np.convolve(QL_raw1, QL_raw1, mode='full')

    # Take the first max, should there be several equal maxima.
    # 10May16 - axes swapped - check this
    center = (np.argmax(conv_0)/2, np.argmax(conv_1)/2)

    if "projections" in kwargs.keys():
        return center, conv_0, conv_1
    else:
        return center
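
A small self-contained check (my own synthetic image); the self-convolution of each projection peaks at twice the centre coordinate, hence the argmax/2 above:

import numpy as np

yy, xx = np.mgrid[0:64, 0:64]
IM = np.exp(-((xx - 40.0) ** 2 + (yy - 25.0) ** 2) / 50.0)
print(find_center_by_convolution(IM))  # approximately (25.0, 40.0)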
Example 18
def plot_data(data, nplots = 4):
    global window_size_
    window = np.ones(window_size_) / window_size_
    tvec, yvec = data[:,0], data[:,1]
    pylab.subplot(nplots, 1, 1)
    pylab.plot(tvec, yvec, label="raw data")
    pylab.legend()

    yvec = np.convolve(yvec, window, 'same')
    pylab.subplot(nplots, 1, 2)
    pylab.plot(tvec, yvec, label='Window size = %s' % window_size_)
    pylab.plot([0, tvec[-1]], [0.5*np.mean(yvec)]*2, label = '0.5*Mean pupil size')
    pylab.legend()

    pylab.subplot(nplots, 1, 4)
    # When area reduces to half of eye pupil, it should be considered.
    newY = 0.5*yvec.mean() - yvec
    newY = newY + np.fabs(newY)
    window = np.ones(3*window_size_)/(3*window_size_)

    yy = np.convolve(newY, window, 'same')
    pylab.plot(tvec, yy, label='Blinks')

    pylab.xlabel("Time (seconds)")
    outfile = 'output.png'
    print("[INFO] Writing to %s" % outfile)
    pylab.savefig(outfile)
Example 19
def get_divergence_diversity_sliding(aft, block_length, VERBOSE=0):
    '''Get local divergence and diversity in a sliding window'''
    cons_ind = Patient.get_initial_consensus_noinsertions(aft, return_ind=True)
    ind_N = cons_ind == 5
    cons_ind[ind_N] = 0
    aft_nonanc = 1.0 - aft[:, cons_ind, np.arange(aft.shape[2])]
    aft_nonanc[:, ind_N] = 0

    aft_var = (aft * (1 - aft)).sum(axis=1)

    struct = np.ones(block_length)

    dg = np.ma.array(np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                                         axis=1, arr=aft_nonanc), hard_mask=True)
    ds = np.ma.array(np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                                         axis=1, arr=aft_var), hard_mask=True)

    # NOTE: normalization happens based on actual coverage
    norm = np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                               axis=1, arr=(~aft[:, 0].mask))

    dg.mask = norm < block_length
    dg /= norm

    ds.mask = norm < block_length
    ds /= norm

    x = np.arange(dg.shape[1]) + (block_length - 1) / 2.0

    return (x, dg, ds)
Example 20
def lsf2poly(lsf):
    """Convert line spectral frequencies to prediction filter coefficients

    returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.

    .. doctest::

        >>> from spectrum import lsf2poly
        >>> lsf = [0.7842 ,   1.5605  ,  1.8776 ,   1.8984,    2.3593]
        >>> a = lsf2poly(lsf)

    # array([  1.00000000e+00,   6.14837835e-01,   9.89884967e-01,
    # 9.31594056e-05,   3.13713832e-03,  -8.12002261e-03 ])

    .. seealso:: poly2lsf, rc2poly, ac2poly, rc2is
    """
    #   Reference: A.M. Kondoz, "Digital Speech: Coding for Low Bit Rate Communications
    #   Systems" John Wiley & Sons 1994 ,Chapter 4

    # Line spectral frequencies must be real.

    lsf = numpy.array(lsf)

    if max(lsf) > numpy.pi or min(lsf) < 0:
        raise ValueError('Line spectral frequencies must be between 0 and pi.')

    p = len(lsf) # model order

    # Form zeros using the LSFs and unit amplitudes
    z  = numpy.exp(1.j * lsf)

    # Separate the zeros to those belonging to P and Q
    rQ = z[0::2]
    rP = z[1::2]

    # Include the conjugates as well
    rQ = numpy.concatenate((rQ, rQ.conjugate()))
    rP = numpy.concatenate((rP, rP.conjugate()))

    # Form the polynomials P and Q, note that these should be real
    Q  = numpy.poly(rQ)
    P  = numpy.poly(rP)

    # Form the sum and difference filters by including known roots at z = 1 and
    # z = -1

    if p%2:
        # Odd order: z = +1 and z = -1 are roots of the difference filter, P1(z)
        P1 = numpy.convolve(P, [1, 0, -1])
        Q1 = Q
    else:
        # Even order: z = -1 is a root of the sum filter, Q1(z) and z = 1 is a
        # root of the difference filter, P1(z)
        P1 = numpy.convolve(P, [1, -1])
        Q1 = numpy.convolve(Q, [1,  1])

    # Prediction polynomial is formed by averaging P1 and Q1

    a = .5 * (P1+Q1)
    return a[:-1]  # do not return the last element
Example 21
def watrous(z,scale,kernel=numpy.array([1.,4.,6.,4.,1.])/16.):
    z=numpy.float32(z)
    s=z.shape
    sk=kernel.shape[0]

    if len(s) == 1:
        n=s[0]
        w=numpy.zeros((s[0]*3,scale),dtype=float)
        temp=z[0:n]
        temp=temp[::-1]
        w[0:n,0]=temp
        w[n:s[0]+n,0]=z
        temp=z[s[0]-n:]
        temp=temp[::-1]
        w[s[0]+n:,0]=temp
        for i in range(0,scale-1):
           # print i
            k1=numpy.zeros(int((sk-1)*2.**i+1),dtype=float)
            i1=numpy.array((numpy.arange(sk))*2.**i,dtype=int)
            k1[i1]=kernel
         
            tsmooth=numpy.convolve(w[:,i],k1,mode='same')
            #tsmooth=scipy.signal.fftconvolve(w[:,i],k1,mode='same')    
            #print tsmooth[1000]
            w[:,i]=w[:,i]-tsmooth
            w[:,i+1]=tsmooth
        
        w=w[n:s[0]+n,:]
    
    elif len(s)==2:
        w=numpy.zeros((s[0],s[1],scale),dtype=float)
        w[:,:,0]=z

        for i in range(0,scale-1):
            k1=numpy.zeros(int((sk-1)*2.**i+1),dtype=float)
            i1=numpy.array((numpy.arange(sk))*2.**i,dtype=int)
            k1[i1]=kernel
            k2=numpy.outer(k1,k1)   # 2-D separable kernel (dot would give a scalar)
            # numpy.convolve is 1-D only; use scipy's FFT convolution for 2-D
            # (assumes import scipy.signal at module level)
            tsmooth=scipy.signal.fftconvolve(w[:,:,i],k2,mode='same')
            w[:,:,i]=w[:,:,i]-tsmooth            
            w[:,:,i+1]=tsmooth
    
    elif len(s)==3:
        w=numpy.zeros((s[0],s[1],s[2],scale),dtype=float)
        for l in range(0,s[2]):
            w[:,:,l,0]=z[:,:,l]
            for i in range(0,scale-1):
                k1=numpy.zeros(int((sk-1)*2.**i+1),dtype=float)
                i1=numpy.array((numpy.arange(sk))*2.**i,dtype=int)
                k1[i1]=kernel
                k2=numpy.outer(k1,k1)
                # numpy.convolve is 1-D only; use scipy's FFT convolution for 2-D
                tsmooth=scipy.signal.fftconvolve(w[:,:,l,i],k2,mode='same')
                w[:,:,l,i]=w[:,:,l,i]-tsmooth
                w[:,:,l,i+1]=tsmooth
    else:
        print("Wrong dimensions!")
        return -1
    return w
Example 22
def cohere():
    """
    Compute the coherence of two signals
    mpl_examples/pylab_examples/cohere_demo.py
    """
    import numpy as np
    from smartplotlib import xyplot, subplot, alias


    # make a little extra space between the subplots

    dt = 0.01
    t = np.arange(0, 30, dt)
    nse1 = np.random.randn(len(t))                 # white noise 1
    nse2 = np.random.randn(len(t))                 # white noise 2
    r = np.exp(-t/0.05)

    cnse1 = np.convolve(nse1, r, mode='same')*dt   # colored noise 1
    cnse2 = np.convolve(nse2, r, mode='same')*dt   # colored noise 2

    # two signals with a coherent part and a random part
    s1 = 0.01*np.sin(2*np.pi*10*t) + cnse1
    s2 = 0.01*np.sin(2*np.pi*10*t) + cnse2

    ps1 = xyplot(t, s1, fmt='b-', y2= s2, fmt2="g-",
                 axes=211, xlabel="time", ylabel="s1 & s2")

    ps1.go("fclear", "axes", "plot")

    c = xyplot.cohere(s1, s2, 256, 1./dt,
                      ylabel="coherence", axes=212)

    c.go("axes", "plot", "draw", "show")
    return c
Example 23
def score_region(motifs, seq):
    # code the sequence
    coded_seq = {}
    for base in 'ACGT':
        coded_seq[base] = np.zeros(len(seq), dtype='float32')
    for i, base in enumerate(seq.upper()):
        coded_seq[base][i] = 1
    # code the reverse strand: each base keyed at its mirrored position
    coded_RC_seq = {}
    for base in 'ACGT':
        coded_RC_seq[base] = np.zeros(len(seq), dtype='float32')
    for i, base in enumerate(seq.upper()):
        coded_RC_seq[base][len(seq) - i - 1] = 1


    motif_scores = []
    for motif in motifs:
        score_mat = motif.motif_data
        scores = np.zeros(len(seq)-len(score_mat)+1, dtype='float32')
        for base, base_scores in zip('ACGT', score_mat.T):
            scores += np.convolve(coded_seq[base], base_scores, mode='valid')

        RC_scores = np.zeros(len(seq)-len(score_mat)+1, dtype='float32')
        # 'TGCA' pairs each PWM column (in ACGT order) with its complement base
        for base, base_scores in zip('TGCA', score_mat.T):
            RC_scores += np.convolve(coded_RC_seq[base], base_scores, mode='valid')

        max_scores = np.vstack((scores, RC_scores)).max(0)
        motif_scores.append( max_scores.mean() )

    return np.array(motif_scores)
Example 24
def csd():
    import numpy as np
    from smartplotlib import xyplot, alias
    # make some data
    dt = 0.01
    t = np.arange(0, 30, dt)
    nse1 = np.random.randn(len(t))                 # white noise 1
    nse2 = np.random.randn(len(t))                 # white noise 2
    r = np.exp(-t/0.05)

    cnse1 = np.convolve(nse1, r, mode='same')*dt   # colored noise 1
    cnse2 = np.convolve(nse2, r, mode='same')*dt   # colored noise 2

    # two signals with a coherent part and a random part
    s1 = 0.01*np.sin(2*np.pi*10*t) + cnse1
    s2 = 0.01*np.sin(2*np.pi*10*t) + cnse2

    xy = xyplot.derive(xlabel="time", ylabel="s1 & s2",
                       xlim=(0,5), axes=211)


    csd = xyplot.csd(s1, s2, 256, 1./dt, axes=212)

    xy.go("fclear", "axes")
    xy.plot(t, s1, 'b-', t, s2, 'g-')
    csd.go("plot", "axes", "show", "draw")
Example 25
def get_spline(data):
	""" Returns array of cubic spline interpolation polynoms (for every interval [x[i -1]; x[i]] ) """
	h = set_h(data)
	A = set_matrix_A(h)
	B = set_vector_B(data, h)
	m = find_vector_m(A, B)	
	spline_array = []

	for i in range(1,len(data)):
		xi_minus_x_cub = [(data[i][0] ** 3), -3 * (data[i][0] ** 2), 3*(data[i][0]),   -1]
		s1 = list(np.convolve( (m[i - 1]  / (6 * h[i-1])), xi_minus_x_cub))	
		x_minus_xi_1_cub = [-(data[i-1][0] ** 3), 3 * (data[i-1][0] ** 2), -3 * data[i-1][0],  1]
		s2 = list(np.convolve( (m[i] / (6 * h[i-1])), x_minus_xi_1_cub ))
		ai = data[i-1][1] - ((m[i-1]*h[i-1]**2)/6)
		s3 = list(np.convolve((ai/h[i-1]), [data[i][0], -1]))
		bi = data[i][1] - ((m[i]*h[i-1]**2)/6)
		s4 = list(np.convolve((bi/h[i-1]), [-data[i-1][0], 1]))

		iter_length = max(len(s1), len(s2), len(s3), len(s4))
		for k in range(iter_length - len(s1)): s1.append(0)
		for k in range(iter_length - len(s2)): s2.append(0)
		for k in range(iter_length - len(s3)): s3.append(0)
		for k in range(iter_length - len(s4)): s4.append(0)

		spline = [0 for t in range(iter_length)]
		for j in range(iter_length):
			spline[j] = s1[j] + s2[j] + s3[j] + s4[j] 
		spline_array.append(spline)
	return spline_array
Example 26
def running_average_masked(obs, ws, min_valid_fraction=0.95):
    '''
    calculates a running average via convolution, fixing the edges
    obs     --  observations (a masked array)
    ws      --  window size (number of points to average)
    '''
    #tmp_vals = np.convolve(np.ones(ws, dtype=float), obs*(1-obs.mask), mode='same')
    tmp_vals = np.convolve(np.ones(ws, dtype=float), obs.filled(0), mode='same')

    # if the array is not masked, edges need to be explicitly fixed due to smaller counts
    if len(obs.mask.shape) == 0:
        tmp_valid = ws*np.ones_like(tmp_vals)
        # fix the edges. using mode='same' assumes zeros outside the range
        if ws%2==0:
            tmp_vals[:ws//2]*=float(ws)/np.arange(ws//2,ws)
            if ws//2>1:
                tmp_vals[-ws//2+1:]*=float(ws)/np.arange(ws-1,ws//2,-1.0)
        else:
            tmp_vals[:ws//2]*=float(ws)/np.arange(ws//2+1,ws)
            tmp_vals[-ws//2:]*=float(ws)/np.arange(ws,ws//2,-1.0)

    # if the array is masked, then we get the normalizer from counting the unmasked values
    else:
        tmp_valid = np.convolve(np.ones(ws, dtype=float), (1-obs.mask), mode='same')

    run_avg = np.ma.array(tmp_vals / tmp_valid)
    run_avg.mask = tmp_valid < ws * min_valid_fraction

    return run_avg
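
A small check of the masked branch (my own toy input):

import numpy as np

obs = np.ma.array(np.ones(10), mask=[0, 0, 1, 0, 0, 0, 0, 1, 1, 0])
avg = running_average_masked(obs, ws=4, min_valid_fraction=0.5)
# windows with fewer than 2 of their 4 points valid come back masked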
Example 27
def perbaseToWindowAverageMasked( 
                            V, 
                            arMasked,
                            cooStart,
                            cooStop,
                            wndWidth,
                            kernel='sum',  # can be sum, or mean
                           ):
    assert arMasked.shape==V.shape, (arMasked.shape,V.shape)
    
    
    krn = np.ones( (wndWidth,), 'int32' )
        
    Vconv = np.convolve( V*((1-arMasked).astype('uint64')), krn, 'full' )
    Vconv = Vconv[ (wndWidth-1):Vconv.shape[0]:wndWidth ]
        
    assert Vconv.shape[0]==(V.shape[0]//wndWidth + min(V.shape[0]%wndWidth,1))
        
    nBpMasked = np.convolve( arMasked, np.ones(wndWidth,'int32'), 'full' )
    nBpMasked = nBpMasked[ (wndWidth-1):nBpMasked.shape[0]:wndWidth ]
    
    assert nBpMasked.shape[0]==Vconv.shape[0]
    
    outStarts = cooStart+np.arange(Vconv.shape[0])*wndWidth
    
    nBpUnmasked = wndWidth - nBpMasked

    if kernel=='mean':
        nBpUnmasked[-1] = cooStop - outStarts[-1] + 1
        
        Vconv = (ma.masked_array( Vconv, nBpUnmasked == 0 ) / nBpUnmasked.astype('float64')).filled(0.)
        
    return outStarts,Vconv,nBpUnmasked
Example 28
def make_synth(rc,wavelet):
    '''
    Convolves reflectivities with wavelet.

    INPUT
    rc: 2D numpy array containing reflectivities
    wavelet

    OUTPUT
    synth: 2D numpy array containing seismic data

    Works with 1D arrays now (2015-05-07).
    '''
    nt=np.size(wavelet)
    if rc.ndim>1:
        [n_samples, n_traces] = rc.shape
        synth = np.zeros((n_samples+nt-1, n_traces))
        for i in range(n_traces):
            synth[:,i] = np.convolve(rc[:,i], wavelet)
        synth = synth[int(np.ceil(nt/2)):-int(np.ceil(nt/2)), :]
        synth = np.concatenate((synth, np.zeros((1, n_traces))))
    else:
        n_samples = rc.size
        synth = np.zeros(n_samples+nt-1)
        synth = np.convolve(rc, wavelet)
        synth = synth[int(np.ceil(nt/2)):-int(np.ceil(nt/2))]
        synth = np.concatenate((synth, [0]))
    return synth
Example 29
    def __init__(self, 
                 simulator,
                 n = 60 * 60 * 2,  # 1 hour
                 stdev_coef = 1.5,
                 days = 5,
                 trading_hours_per_day = 3.75,
                 tick_size = 1,
                 smooth_n = 3
                 ):
        self.__dict__.update(locals())
        del self.self
        
        self.price = self.simulator.history['last_price'].astype('int')
        self.vol = self.simulator.history['vol']
        self.m = int(days * trading_hours_per_day * 60 * 60 * 2)
        self.w = np.ones(self.n + 1) / float(self.n + 1)
        self.y = self.price - np.convolve(self.price, self.w, mode='same')
        denorm = np.sqrt(np.convolve(self.y**2, self.w, mode='same'))
        denorm_mean = denorm.mean()
        denorm2 = (denorm > denorm_mean) * denorm + (denorm <= denorm_mean) * denorm_mean
#        self.ny = self.y
#        self.ny = (denorm > 0) * self.y / denorm
        self.ny = (denorm2 > 0) * self.y / denorm2
        self.ny_mean_EMA = EMA(self.m * 1. / self.n)
        self.ny_stdev_EMA = EMA(self.m * 1. / self.n)
        
        self.start_tick = self.n * 4
        self.saliency = np.zeros(self.price.max() + self.simulator.stopprofit + 10)
        self.saliency2 = np.zeros(self.price.max() + self.simulator.stopprofit + 10)
        self.saliency_EMA = EMA(self.m)
        self.saliency2_EMA = EMA(self.m)
        self.mean_saliency_EMA = EMA(self.m * 1. / self.n)
        
        assert self.smooth_n % 2 == 1
        self.smooth_w = np.ones((self.smooth_n - 1) * self.tick_size + 1) / self.smooth_n 
Example 30
def lowpass( x, dt, cutoff, window='hann', repeat=0 ):
    """
    Lowpass filter

    Parameters
    ----------
        x : samples
        dt : sampling interval
        cutoff : cutoff frequency
        window : can be either 'hann' for zero-phase Hann window filter
                 or an integer n for an n-pole Butterworth filter.
        repeat : if nonzero, apply the filter a second time; a negative
                 value runs the Butterworth filter backwards as well
                 (zero-phase forward-backward filtering).
    if not cutoff:
        return x
    if window == 'hann':
        n = 2 * int( 0.5 / (cutoff * dt) ) + 1
        if n > 0:
            w = 0.5 - 0.5 * np.cos(
                2.0 * np.pi * np.arange( n ) / (n - 1) )
            w /= w.sum()
            x = np.convolve( x, w, 'same' )
            if repeat:
                x = np.convolve( x, w, 'same' )
    else:
        import scipy.signal
        wn = cutoff * 2.0 * dt
        b, a = scipy.signal.butter( window, wn )
        x = scipy.signal.lfilter( b, a, x )
        if repeat < 0:
            x = scipy.signal.lfilter( b, a, x[...,::-1] )[...,::-1]
        elif repeat:
            x = scipy.signal.lfilter( b, a, x )
    return x
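
A short usage sketch (my own signal):

import numpy as np

dt = 0.001
t = np.arange(0.0, 1.0, dt)
x = np.sin(2 * np.pi * 5 * t) + 0.2 * np.sin(2 * np.pi * 200 * t)
y_hann = lowpass(x, dt, cutoff=20.0)                       # zero-phase Hann window
y_butt = lowpass(x, dt, cutoff=20.0, window=4, repeat=-1)  # 4-pole Butterworth,
                                                           # forward then backward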
Example 31
def convolve_spectra_with_COS_LSF(input_x,
                                  input_flux,
                                  rest_wavelength,
                                  redshift,
                                  vel_kms=True,
                                  chan=None,
                                  directory_with_COS_LSF='./'):

    if directory_with_COS_LSF[-1] != '/':
        directory_with_COS_LSF += '/'

    if vel_kms:
        input_vel = input_x
    else:
        input_vel = ang_to_vel(input_x, rest_wavelength, redshift)

    input_delta_x = np.abs(input_x[0] - input_x[1])
    input_total_x_range = np.max(input_x) - np.min(input_x)
    observed_lambda = rest_wavelength * (1. + redshift)

    if chan is None:
        if observed_lambda > 1450.:
            chan = 'G160M'
            angstroms_per_pixel = 0.012
        else:
            chan = 'G130M'
            angstroms_per_pixel = 0.0096

    elif chan not in ('G160M', 'G130M'):
        raise ValueError(
            'Improper channel (chan) passed. Acceptable inputs are None, G130M, and G160M, the latter two must be strings!'
        )

    lsf = np.loadtxt('%sCOS_%s_LSF.dat' % (directory_with_COS_LSF, chan),
                     delimiter=',',
                     comments='#')
    if chan == 'G130M':
        lsf_wavelengths = np.array(
            [1150., 1200., 1250., 1300., 1350., 1400., 1450.])
    else:
        lsf_wavelengths = np.array(
            [1450., 1500., 1550., 1600., 1650., 1700., 1750.])
    lsf_index = find_nearest_index(
        lsf_wavelengths,
        observed_lambda) + 1  # +1 because first column is pixel number
    lsf_to_use = lsf[:, lsf_index]

    if vel_kms:
        lsf_delta_x = (np.abs(lsf[0, 0] - lsf[1, 0]) * angstroms_per_pixel *
                       c_kms) / (observed_lambda)
    else:
        lsf_delta_x = (np.abs(lsf[0, 0] - lsf[1, 0]) * angstroms_per_pixel)

    number_of_input_points = int(input_total_x_range / lsf_delta_x)
    resampled_input_flux, resampled_input_vel = signal.resample(
        1. - input_flux, number_of_input_points, input_vel)

    convolved_line = np.convolve(resampled_input_flux, lsf_to_use, mode='same')
    resampled_input_flux = 1. - resampled_input_flux
    convolved_line = 1. - convolved_line
    if vel_kms:
        resampled_input_wavelength = vel_to_ang(resampled_input_vel,
                                                rest_wavelength, redshift)
    else:
        resampled_input_wavelength = signal.resample(input_x,
                                                     number_of_input_points)

    return resampled_input_vel, resampled_input_wavelength, convolved_line
Example 32
# Department of Mechanical Engineering
import numpy as np
import matplotlib.pyplot as plt

dt = 0.01
t = np.arange(0, 30, dt)
np.random.seed(1)

nse1 = np.random.randn(len(t))  # white noise 1
nse2 = np.random.randn(len(t))  # white noise 2

r = np.exp(-t / 0.05)

cnse1 = np.convolve(nse1, r, mode="same") * dt
cnse2 = np.convolve(nse2, r, mode="same") * dt

s1 = 0.01 * np.sin(2 * np.pi * 10 * t) + cnse1
s2 = 0.01 * np.sin(2 * np.pi * 10 * t) + cnse2

fig, ax = plt.subplots(2, 1)

ax[0].plot(t, s1, t, s2)
ax[0].set_xlim(0, 5)
ax[0].set_xlabel('time')
ax[0].set_ylabel('s1 and s2')
ax[0].grid(True)

cxy, f = ax[1].csd(s1, s2, 256, 1. / dt)
ax[1].set_ylabel('CSD (dB)')
Example 33
#------------------------------------------------------------
# Generate random x, y with a given covariance length
np.random.seed(1)
x = np.linspace(0, 1, 500)
h = 0.01
C = np.exp(-0.5 * (x - x[:, None]) ** 2 / h ** 2)
y = 0.8 + 0.3 * np.random.multivariate_normal(np.zeros(len(x)), C)

#------------------------------------------------------------
# Define a normalized top-hat window function
w = np.zeros_like(x)
w[(x > 0.12) & (x < 0.28)] = 1

#------------------------------------------------------------
# Perform the convolution
y_norm = np.convolve(np.ones_like(y), w, mode='full')
valid_indices = (y_norm != 0)
y_norm = y_norm[valid_indices]

y_w = np.convolve(y, w, mode='full')[valid_indices] / y_norm

# trick: convolve with x-coordinate to find the center of the window at
#        each point.
x_w = np.convolve(x, w, mode='full')[valid_indices] / y_norm

#------------------------------------------------------------
# Compute the Fourier transforms of the signal and window
y_fft = np.fft.fft(y)
w_fft = np.fft.fft(w)

yw_fft = y_fft * w_fft
Example 34
def smooth(x, window_len=11, window='hanning', periodic=False):
    """smooth the data using a window with requested size.
    
    if periodic = True
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal 
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.

    if periodic = False
    do not modify the incoming signal
    
    input:
        x: the input signal 
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.

    output:
        the smoothed signal
        
    example:

    t = linspace(-2, 2, 50)
    x = sin(t) + randn(len(t)) * 0.1
    y = smooth(x)
    
    see also: 
    
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter
 
    TODO: the window parameter could be the window itself if an array instead of a string   
    from: http://www.scipy.org/Cookbook/SignalSmooth
    modified by vcvicek
    """

    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")

    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")

    if window_len < 3:
        return x

    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")

    if periodic:
        s = numpy.r_[2 * x[0] - x[window_len - 1::-1], x,
                     2 * x[-1] - x[-1:-window_len:-1]]
    else:
        s = x

    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        w = getattr(numpy, window)(window_len)

    if periodic:
        y = numpy.convolve(w / w.sum(), s, mode='same')
        return y[window_len:-window_len + 1]
    else:
        y = numpy.convolve(w / w.sum(), s, mode='valid')
        return y
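
A runnable version of the docstring example (non-periodic branch; note the 'valid' output is shorter than the input):

import numpy
t = numpy.linspace(-2, 2, 50)
x = numpy.sin(t) + numpy.random.randn(len(t)) * 0.1
y = smooth(x, window_len=11, window='hamming')  # len(y) == len(x) - 10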
Example 35
def CalucNeighborExp(feat, rate, exp, conv):
    target_movie_len = len(feat)
    neighbor = target_movie_len * rate

    self_distance = np.zeros((len(feat), len(feat)))
    # euclid distance
    for i in range(len(feat)):
        self_distance[i] = np.linalg.norm(feat - feat[i], axis=1)

    # Bhattacharyya distance
    # feat_sum = feat.sum(axis=1)
    # feat_sum[feat_sum==0] = 1.0
    # for i in range(len(feat)):
    #         tmp1 = np.sqrt(feat*feat[i])
    #         tmp1 = tmp1.sum(axis=1)
    #         tmp2 = np.sqrt(feat_sum*feat_sum[i])
    #         self_distance[i] = np.sqrt(1-tmp1.astype(np.float32)/tmp2)
    # self_distance[self_distance!=self_distance] = 0.0

    threshold = otsu(self_distance.flatten())

    alpha = exp
    beta = alpha / float(target_movie_len)
    f = lambda x: (math.e)**(beta * x)
    neighbor_num = np.zeros(target_movie_len)
    idx = [True for i in range(target_movie_len)]
    for i in range(target_movie_len):
        s = i - int(neighbor / 2.) if i - int(neighbor / 2.) >= 0 else 0
        e = i + int(neighbor / 2.) if i + int(
            neighbor / 2.) < target_movie_len else target_movie_len - 1

        l = np.arange(len(feat))
        # first_len = len(l[:s])
        # latter_len = len(l[e+1:])
        inter_len = len(l[s:e + 1])
        # first = np.arange(first_len).astype(np.float32)
        # if first_len != 0 and first_len != 1: first /= first.max()
        # latter = np.arange(latter_len).astype(np.float32)
        # if latter_len != 0 and latter_len != 1: latter /= latter.max()

        first = np.array(l[:s])
        latter = np.array(l[e + 1:])
        first = np.array(list(map(f, first - s + 1)))
        latter = np.array(list(map(f, -(latter - e - 1))))
        inter = np.array([np.nan for _ in range(inter_len)])
        ll = np.concatenate((first, inter, latter))
        ll[ll != ll] = 0

        # r = np.array(list(map(f,ll)))
        # r[r!=r] = 0

        tmp = np.copy(idx)
        tmp[s:e + 1] = False
        # neighbor_num[i] = sum(self_distance[i][tmp]<threshold)
        neighbor_num[i] = sum(ll[self_distance[i] < threshold])

    num = conv
    kernel = np.ones(num) / num
    neighbor_num = np.convolve(neighbor_num, kernel, mode='same')

    # neighbor_num = neighbor_num/float(target_movie_len)

    return neighbor_num
Example 36
    def load_data(ticker='AAPL',
                  momentum_window=30,
                  newsTimeToMarket=0,
                  X_window_average=40,
                  set_verbosity=True):
        X_path = '../tensorflow_model/for_server/SentimentSingleNewsFullNoNorm/' + str(
            ticker) + '.csv'
        Y_path = '../tensorflow_model/for_server/DataSetIndexes/indexes' + str(
            ticker) + '.csv'

        x = pd.read_csv(X_path)
        x.drop('Unnamed: 0', axis=1, inplace=True)
        x = x.rename(index=str, columns={"initTime": "PUBLICATION_DATE"})
        x = x.sort_values(by=['PUBLICATION_DATE'])
        x = x.reset_index(drop=True)
        y = pd.read_csv(Y_path)
        for i, row in x.iterrows():
            x.at[i, 'PUBLICATION_DATE'] = datetime.strptime(
                x['PUBLICATION_DATE'][i],
                '%Y-%m-%d %H:%M:%S') + timedelta(hours=newsTimeToMarket)

        momentum_window = 30
        y = y.rename(index=str, columns={"Unnamed: 0": "DATE"})

        for i, row in y.iterrows():
            y['DATE'].at[i] = datetime.strptime(y['DATE'][i],
                                                '%Y-%m-%d %H:%M:%S')
        z = list()
        for i in range(0, y.shape[0] - momentum_window):
            z.append((y['close'][i + momentum_window] - y['close'][i]) /
                     y['close'][i])

        y = y.reset_index(drop=True)
        y.drop(np.arange(y.shape[0] - momentum_window, y.shape[0]),
               inplace=True)
        y = y.reset_index(drop=True)
        y['labels'] = [sign(entry) for entry in z]
        min_max_scaler = preprocessing.MinMaxScaler()

        initDate = max(y['DATE'][0], x['PUBLICATION_DATE'][0])
        finalDate = min(y['DATE'][len(y) - 1],
                        x['PUBLICATION_DATE'][len(x) - 1])
        i = 0
        j = 0

        close = []
        labels = []
        pos = []
        neg = []

        dates = []
        # align start dates
        while (y['DATE'][j] < initDate):
            j += 1
        while (x['PUBLICATION_DATE'][i] < initDate):
            i += 1

        while (x['PUBLICATION_DATE'][i] < finalDate
               and y['DATE'][j] < finalDate):
            timeSlotPos = list()
            timeSlotNeg = list()
            while (i < len(x) - 1 and y['DATE'][j] > x['PUBLICATION_DATE'][i]):
                timeSlotPos.append(x['POSITIVE'][i])
                timeSlotNeg.append(x['NEGATIVE'][i])
                i += 1
            if (len(timeSlotPos) == 0):
                timeSlotPos.append(0)
                timeSlotNeg.append(0)
            pos.append(np.mean(np.asarray(timeSlotPos), axis=0))
            neg.append(np.mean(np.asarray(timeSlotNeg), axis=0))

            close.append(y['close'][j])
            labels.append(y['labels'][j])
            dates.append(
                str(y['DATE'][j].year) + '/' + str(y['DATE'][j].month))

            j += 1

        pos = np.convolve(np.asarray(pos),
                          np.repeat(1.0, X_window_average) / X_window_average,
                          'same')
        neg = np.convolve(np.asarray(neg),
                          np.repeat(1.0, X_window_average) / X_window_average,
                          'same')

        Data.pos = pos
        Data.neg = neg
        Data.Y = labels
Example 37
matplotlib.rc('xtick', labelsize=35) 
matplotlib.rc('ytick', labelsize=35) 

import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
# 'text.latex.unicode' is not set: the key no longer exists in recent Matplotlib

# %% Stimuli
T = 5000
D = 3
smooth = 100
noise = np.random.randn(T,D)
X = noise.copy()
for ii in range(D):
    X[:,ii] = np.convolve(noise[:,ii],np.ones(smooth),'same')/smooth

# %% Network settings
N = 20
Phi = np.random.randn(D,N)
J = np.random.randn(N,N)/N**0.5
J = 0.5*(J + J.T)  #make symmetric
J = J - np.diag(np.diag(J))   # zero the self-coupling (diagonal) terms
#vv = np.random.randn(N)
#J = np.outer(vv,vv)*0.1 + np.random.randn(N,N)*0.05
#J = J_nMF.copy()

def Current(h,J,s):
    theta = h + J @ s
    return theta
Example 38
 # Comparison with impedances: FREQUENCY DOMAIN (TWCs from impedance sources, params: R_S, frequency_R, a_factor)
 TWC200_4 = TravelingWaveCavity(0.876e6, 200.222e6, 3.899e-6)
 TWC200_5 = TravelingWaveCavity(1.38e6, 200.222e6, 4.897e-6)
 # indVoltageTWC = InducedVoltageTime(beam, profile, [TWC200_4, TWC200_4, TWC200_5, TWC200_5])
 indVoltageTWC = InducedVoltageTime(beam, profile, [TWC200_4])
 indVoltage = TotalInducedVoltage(beam, profile, [indVoltageTWC])
 indVoltage.induced_voltage_sum()
 plt.plot(indVoltage.time_array, indVoltage.induced_voltage*1e-6, color='limegreen', label='Time domain w FFT (imp)')
 
 # Comparison with impedances: TIME DOMAIN
 TWC200_4.wake_calc(profile.bin_centers - profile.bin_centers[0])
 TWC200_5.wake_calc(profile.bin_centers - profile.bin_centers[0])
 # wake1 = 2*(TWC200_4.wake + TWC200_5.wake)
 wake1 = TWC200_4.wake
 Vind = -profile.Beam.ratio*profile.Beam.Particle.charge*e*\
     np.convolve(wake1, profile.n_macroparticles, mode='full')[:140]
 plt.plot(convtime[:140], Vind*1e-6, ':k', label='Time domain w conv (imp)')
 
 # Wake from impulse response
 OTFB_4.TWC.impulse_response_gen(omega_c, profile.bin_centers)
 OTFB_5.TWC.impulse_response_gen(omega_c, profile.bin_centers)
 OTFB_4.TWC.compute_wakes(profile.bin_centers)
 OTFB_5.TWC.compute_wakes(profile.bin_centers)
 # wake2 = 2*(OTFB_4.TWC.W_beam + OTFB_5.TWC.W_beam)
 wake2 = OTFB_4.TWC.W_beam
 Vind = -profile.Beam.ratio*profile.Beam.Particle.charge*e*\
     np.convolve(wake2, profile.n_macroparticles, mode='full')[:140]
 plt.plot(convtime[:140], Vind*1e-6, '--', color='turquoise', label='From wake, OTFB')
 plt.xlabel("Time [s]")
 plt.ylabel("Induced voltage [MV]")
 plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
Example 39
def plot_boxcar():
    """Makes a plot showing the effect of convolution with a boxcar window.
    """
    # start with a square signal
    signal = thinkdsp.SquareSignal(freq=440)
    wave = signal.make_wave(duration=1, framerate=44100)

    # and a boxcar window
    window = np.ones(11)
    window /= sum(window)

    # select a short segment of the wave
    segment = wave.segment(duration=0.01)

    # and pad with window out to the length of the array
    N = len(segment)
    padded = thinkdsp.zero_pad(window, N)

    # compute the first element of the smoothed signal
    prod = padded * segment.ys
    print(sum(prod))

    # compute the rest of the smoothed signal
    smoothed = np.zeros(N)
    rolled = padded
    for i in range(N):
        smoothed[i] = sum(rolled * segment.ys)
        rolled = np.roll(rolled, 1)

    # plot the results
    segment.plot(color=GRAY)
    smooth = thinkdsp.Wave(smoothed, framerate=wave.framerate)
    smooth.plot()
    thinkplot.config(xlabel='Time(s)', ylim=[-1.05, 1.05])
    thinkplot.save(root='convolution2')

    # compute the same thing using np.convolve
    segment.plot(color=GRAY)
    ys = np.convolve(segment.ys, window, mode='valid')
    smooth2 = thinkdsp.Wave(ys, framerate=wave.framerate)
    smooth2.plot()
    thinkplot.config(xlabel='Time(s)', ylim=[-1.05, 1.05])
    thinkplot.save(root='convolution3')

    # plot the spectrum before and after smoothing
    spectrum = wave.make_spectrum()
    spectrum.plot(color=GRAY)

    ys = np.convolve(wave.ys, window, mode='same')
    smooth = thinkdsp.Wave(ys, framerate=wave.framerate)
    spectrum2 = smooth.make_spectrum()
    spectrum2.plot()
    thinkplot.config(xlabel='Frequency (Hz)',
                     ylabel='Amplitude',
                     xlim=[0, 22050])
    thinkplot.save(root='convolution4')

    # plot the ratio of the original and smoothed spectrum
    amps = spectrum.amps
    amps2 = spectrum2.amps
    ratio = amps2 / amps    
    ratio[amps<560] = 0
    thinkplot.plot(ratio)

    thinkplot.config(xlabel='Frequency (Hz)',
                     ylabel='Amplitude ratio',
                     xlim=[0, 22050])
    thinkplot.save(root='convolution5')


    # plot the same ratio along with the FFT of the window
    padded = thinkdsp.zero_pad(window, len(wave))
    dft_window = np.fft.rfft(padded)

    thinkplot.plot(abs(dft_window), color=GRAY, label='boxcar filter')
    thinkplot.plot(ratio, label='amplitude ratio')

    thinkplot.config(xlabel='Frequency (Hz)',
                     ylabel='Amplitude ratio',
                     xlim=[0, 22050])
    thinkplot.save(root='convolution6')
Example 40
reveal_type(np.count_nonzero(
    A, axis=0))  # E: Union[numpy.signedinteger[Any], numpy.ndarray]

reveal_type(np.isfortran(i8))  # E: bool
reveal_type(np.isfortran(A))  # E: bool

reveal_type(np.argwhere(i8))  # E: numpy.ndarray
reveal_type(np.argwhere(A))  # E: numpy.ndarray

reveal_type(np.flatnonzero(i8))  # E: numpy.ndarray
reveal_type(np.flatnonzero(A))  # E: numpy.ndarray

reveal_type(np.correlate(B, A, mode="valid"))  # E: numpy.ndarray
reveal_type(np.correlate(A, A, mode="same"))  # E: numpy.ndarray

reveal_type(np.convolve(B, A, mode="valid"))  # E: numpy.ndarray
reveal_type(np.convolve(A, A, mode="same"))  # E: numpy.ndarray

reveal_type(np.outer(i8, A))  # E: numpy.ndarray
reveal_type(np.outer(B, A))  # E: numpy.ndarray
reveal_type(np.outer(A, A))  # E: numpy.ndarray
reveal_type(np.outer(A, A, out=C))  # E: SubClass

reveal_type(np.tensordot(B, A))  # E: numpy.ndarray
reveal_type(np.tensordot(A, A))  # E: numpy.ndarray
reveal_type(np.tensordot(A, A, axes=0))  # E: numpy.ndarray
reveal_type(np.tensordot(A, A, axes=(0, 1)))  # E: numpy.ndarray

reveal_type(np.isscalar(i8))  # E: bool
reveal_type(np.isscalar(A))  # E: bool
reveal_type(np.isscalar(B))  # E: bool
Example 41
def plot(paths,
         strategies,
         limits=(0, 1),
         convolve=False,
         mylabels=None,
         myloc='lower right'):
    font = {'family': 'normal', 'size': 21}
    matplotlib.rc('font', **font)
    current_palette = sns.color_palette()

    sns.set_style('whitegrid')
    figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
    plt.ylim(limits[0], limits[1])
    plt.xlim(0, 1000)
    linewidth = 3
    markersize = 7

    results = read_results(results_file=paths[0])
    for i in range(1, len(paths)):
        results.update(read_results(results_file=paths[i]))
    if len(strategies) == 0:
        strategies = list(results.keys())
    if mylabels is None:
        mylabels = strategies
    plot_policy_by_episode = False

    if plot_policy_by_episode:
        strategies = []
        labels = []
        keys = []
        for i in range(0, prop.NUM_EPISODES):
            strategies.append("policy_{}".format(str(i)))
            labels.append("policy_{}".format(str(i)))
            keys.append(i)
    else:
        labels = mylabels
        keys = range(len(strategies))

    with_stddev = True
    if convolve:
        with_stddev = False
    width = 10
    for i in keys:
        means = np.array(results[strategies[i]][0])
        # Convolve output (moving average)
        if convolve:
            means = np.convolve(means, np.ones(width), 'valid') / width
        std = np.array(results[strategies[i]][1])
        x = [x * ACQ_SIZE + INIT_SIZE for x in range(len(means))]
        if labels[i].endswith('ALIL'):
            color = 'cyan'
            plt.plot(x,
                     means,
                     marker='.',
                     label=labels[i],
                     linewidth=linewidth,
                     markersize=markersize,
                     color=color)
            if with_stddev:
                plt.fill_between(x, means + std, means - std, alpha=0.1)
        elif labels[i].endswith('andom'):
            color = 'black'
            plt.plot(x,
                     means,
                     marker='.',
                     label=labels[i],
                     linestyle='--',
                     linewidth=5,
                     markersize=markersize,
                     color=color)
            if with_stddev:
                plt.fill_between(x,
                                 means + std,
                                 means - std,
                                 alpha=0.1,
                                 color=color)
        else:
            plt.plot(x,
                     means,
                     marker='.',
                     label=labels[i],
                     linewidth=linewidth,
                     markersize=markersize)
            if with_stddev:
                plt.fill_between(x, means + std, means - std, alpha=0.1)
    plt.legend(loc=myloc, ncol=3, prop={'size': 20})
    plt.ylabel("Test data accuracy score")
    plt.xlabel("Labeling effort")
    #plt.savefig(path + '.pdf', format="pdf", bbox_inches='tight', pad_inches=0.0)
    plt.show()
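The smoothing above relies on the np.convolve(means, np.ones(width), 'valid') / width idiom, a plain moving average. A standalone sketch with hypothetical stand-in data (not from the original results files):

import numpy as np

width = 10
means = np.arange(20, dtype=float)  # hypothetical stand-in series
smoothed = np.convolve(means, np.ones(width), 'valid') / width
print(len(smoothed) == len(means) - width + 1)  # True: 'valid' trims the edges
print(smoothed[0])  # mean of means[0:10] -> 4.5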
Esempio n. 42
0
def smooth(y, box_pts):
    box = np.ones(box_pts) / box_pts
    # mode='same' keeps the output length equal to len(y); values near the
    # edges are biased low where the box window overhangs the signal.
    y_smooth = np.convolve(y, box, mode='same')
    return y_smooth.tolist()
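A quick standalone illustration (hypothetical input) of the edge bias that mode='same' introduces:

import numpy as np

y = np.ones(10)
s = np.convolve(y, np.ones(5) / 5, mode='same')
print(s)  # interior values are 1.0; the outermost values drop to 0.6 and 0.8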
Esempio n. 43
0
def window_rms(a):
    # With window_length equal to len(a), the 'valid' convolution yields a
    # single value: the RMS of the entire signal.
    window_length = len(a)
    square = np.power(a, 2)
    window = np.ones(window_length) / float(window_length)
    return np.sqrt(np.convolve(square, window, 'valid'))
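A sanity check, assuming window_rms as defined above and a hypothetical input: the RMS of a unit-amplitude sine is 1/sqrt(2).

import numpy as np

a = np.sin(np.linspace(0, 2 * np.pi, 1000, endpoint=False))
print(window_rms(a))  # ~[0.7071]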
Esempio n. 44
0
def smooth_curve(x):
    window_len = 11
    # Reflect-pad both ends so the 'valid' convolution returns an array of
    # the same length as x, then trim half a window from each side.
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, 2)
    y = np.convolve(w / w.sum(), s, mode='valid')
    half = window_len // 2
    return y[half:len(y) - half]
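A quick check (hypothetical input) that the reflect-padding keeps the output length equal to the input length:

import numpy as np

x = np.random.rand(50)
print(len(smooth_curve(x)) == len(x))  # True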
Esempio n. 45
0
def movingaverage(values, window):
    weights = np.repeat(1.0, window) / window
    smas = np.convolve(values, weights, 'valid')
    return smas  # as a numpy array
Esempio n. 46
0
    def _ema(self, data, window):
        # Exponentially increasing weights; np.flip plus np.convolve's own
        # kernel reversal leaves the largest weight on the newest sample.
        weights = np.exp(np.linspace(-1., 1., window))
        weights /= weights.sum()
        ema = np.convolve(data[:, 0], np.flip(weights, 0))
        return ema[len(data) - 1:-len(data) + 1]
Esempio n. 47
0
def get_total(incidence=True, smooth=True):
    def countData(dicti, date, value, count):
        date = str(date)
        value = str(value)

        def add2dict_dict(dicti, value):
            if value not in dicti:
                dicti[value] = dict()
            # return dicti

        def add2dict(dicti, value, count):
            if value not in dicti:
                dicti[value] = count
            else:
                dicti[value] += count
            # return dicti

        add2dict_dict(dicti, date)
        add2dict(dicti[date], value, count)
        # return dicti

    def get_cases(dicti, date):
        plus = minus = null = 0
        try:
            minus = -dicti[date]['-1']
        except KeyError:
            pass
        try:
            plus = dicti[date]['1']
        except KeyError:
            pass
        try:
            null = dicti[date]['0']
        except KeyError:
            pass
        return null, plus, minus

    def download_raw_data():
        time_case_dict = dict()
        time_death_dict = dict()
        notFinished = True
        offset = 0

        while notFinished:
            with urllib.request.urlopen(
                    "https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_COVID19/"
                    "FeatureServer/0/query?where=1%3D1&outFields=Meldedatum,Refdatum,AnzahlFall,AnzahlTodesfall,NeuerFall,NeuerTodesfall&"
                    "outSR=4326&" + "resultOffset={:}".format(offset) +
                    "&f=json") as url:
                data = json.loads(url.read().decode())
                if 'exceededTransferLimit' not in data:
                    notFinished = False

                offset = offset + len(data['features'])
                print(offset)
                # if offset > 10000:
                #    notFinished = False
                for it in data[
                        'features']:  # alternatively, use Refdatum instead of Meldedatum
                    countData(time_case_dict,
                              int(it['attributes']['Meldedatum'] / 1000),
                              it['attributes']['NeuerFall'],
                              it['attributes']['AnzahlFall'])
                    countData(time_death_dict,
                              int(it['attributes']['Meldedatum'] / 1000),
                              it['attributes']['NeuerTodesfall'],
                              it['attributes']['AnzahlTodesfall'])
        save_obj(time_case_dict, 'case')
        save_obj(time_death_dict, 'death')

    if not os.path.exists('obj'):
        os.mkdir('obj')
    if not os.path.isfile('obj/case.pkl'):
        download_raw_data()
    if time.time() - os.path.getmtime('obj/case.pkl') > 3600:
        download_raw_data()
    time_case_dict = load_obj('case')
    time_death_dict = load_obj('death')

    time_array = np.array(list(time_case_dict.keys()), dtype=int)
    time_array.sort()
    case_list = np.zeros((3, len(time_array)))
    death_list = np.zeros((3, len(time_array)))
    time_list = []
    for i, item in enumerate(time_array):
        case_list[0, i], case_list[1, i], case_list[-1, i] = get_cases(
            time_case_dict, str(item))
        time_list.append(datetime.datetime.fromtimestamp(item))
    time_list = to_datetime(time_list)
    if not smooth:
        return time_list, case_list[0] + case_list[1]  # optionally + case_list[-1]
    if incidence:
        factor = 1 / count_age('Gesamt') * 100000
    else:
        factor = 1
    # Trailing 7-day sum via convolution: 'full'[:-6] keeps the output the
    # same length as the input, each entry summing the current day and the
    # six preceding days.
    return time_list, np.convolve(case_list[0] + case_list[1],
                                  [1, 1, 1, 1, 1, 1, 1], 'full')[:-6] * factor
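A standalone check (hypothetical daily series) of the trailing 7-day sum used above:

import numpy as np

daily = np.ones(14)
weekly = np.convolve(daily, [1, 1, 1, 1, 1, 1, 1], 'full')[:-6]
print(len(weekly) == len(daily))  # True
print(weekly[6:])  # all 7.0 once a full week of data is available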
Esempio n. 48
0
def ExpMovingAverage(values, window):
    weights = np.exp(np.linspace(-1., 0., window))
    weights /= weights.sum()
    # Note: np.convolve flips the kernel, so as written the *oldest* sample
    # in each window receives the largest weight (e^0); reverse `weights`
    # for a conventional EMA that emphasizes the most recent sample.
    a = np.convolve(values, weights, mode='full')[:len(values)]
    a[:window] = a[window]
    return a
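A small check of the warm-up and window behaviour above, using a hypothetical step input:

import numpy as np

step = np.concatenate([np.zeros(10), np.ones(10)])
ema = ExpMovingAverage(step, window=5)
print(ema[-1])  # 1.0 once the window lies entirely inside the ones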
Esempio n. 49
0
def convolvetest(longer, shorter):  # signature inferred from the call below
    # 'valid'-mode linear convolution via direct dot products; assumes
    # len(longer) >= len(shorter).
    K = len(longer)-len(shorter)+1
    convolution = np.zeros(K, longer.dtype)
    for i in range(K):
        convolution[i] = np.dot(longer[i:len(shorter)+i], shorter[::-1])
    return convolution
# def convolvetest(a, w, b = 0, stride = 1, pad = 0):
#     """
#     compute 1d convolutional (with bias)
#     """
#     w_old = a.shape[0]
#     f = w.shape[0]
#     a_pad = np.pad(a, pad_width=pad, mode = 'constant', constant_values = 0)
#     w_new = int((w_old - f + 2*pad)/stride) + 1
#     a_res = np.zeros((w_new))
#     for i in range(w_new):
#         start = i*stride
#         end = start + f
#         a_res[i] = np.sum(a_pad[start:end]*w) + b
#     return a_res

# a = np.random.normal(0.0, 1.0, 100)
# b = np.random.normal(0.0, 1.0, 11)
a = np.array([1,2,3])
b = np.array([3,4,5])
# Python's built-in np.convolve computes the linear convolution
# print(np.allclose(np.convolve(a, b, mode='same'),
#             convolvetest(np.pad(a, ((len(b)-1)/2, (len(b)-1)/2), mode='constant', constant_values=0.0), b)))
print(np.allclose(np.convolve(a, b, mode='same'), convolvetest(a, b)))
print(a)
print(b)
print(convolvetest(a,b))
Esempio n. 50
0
def process_yeast(dir_path,
                  excels_path,
                  template1,
                  template2,
                  debug=False,
                  extensions=['[jJ][pP][gG]', '[pP][nN][gG]']):
    global activityThreshold
    # if len(sys.argv) == 1:
    #     print("Usage test.py <path_to_images>")
    #     exit(-1)
    path = []
    output_path = os.path.join(dir_path, 'output')
    for ext in extensions:
        path.extend(glob.glob(os.path.join(
            dir_path,
            "*.{}".format(ext))))  # glob.glob(sys.argv[1] + "/*.JPG")
    path.sort()
    df = pd.read_excel(excels_path, engine='openpyxl')
    # clusterPaths=[]
    baits = {}
    for file in path:
        name = Path(file).stem.split('.')[0]
        properties = name.split('_')
        baitNo = properties[1].split('-')[-1]
        if baitNo not in baits:
            baits[baitNo] = []
        baits[baitNo].append(file)

    for bait in baits.keys():
        headers = '<thead><tr>' + '<th colspan="3">Bait Number {}</th>'.format(
            bait
        ) + '<th colspan="2">TF</th>' + ''.join(
            [
                '<th colspan="3">Day {}</th>'.format(name.split('_')[-1][0])
                for name in baits[bait]
            ]
        ) + '</tr>' + '<tr><th>Index</th><th>Activated</th><th>Coordinate</th><th>TF1</th><th>TF2</th>' + (
            header * len(baits[bait])) + '</tr></thead>'
        dataframes = []
        outputPaths = []
        fileNames = []
        for imagePath in baits[bait]:
            name = Path(imagePath).stem
            name = name.split('.')
            fileNames.append(name)
            outputPath = os.path.join(output_path,
                                      name[0])  # "./output/{}".format(name[0])
            Path(outputPath).mkdir(parents=True, exist_ok=True)
            outputPaths.append(outputPath)
            # clusterPath = os.path.join(output_path, "cluster/")  # "./output/cluster/"
            # clusterPaths.append(clusterPath)
            # rmtree(clusterPath, ignore_errors=True)
            # for i in range(10, 110, 10):
            #     Path(clusterPath + str(i)).mkdir(parents=True, exist_ok=True)
            # dataframe = pd.read_excel(os.path.join(output_path, name[
            #     0] + ".xlsx"), engine = 'openpyxl')  # pd.read_excel(name[0] + ".xlsx", engine='openpyxl')
            dataframe = df.copy(deep=True)
            # print(list(dataframe.columns))
            image = cv2.imread(imagePath)
            if image is None:
                return -1, "Can't access image at " + dir_path
            img = cv2.Canny(image, 20, 30)
            # cv2.imwrite(os.path.join(output_path, 'edge-detect.jpg'), img)#'edge-detect.jpg', img)
            # template1 = cv2.imread(os.path.join(output_path,'upperLeft.png'), 0)
            # template2 = cv2.imread(os.path.join(output_path,'bottomRight.png'), 0)

            res = cv2.matchTemplate(img, template1, cv2.TM_CCORR_NORMED)
            res1 = cv2.matchTemplate(img, template2, cv2.TM_CCORR_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            top_left = np.add(max_loc, [140, 40])
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res1)
            bottom_right = np.add(max_loc, [0, 140])
            img = image[top_left[1]:bottom_right[1],
                        top_left[0]:bottom_right[0]]
            # cv2.imwrite(os.path.join(output_path, 'first crop.png'), img)

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            levelRange = getLevelRange(gray)
            gray = rescaleIntensity(gray, [0, levelRange[1]])
            gray = cv2.Sobel(gray, cv2.CV_8UC1, 1, 0, ksize=5)
            gray = cv2.medianBlur(gray, 5)
            gray = cv2.GaussianBlur(gray, (5, 5), 3)
            levelRange = getLevelRange(gray)
            gray = rescaleIntensity(gray, [sum(levelRange) / 2, levelRange[1]])
            gray = ((gray > 90) * 255).astype(np.uint8)
            # cv2.imwrite(os.path.join(output_path, 'gray.jpg'), gray)
            xProj, yProj = getHistogramProjection(gray)
            smooth = 5
            # Boxcar-smooth the projections; dropping the last smooth-1
            # samples of the 'full' convolution keeps the original length.
            xProj = np.convolve(xProj,
                                [1 / smooth
                                 for i in range(smooth)])[:-(smooth - 1)]
            yProj = np.convolve(yProj,
                                [1 / smooth
                                 for i in range(smooth)])[:-(smooth - 1)]
            xProj = (xProj > 20) * xProj
            yProj = (yProj > 20) * yProj

            xNonZero = [i for i, elem in enumerate(xProj) if elem > 30]
            yNonZero = [i for i, elem in enumerate(yProj) if elem > 30]
            crop = img
            xpeaks, xGrad = getPeaks(xProj, 0, 0)

            ypeaks, yGrad = getPeaks(yProj, 0, 0)
            outputPercent = []
            outputIntensity = []
            outputArea = []
            hsvCrop = cv2.cvtColor(crop, cv2.COLOR_BGR2HSV)
            outputImage = []
            # reference, refCols = getIndex("3-B7", xpeaks, ypeaks)

            for c in df["Coordinate"]:
                if (len(c.split("-")[1]) > 1):
                    index, colonies = getIndex(c, xpeaks, ypeaks)
                    roi = (hsvCrop[index[1][0]:index[1][1],
                                   index[0][0]:index[0][1]])[:, :, 2]
                    hsvImage = np.zeros_like(roi, dtype=np.uint8)
                    for ix, colony in enumerate(colonies):
                        subRoi = roi[colony[1][0]:colony[1][1],
                                     colony[0][0]:colony[0][1]]
                        levelRange = getLevelRange(subRoi)
                        if True:
                            colonyProcessed = rescaleIntensity(
                                subRoi, [0, levelRange[1]])
                            colonyProcessed = cv2.GaussianBlur(
                                colonyProcessed, (3, 3), 2.5)
                            colonyProcessed = cv2.medianBlur(colonyProcessed,
                                                             ksize=9)
                            test = getLevelRange(colonyProcessed, 0.05)
                            # if test[1] - test[0] <= 127:
                            #     test[0] = 127
                            colonyProcessed = cv2.threshold(
                                colonyProcessed, test[0], 255,
                                cv2.THRESH_BINARY_INV)[1]
                            contours, hierarchy = cv2.findContours(
                                colonyProcessed, cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)
                            center = np.divide(colonyProcessed.shape, 2)
                            centerContour = None
                            minDistance = colonyProcessed.size
                            for contour in contours:
                                moments = cv2.moments(contour)
                                if moments["m00"] != 0:
                                    centroid = [
                                        int(moments["m10"] / moments["m00"]),
                                        int(moments["m01"] / moments["m00"])
                                    ]
                                    distance = math.sqrt(
                                        np.sum(np.square(centroid - center)))
                                    if distance < minDistance:
                                        centerContour = contour
                                        minDistance = distance
                            colonyProcessed = np.zeros_like(colonyProcessed,
                                                            dtype=np.uint8)

                            colonyProcessed = cv2.drawContours(
                                colonyProcessed, [centerContour], 0,
                                (255, 255, 255), cv2.FILLED)
                            hsvImage[
                                colony[1][0]:colony[1][1],
                                colony[0][0]:colony[0][1]] = colonyProcessed

                    inv = (hsvCrop[index[1][0]:index[1][1],
                                   index[0][0]:index[0][1]])[:, :, 0]
                    inv = rescaleIntensity(inv, [getLevelRange(inv)[0], 255])
                    invRange = getLevelRange(inv, 0)
                    if invRange[1] - invRange[0] < 3:
                        inv = np.zeros_like(inv, dtype=np.uint8)
                    roi = cv2.bitwise_and(inv, inv, mask=hsvImage)
                    roi[roi <= getLevelRange(
                        roi, skipZero=True, percentile=.10)[0]] = 0
                    areaImage = roi.copy()
                    areaImage[areaImage > 0] = 1
                    area = np.sum(areaImage)
                    intensity = roi.copy()
                    intensity = np.divide(intensity,
                                          np.subtract(256, intensity))
                    outputIntensity.append(np.sum(intensity))
                    outputArea.append(area)
                    cv2.imwrite(
                        os.path.join(outputPath, c +
                                     ".png"),  # outputPath + "/" + c + ".png",
                        crop[index[1][0]:index[1][1], index[0][0]:index[0][1]])
                    if debug:
                        cv2.imwrite(
                            os.path.join(outputPath, c) + "_final.png", roi)
                        cv2.imwrite(
                            os.path.join(outputPath, c) + "_preMask.png", inv)
                        cv2.imwrite(
                            os.path.join(outputPath, c) + "_mask.png",
                            hsvImage)
                    outputImage.append(os.path.join(name[0], c + ".png"))
                else:
                    outputArea.append(math.nan)
                    outputIntensity.append(math.nan)
                    outputPercent.append(0)
                    outputImage.append(None)
            outputIntensity = np.nan_to_num(outputIntensity,
                                            nan=np.nanmin(outputIntensity))
            outputArea = np.nan_to_num(outputArea, nan=-1)
            dataframe["Intensity"] = outputIntensity
            dataframe["Area"] = outputArea
            dataframe["Image"] = outputImage
            for i, p in enumerate(xpeaks):
                if i % 4 == 0:
                    # Vertical grid lines span the image height (shape[0]).
                    cv2.line(crop, (p, 0), (p, crop.shape[0] - 1), (0, 0, 255),
                             thickness=3)
                else:
                    cv2.line(crop, (p, 0), (p, crop.shape[0] - 1), (0, 0, 0),
                             thickness=1)
            for i, p in enumerate(ypeaks):
                if i % 4 == 0:
                    cv2.line(crop, (0, p), (crop.shape[1] - 1, p), (0, 0, 255),
                             thickness=3)
                else:
                    cv2.line(crop, (0, p), (crop.shape[1] - 1, p), (0, 0, 0),
                             thickness=1)
            cv2.imwrite(os.path.join(output_path, name[0] + '_crop.png'), crop)
            dataframes.append(dataframe)
        intensities = []
        area = []
        plateMedianArea = []
        plateMedian = []
        for dataframe in dataframes:
            intensities.extend(list(dataframe['Intensity']))
            area.extend(list(dataframe['Area']))
            plateMedianArea.append(np.median(dataframe['Area']))
            plateMedian.append(np.median(dataframe['Intensity']))
        median = np.median(plateMedian)
        aMedian = np.median(plateMedianArea)
        for dataframe in dataframes:
            normalized = np.array(list(dataframe['Intensity'])) / median
            # normalized = stats.zscore(normalized)
            dataframe['Intensity'] = normalized.round(3)
            normalized = np.array(list(dataframe['Area'])) / aMedian
            # normalized = stats.zscore(normalized)
            dataframe['Area'] = normalized.round(3)
            # detected = genOutputIntensity > activityThreshold

        for dataframe, name in zip(dataframes, fileNames):
            excelColumns = list(dataframe.columns)
            excelColumns.remove('Image')
            dataframe[excelColumns].to_excel(
                os.path.join(output_path, name[0]) + ".xlsx", index=False)
        customDF = [
            dataframes[0][[
                'Coordinate', 'TF1', 'TF2', 'Intensity', 'Area', 'Image'
            ]]
        ]
        if len(dataframes) > 1:
            customDF.extend(dataframe[['Intensity', 'Area', 'Image']]
                            for dataframe in dataframes[1:])
        dataframe = pd.concat(customDF, axis=1)
        #Insert Additional rows
        dataframe = addRows(dataframe)
        #end
        dataframe.insert(0, 'Activated',
                         [False for i in range(len(dataframe.index))])

        # ################################################################
        # dataframe = main_add_cols(dataframe)  # COSMIN
        # ################################################################
        # activityThreshold = dataframe['EMP_EMP_Intensity'][0]
        # dataframe.insert(0, 'Activated', [False for i in range(len(dataframe.index))])
        opHtml = html.format(
            name[0], css,
            dataframe.to_html(header=False,
                              escape=False,
                              formatters=dict(
                                  Activated=checkBoxGenerator,
                                  Image=imageTageGenerator)).replace(
                                      "<tbody>", headers + '\n<tbody>'),
            javascript, downloadHTML)
        with open(
                os.path.join(output_path,
                             '{}.html'.format(name[0][:name[0].rfind('_')])),
                "w") as file:
            file.write(opHtml)

    return 0, 'ok'
Esempio n. 51
0
    def pan_tompkin(self, ecg, fs):
        ''' Initialize '''

        delay = 0
        skip = 0  # Becomes one when a T wave is detected
        m_selected_RR = 0
        mean_RR = 0
        ser_back = 0
        ''' Noise Cancellation (Filtering) (5-15 Hz) '''

        if fs == 200:
            ''' Remove the mean of Signal '''
            ecg = ecg - np.mean(ecg)
            ''' Low Pass Filter H(z) = (( 1 - z^(-6))^2) / (1-z^(-1))^2 '''
            ''' It has come to my attention the original filter does not achieve 12 Hz
                b = [1, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 1] 
                a = [1, -2, 1]
                ecg_l = filter(b, a, ecg)
                delay = 6
            '''
            Wn = 12 * 2 / fs
            N = 3
            # Note: signal.butter returns (b, a); the names are swapped here,
            # but filtfilt below still receives (numerator, denominator) in
            # the correct order.
            a, b = signal.butter(N, Wn, btype='lowpass')
            ecg_l = signal.filtfilt(a, b, ecg)
            ecg_l = ecg_l / np.max(np.abs(ecg_l))
            ''' High Pass Filter H(z) = (-1 + 32z^(-16) + z^(-32)) / (1+z^(-1))'''
            ''' It has come to my attention the original filter does not achieve 5 Hz
                b = np.zeros((1,33))
                b(1) = -1
                b(17) = 32
                b(33) = 1
                a = [1, 1]
                ecg_h = filter(b, a, ecg_l)  -> Without delay
                delay = delay + 16'''

            Wn = 5 * 2 / fs
            N = 3  # Order of 3 less processing
            a, b = signal.butter(N, Wn, btype='highpass')  # Highpass filtering
            ecg_h = signal.filtfilt(a,
                                    b,
                                    ecg_l,
                                    padlen=3 * (max(len(a), len(b)) - 1))
            ecg_h = ecg_h / np.max(np.abs(ecg_h))

        else:
            ''' Band Pass Filter for noise cancellation at other sampling frequencies (Filtering) '''
            f1 = 5  # cutoff low frequency to get rid of baseline wander
            f2 = 15  # cutoff frequency to discard high frequency noise
            Wn = [f1 * 2 / fs, f2 * 2 / fs]  # cutoff based on fs
            N = 3  # order of 3 less processing
            a, b = signal.butter(N=N, Wn=Wn,
                                 btype='bandpass')  # Bandpass filtering
            ecg_h = signal.filtfilt(a,
                                    b,
                                    ecg,
                                    padlen=3 * (max(len(a), len(b)) - 1))

            ecg_h = ecg_h / np.max(np.abs(ecg_h))
        ''' Derivative Filter '''
        ''' H(z) = (1/8T)(-z^(-2) - 2z^(-1) + 2z + z^(2)) '''

        vector = [1, 2, 0, -2, -1]
        if fs != 200:
            int_c = 160 / fs
            b = interp1d(range(1, 6),
                         [i * fs / 8
                          for i in vector])(np.arange(1, 5.1, int_c))
        else:
            b = [i * fs / 8 for i in vector]

        ecg_d = signal.filtfilt(b,
                                1,
                                ecg_h,
                                padlen=3 * (max(len(a), len(b)) - 1))

        ecg_d = ecg_d / np.max(ecg_d)
        ''' Squaring nonlinearly enhance the dominant peaks '''

        ecg_s = ecg_d**2
        ''' Moving Average '''
        ''' Y(nT) = (1/N)[x(nT-(N-1)T) + x(nT - (N-2) T) + ... + x(nT)] '''

        temp_vector = np.ones(round(0.150 * fs)) / round(0.150 * fs)
        ecg_m = np.convolve(ecg_s, temp_vector)

        delay = delay + round(0.150 * fs) / 2
        ''' Fiducial Marks '''
        ''' Note : a minimum distance of 40 samples is considered between each R wave since, from a
            physiological point of view, no RR interval can be shorter than 200 ms '''

        pks = []
        locs = peakutils.indexes(y=ecg_m, thres=0, min_dist=round(0.2 * fs))
        for val in locs:
            pks.append(ecg_m[val])
        ''' Initialize Some Other Parameters '''
        LLp = len(pks)
        ''' Stores QRS with respect to Signal and Filtered Signal '''
        qrs_c = np.zeros(LLp)  # Amplitude of R
        qrs_i = np.zeros(LLp)  # index
        qrs_i_raw = np.zeros(LLp)  # index (bandpassed signal)
        qrs_amp_raw = np.zeros(LLp)  # Amplitude of R (bandpassed signal)
        ''' Noise Buffers '''
        nois_c = np.zeros(LLp)
        nois_i = np.zeros(LLp)
        ''' Buffers for signal and noise '''

        SIGL_buf = np.zeros(LLp)
        NOISL_buf = np.zeros(LLp)
        THRS_buf = np.zeros(LLp)
        SIGL_buf1 = np.zeros(LLp)
        NOISL_buf1 = np.zeros(LLp)
        THRS_buf1 = np.zeros(LLp)
        ''' Initialize the training phase (2 seconds of the signal) to determine the THR_SIG and THR_NOISE '''
        THR_SIG = np.max(
            ecg_m[:2 * fs + 1]) * 1 / 3  # 0.33 of the max amplitude
        THR_NOISE = np.mean(
            ecg_m[:2 * fs + 1]
        ) * 1 / 2  # 0.5 of the mean signal is considered to be noise
        SIG_LEV = THR_SIG
        NOISE_LEV = THR_NOISE
        ''' Initialize bandpass filter threshold (2 seconds of the bandpass signal) '''
        THR_SIG1 = np.max(ecg_h[:2 * fs + 1]) * 1 / 3
        THR_NOISE1 = np.mean(ecg_h[:2 * fs + 1]) * 1 / 2
        SIG_LEV1 = THR_SIG1  # Signal level in Bandpassed filter
        NOISE_LEV1 = THR_NOISE1  # Noise level in Bandpassed filter
        ''' Thresholding and decision rule '''

        Beat_C = 0
        Beat_C1 = 0
        Noise_Count = 0
        for i in range(LLp):
            ''' Locate the corresponding peak in the filtered signal '''
            if locs[i] - round(0.150 * fs) >= 1 and locs[i] <= len(ecg_h):
                temp_vec = ecg_h[
                    locs[i] - round(0.150 * fs):locs[i] +
                    1]  # -1 since matlab works differently with indexes
                y_i = np.max(temp_vec)
                x_i = list(temp_vec).index(y_i)
            else:
                if i == 0:
                    temp_vec = ecg_h[:locs[i] + 1]
                    y_i = np.max(temp_vec)
                    x_i = list(temp_vec).index(y_i)
                    ser_back = 1
                elif locs[i] >= len(ecg_h):
                    temp_vec = ecg_h[int(locs[i] - round(0.150 * fs)):]
                    y_i = np.max(temp_vec)
                    x_i = list(temp_vec).index(y_i)
            ''' Update the Heart Rate '''
            if Beat_C >= 9:
                diffRR = np.diff(qrs_i[Beat_C -
                                       9:Beat_C])  # Calculate RR interval
                mean_RR = np.mean(
                    diffRR
                )  # Calculate the mean of 8 previous R waves interval
                comp = qrs_i[Beat_C - 1] - qrs_i[Beat_C - 2]  # Latest RR
                if comp <= 0.92 * mean_RR or comp >= 1.16 * mean_RR:
                    ''' lower down thresholds to detect better in MVI '''
                    THR_SIG = 0.5 * THR_SIG
                    THR_SIG1 = 0.5 * THR_SIG1
                else:
                    m_selected_RR = mean_RR  #The latest regular beats mean
            ''' Calculate the mean of the last 8 R waves to ensure that a QRS is not missed '''
            if bool(m_selected_RR):
                test_m = m_selected_RR  #if the regular RR available use it
            elif bool(mean_RR) and m_selected_RR == 0:
                test_m = mean_RR
            else:
                test_m = 0

            if bool(test_m):
                if locs[i] - qrs_i[Beat_C - 1] >= round(
                        1.66 * test_m):  # it shows a QRS is missed

                    temp_vec = ecg_m[int(qrs_i[Beat_C - 1] + round(0.2 * fs)
                                         ):int(locs[i] - round(0.2 * fs)) + 1]
                    pks_temp = np.max(
                        temp_vec
                    )  #search back and locate the max in the interval
                    locs_temp = list(temp_vec).index(pks_temp)
                    locs_temp = qrs_i[Beat_C - 1] + round(
                        0.200 * fs) + locs_temp
                    if pks_temp > THR_NOISE:
                        Beat_C = Beat_C + 1
                        qrs_c[Beat_C - 1] = pks_temp
                        qrs_i[Beat_C - 1] = locs_temp
                        ''' Locate in Filtered Signal '''

                        if locs_temp <= len(ecg_h):
                            #temp_vec = ecg_h[int(locs_temp-round(0.150*fs)):int(locs_temp)+1]
                            temp_vec = ecg_h[int(locs_temp -
                                                 round(0.150 * fs)) +
                                             1:int(locs_temp) + 2]
                            y_i_t = np.max(temp_vec)
                            x_i_t = list(temp_vec).index(y_i_t)
                        else:
                            temp_vec = ecg_h[int(locs_temp -
                                                 round(0.150 * fs)):]
                            y_i_t = np.max(temp_vec)
                            x_i_t = list(temp_vec).index(y_i_t)
                        ''' Band Pass Signal Threshold '''
                        if y_i_t > THR_NOISE1:
                            Beat_C1 = Beat_C1 + 1
                            temp_value = locs_temp - round(0.150 * fs) + x_i_t
                            qrs_i_raw[Beat_C1 -
                                      1] = temp_value  # save index of bandpass
                            qrs_amp_raw[
                                Beat_C1 -
                                1] = y_i_t  # save amplitude of bandpass
                            SIG_LEV1 = 0.25 * y_i_t + 0.75 * SIG_LEV1  #when found with the second threshold

                        not_nois = 1
                        SIG_LEV = 0.25 * pks_temp + 0.75 * SIG_LEV

                else:
                    not_nois = 0
            ''' Find noise and QRS Peaks '''

            if pks[i] >= THR_SIG:
                ''' if NO QRS in 360 ms of the previous QRS See if T wave '''
                if Beat_C >= 3:
                    if locs[i] - qrs_i[Beat_C - 1] <= round(0.36 * fs):
                        temp_vec = ecg_m[locs[i] - round(0.075 * fs):locs[i] +
                                         1]
                        Slope1 = np.mean(
                            np.diff(temp_vec)
                        )  # mean slope of the waveform at that position
                        temp_vec = ecg_m[int(qrs_i[Beat_C - 1] -
                                             int(round(0.075 * fs))) -
                                         1:int(qrs_i[Beat_C - 1]) + 1]
                        Slope2 = np.mean(
                            np.diff(temp_vec))  # mean slope of previous R wave
                        if np.abs(Slope1) <= np.abs(
                                0.5 *
                                Slope2):  # slope less then 0.5 of previous R
                            Noise_Count = Noise_Count + 1
                            nois_c[Noise_Count] = pks[i]
                            nois_i[Noise_Count] = locs[i]
                            skip = 1  # T wave identification
                        else:
                            skip = 0
                ''' Skip is 1 when a T wave is detected '''
                if skip == 0:
                    Beat_C = Beat_C + 1
                    qrs_c[Beat_C - 1] = pks[i]
                    qrs_i[Beat_C - 1] = locs[i]
                    ''' Band pass Filter check threshold '''

                    if y_i >= THR_SIG1:
                        Beat_C1 = Beat_C1 + 1
                        if bool(ser_back):
                            # +1 to agree with Matlab implementation
                            temp_value = x_i + 1
                            qrs_i_raw[Beat_C1 - 1] = temp_value
                        else:
                            temp_value = locs[i] - round(0.150 * fs) + x_i
                            qrs_i_raw[Beat_C1 - 1] = temp_value

                        qrs_amp_raw[Beat_C1 - 1] = y_i

                        SIG_LEV1 = 0.125 * y_i + 0.875 * SIG_LEV1

                    SIG_LEV = 0.125 * pks[i] + 0.875 * SIG_LEV

            elif THR_NOISE <= pks[i] and pks[i] < THR_SIG:
                NOISE_LEV1 = 0.125 * y_i + 0.875 * NOISE_LEV1
                NOISE_LEV = 0.125 * pks[i] + 0.875 * NOISE_LEV

            elif pks[i] < THR_NOISE:
                nois_c[Noise_Count] = pks[i]
                nois_i[Noise_Count] = locs[i]
                Noise_Count = Noise_Count + 1

                NOISE_LEV1 = 0.125 * y_i + 0.875 * NOISE_LEV1
                NOISE_LEV = 0.125 * pks[i] + 0.875 * NOISE_LEV
            ''' Adjust the threshold with SNR '''

            if NOISE_LEV != 0 or SIG_LEV != 0:
                THR_SIG = NOISE_LEV + 0.25 * (np.abs(SIG_LEV - NOISE_LEV))
                THR_NOISE = 0.5 * THR_SIG
            ''' Adjust the threshold with SNR for bandpassed signal '''

            if NOISE_LEV1 != 0 or SIG_LEV1 != 0:
                THR_SIG1 = NOISE_LEV1 + 0.25 * (np.abs(SIG_LEV1 - NOISE_LEV1))
                THR_NOISE1 = 0.5 * THR_SIG1
            ''' take a track of thresholds of smoothed signal '''

            SIGL_buf[i] = SIG_LEV
            NOISL_buf[i] = NOISE_LEV
            THRS_buf[i] = THR_SIG
            ''' take a track of thresholds of filtered signal '''

            SIGL_buf1[i] = SIG_LEV1
            NOISL_buf1[i] = NOISE_LEV1
            THRS_buf1[i] = THR_SIG1
            ''' reset parameters '''

            skip = 0
            not_nois = 0
            ser_back = 0
        ''' Adjust lengths '''

        qrs_i_raw = qrs_i_raw[:Beat_C1]
        qrs_amp_raw = qrs_amp_raw[:Beat_C1]
        qrs_c = qrs_c[:Beat_C + 1]
        qrs_i = qrs_i[:Beat_C + 1]

        return qrs_amp_raw, qrs_i_raw, delay
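The moving-window integration step inside pan_tompkin is a plain boxcar convolution. A standalone sketch with hypothetical data, assuming fs = 200 Hz:

import numpy as np

fs = 200
N = round(0.150 * fs)  # 30-sample window, 0.150 s at 200 Hz
x = np.random.rand(1000) ** 2  # hypothetical stand-in for the squared signal
y = np.convolve(x, np.ones(N) / N)  # default 'full' mode, as in the code above
print(len(y) == len(x) + N - 1)  # True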
"""
Compute the coherence of two signals
"""
import matplotlib.pyplot as plt
import numpy as np

# make a little extra space between the subplots
plt.subplots_adjust(hspace=0.5)

dt = 0.01
t = np.arange(0, 30, dt)
nse1 = np.random.randn(len(t))  # white noise 1
nse2 = np.random.randn(len(t))  # white noise 2
r = np.exp(-t / 0.05)

cnse1 = np.convolve(nse1, r, mode='same') * dt  # colored noise 1
cnse2 = np.convolve(nse2, r, mode='same') * dt  # colored noise 2

# two signals with a coherent part and a random part
s1 = 0.01 * np.sin(2 * np.pi * 10 * t) + cnse1
s2 = 0.01 * np.sin(2 * np.pi * 10 * t) + cnse2

plt.subplot(211)
plt.plot(t, s1, t, s2)
plt.xlim(0, 5)
plt.xlabel('time')
plt.ylabel('s1 and s2')
plt.grid(True)

plt.subplot(212)
cxy, f = plt.cohere(s1, s2, 256, 1. / dt)
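A brief continuation (a hypothetical check, not part of the original script): the coherence should peak near the shared 10 Hz component.

print(f[np.argmax(cxy)])  # expected to lie close to 10 Hz
plt.show()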
Esempio n. 53
0
def grpdelay(b, a=1, nfft=512, whole='none', analog=False, Fs=2. * pi):
    #==================================================================
    """
    Calculate the group delay of a discrete-time filter, specified by
    numerator coefficients `b` and denominator coefficients `a` of the
    system function `H(z)`.
    
    When only `b` is given, the group delay of the transversal (FIR)
    filter specified by `b` is calculated.
    
    Parameters
    ----------
    b :  array_like
         Numerator coefficients (transversal part of filter)
    
    a :  array_like (optional, default = 1 for FIR-filter)
         Denominator coefficients (recursive part of filter)
    
    whole : string (optional, default : 'none')
         Only when whole = 'whole' is the group delay calculated around
         the complete unit circle (0 ... 2 pi)
    
    nfft : integer (optional, default: 512)
         Number of FFT-points
    
    Fs : float (optional, default: Fs = 2*pi)
         Sampling frequency.
    
    analog : bool (optional, default: False)
         When True, compute the group delay of a continuous-time (analog)
         system instead.
    
    
    Returns
    -------
    tau_g : ndarray
        The group delay
    
    
    w : ndarray
        The angular frequency points where the group delay was computed
    
    Notes
    -----
    The group delay :math:`\\tau_g(\\omega)` of discrete and continuous time
    systems is defined by
    
    .. math::
    
        \\tau_g(\\omega) = -  \\phi'(\\omega)
            = -\\frac{\\partial \\phi(\\omega)}{\\partial \\omega}
            = -\\frac{\\partial }{\\partial \\omega}\\angle H( \\omega)
    
    A useful form for calculating the group delay is obtained by deriving the
    *logarithmic* frequency response in polar form as described in [JOS]_ for
    discrete time systems:
    
    .. math::
    
        \\ln ( H( \\omega))
          = \\ln \\left({H_A( \\omega)} e^{j \\phi(\\omega)} \\right)
          = \\ln \\left({H_A( \\omega)} \\right) + j \\phi(\\omega)
    
          \\Rightarrow \\; \\frac{\\partial }{\\partial \\omega} \\ln ( H( \\omega))
          = \\frac{H_A'( \\omega)}{H_A( \\omega)} +  j \\phi'(\\omega)
    
    where :math:`H_A(\\omega)` is the amplitude response. :math:`H_A(\\omega)` and
    its derivative :math:`H_A'(\\omega)` are real-valued, therefore, the group
    delay can be calculated from
    
    .. math::
    
          \\tau_g(\\omega) = -\\phi'(\\omega) =
          -\\Im \\left\\{ \\frac{\\partial }{\\partial \\omega}
          \\ln ( H( \\omega)) \\right\\}
          =-\\Im \\left\\{ \\frac{H'(\\omega)}{H(\\omega)} \\right\\}
    
    The derivative of a polynomial :math:`P(s)` (continuous-time system) or :math:`P(z)`
    (discrete-time system) w.r.t. :math:`\\omega` is calculated by:
    
    .. math::
    
        \\frac{\\partial }{\\partial \\omega} P(s = j \\omega)
        = \\frac{\\partial }{\\partial \\omega} \\sum_{k = 0}^N c_k (j \\omega)^k
        =  j \\sum_{k = 0}^{N-1} (k+1) c_{k+1} (j \\omega)^{k}
        =  j P_R(s = j \\omega)
    
        \\frac{\\partial }{\\partial \\omega} P(z = e^{j \\omega T})
        = \\frac{\\partial }{\\partial \\omega} \\sum_{k = 0}^N c_k e^{-j k \\omega T}
        =  -jT \\sum_{k = 0}^{N} k c_{k} e^{-j k \\omega T}
        =  -jT P_R(z = e^{j \\omega T})
    
    where :math:`P_R` is the "ramped" polynomial, i.e. its `k`-th coefficient is
    multiplied by `k` or `k` + 1, respectively.
    
    yielding:
    
    .. math::
    
        \\tau_g(\\omega) = -\\Im \\left\\{ \\frac{H'(\\omega)}{H(\\omega)} \\right\\}
        \\quad \\text{ resp. } \\quad
        \\tau_g(\\omega) = -\\Im \\left\\{ \\frac{H'(e^{j \\omega T})}
                        {H(e^{j \\omega T})} \\right\\}
    
    
    where::
    
                        (H'(e^jwT))       (    H_R(e^jwT))        (H_R(e^jwT))
        tau_g(w) = -im  |---------| = -im |-jT ----------| = T re |----------|
                        ( H(e^jwT))       (    H(e^jwT)  )        ( H(e^jwT) )
    
    where :math:`H(e^{j\\omega T})` is calculated via the DFT at NFFT points and
    the derivative of the polynomial terms :math:`b_k z^{-k}` using
    :math:`\\partial / \\partial \\omega \\; b_k e^{-jk\\omega T} = -jkT b_k e^{-jk\\omega T}`.
    This is equivalent to multiplying the polynomial with a ramp `k`,
    yielding the "ramped" function :math:`H_R(e^{j\\omega T})`.
    
    
    
    For analog functions with b_k s^k the procedure is analogous, but there is no
    sampling time and the exponent is positive.
    
    
    
    .. [JOS] Julius O. Smith III, "Numerical Computation of Group Delay" in
        "Introduction to Digital Filters with Audio Applications",
        Center for Computer Research in Music and Acoustics (CCRMA),
        Stanford University, http://ccrma.stanford.edu/~jos/filters/Numerical_Computation_Group_Delay.html, referenced 2014-04-02,
    
    .. [Lyons] Richard Lyons, "Understanding Digital Signal Processing", 3rd Ed.,
        Prentice Hall, 2010.
    
    Examples
    --------
    >>> b = [1,2,3] # Coefficients of H(z) = 1 + 2 z^{-1} + 3 z^{-2}
    >>> tau_g, w = dsp_lib.grpdelay(b)
    
    
    """
    ## If the denominator of the computation becomes too small, the group delay
    ## is set to zero.  (The group delay approaches infinity when
    ## there are poles or zeros very close to the unit circle in the z plane.)
    ##
    ## Theory: group delay, g(w) = -d/dw [arg{H(e^jw)}],  is the rate of change of
    ## phase with respect to frequency.  It can be computed as:
    ##
    ##               d/dw H(e^-jw)
    ##        g(w) = -------------
    ##                 H(e^-jw)
    ##
    ## where
    ##         H(z) = B(z)/A(z) = sum(b_k z^k)/sum(a_k z^k).
    ##
    ## By the quotient rule,
    ##                    A(z) d/dw B(z) - B(z) d/dw A(z)
    ##        d/dw H(z) = -------------------------------
    ##                               A(z) A(z)
    ## Substituting into the expression above yields:
    ##                A dB - B dA
    ##        g(w) =  ----------- = dB/B - dA/A
    ##                    A B
    ##
    ## Note that,
    ##        d/dw B(e^-jw) = sum(k b_k e^-jwk)
    ##        d/dw A(e^-jw) = sum(k a_k e^-jwk)
    ## which is just the FFT of the coefficients multiplied by a ramp.
    ##
    ## As a further optimization when nfft>>length(a), the IIR filter (b,a)
    ## is converted to the FIR filter conv(b,fliplr(conj(a))).
    if whole != 'whole':
        nfft = 2 * nfft
    nfft = int(nfft)
    #
    w = Fs * np.arange(0, nfft) / nfft  # create frequency vector

    try:
        len(a)
    except TypeError:
        a = 1
        oa = 0  # a is a scalar or empty -> order of a = 0
        c = b
        try:
            len(b)
        except TypeError:
            print('No proper filter coefficients: len(a) = len(b) = 1 !')
    else:
        oa = len(a) - 1  # order of denom. a(z) resp. a(s)
        c = np.convolve(b, a[::-1])  # a[::-1] reverses denominator coeffs a
        # c(z) = b(z) * a(1/z)*z^(-oa)
    try:
        len(b)
    except TypeError:
        b = 1
        ob = 0  # b is a scalar or empty -> order of b = 0
    else:
        ob = len(b) - 1  # order of b(z)

    if analog:
        a_b = np.convolve(a, b)
        if ob > 1:
            # ramp 1..ob matches the length of b[1:] (derivative of B(s))
            br_a = np.convolve(b[1:] * np.arange(1, ob + 1), a)
        else:
            br_a = 0
        ar_b = np.convolve(a[1:] * np.arange(1, oa + 1), b)

        num = np.fft.fft(ar_b - br_a, nfft)
        den = np.fft.fft(a_b, nfft)
    else:
        oc = oa + ob  # order of c(z)
        cr = c * np.arange(
            0, oc + 1)  # multiply with ramp -> derivative of c wrt 1/z

        num = np.fft.fft(cr, nfft)
        den = np.fft.fft(c, nfft)

    minmag = 10. * np.spacing(1)  # equivalent to matlab "eps"
    polebins = np.where(abs(den) < minmag)[0]  # find zeros of denominator
    #    polebins = np.where(abs(num) < minmag)[0] # find zeros of numerator
    if np.size(polebins) > 0:  # check whether polebins array is empty
        print('*** grpdelay warning: group delay singular -> setting to 0 at:')
        for i in polebins:
            print('f = {0} '.format((Fs * i / nfft)))
            num[i] = 0
            den[i] = 1

    if analog:
        tau_g = np.real(num / den)
    else:
        tau_g = np.real(num / den) - oa

    if whole != 'whole':
        nfft = nfft // 2
        tau_g = tau_g[0:nfft]
        w = w[0:nfft]

    return tau_g, w
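A minimal usage check of the grpdelay above (hypothetical filter): a linear-phase FIR filter with N taps has a constant group delay of (N-1)/2 samples.

import numpy as np

b = np.ones(5) / 5  # 5-tap moving average, linear phase
tau_g, w = grpdelay(b)
print(np.allclose(tau_g, 2.0))  # True: (5 - 1) / 2 = 2 samples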
Esempio n. 54
0
    def __init__(self,
                 samples_per_symbol=_def_samples_per_symbol,
                 bits_per_symbol=_def_bits_per_symbol,
                 h_numerator=_def_h_numerator,
                 h_denominator=_def_h_denominator,
                 cpm_type=_def_cpm_type,
                 bt=_def_bt,
                 symbols_per_pulse=_def_symbols_per_pulse,
                 generic_taps=_def_generic_taps,
                 verbose=_def_verbose,
                 log=_def_log):

        gr.hier_block2.__init__(
            self,
            "cpm_mod",
            gr.io_signature(1, 1, gr.sizeof_char),  # Input signature
            gr.io_signature(1, 1, gr.sizeof_gr_complex))  #  Output signature

        self._samples_per_symbol = samples_per_symbol
        self._bits_per_symbol = bits_per_symbol
        self._h_numerator = h_numerator
        self._h_denominator = h_denominator
        self._cpm_type = cpm_type
        self._bt = bt
        if cpm_type == 0 or cpm_type == 2 or cpm_type == 3:  # CPFSK, RC, Generic
            self._symbols_per_pulse = symbols_per_pulse
        elif cpm_type == 1:  # GMSK
            self._symbols_per_pulse = 4
        else:
            raise TypeError(
                "cpm_type must be an integer in {0,1,2,3}, is %r" %
                (cpm_type, ))

        self._generic_taps = numpy.array(generic_taps)

        if samples_per_symbol < 2:
            raise TypeError("samples_per_symbol must be >= 2, is %r" %
                            (samples_per_symbol, ))

        self.nsymbols = 2**bits_per_symbol
        self.sym_alphabet = numpy.arange(-(self.nsymbols - 1), self.nsymbols,
                                         2).tolist()

        self.ntaps = int(self._symbols_per_pulse * samples_per_symbol)
        sensitivity = 2 * pi * h_numerator / h_denominator / samples_per_symbol

        # Unpack Bytes into bits_per_symbol groups
        self.B2s = blocks.packed_to_unpacked_bb(bits_per_symbol,
                                                gr.GR_MSB_FIRST)

        # Turn it into symmetric PAM data.
        self.pam = digital_swig.chunks_to_symbols_bf(self.sym_alphabet, 1)

        # Generate pulse (sum of taps = samples_per_symbol/2)
        if cpm_type == 0:  # CPFSK
            self.taps = (1.0 / self._symbols_per_pulse / 2, ) * self.ntaps
        elif cpm_type == 1:  # GMSK
            gaussian_taps = filter.firdes.gaussian(
                1.0 / 2,  # gain
                samples_per_symbol,  # symbol_rate
                bt,  # bandwidth * symbol time
                self.ntaps  # number of taps
            )
            sqwave = (1, ) * samples_per_symbol  # rectangular window
            self.taps = numpy.convolve(numpy.array(gaussian_taps),
                                       numpy.array(sqwave))
        elif cpm_type == 2:  # Raised Cosine
            # generalize it for arbitrary roll-off factor
            self.taps = (1 - numpy.cos(
                2 * pi * numpy.arange(0, self.ntaps) / samples_per_symbol /
                self._symbols_per_pulse)) / (2 * self._symbols_per_pulse)
        elif cpm_type == 3:  # Generic CPM
            self.taps = generic_taps
        else:
            raise TypeError(
                "cpm_type must be an integer in {0,1,2,3}, is %r" %
                (cpm_type, ))

        self.filter = filter.pfb.arb_resampler_fff(samples_per_symbol,
                                                   self.taps)

        # FM modulation
        self.fmmod = analog.frequency_modulator_fc(sensitivity)

        if verbose:
            self._print_verbage()

        if log:
            self._setup_logging()

        # Connect
        self.connect(self, self.B2s, self.pam, self.filter, self.fmmod, self)
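A numpy-only sketch of the GMSK pulse construction above; a plain Gaussian stands in for filter.firdes.gaussian (which is GNU Radio-specific), so the exact taps are illustrative only.

import numpy as np

sps = 4  # samples per symbol
ntaps = 16
t = np.arange(ntaps) - (ntaps - 1) / 2.0
gaussian_taps = np.exp(-0.5 * (t / 2.0) ** 2)  # stand-in, not firdes.gaussian
sqwave = np.ones(sps)  # rectangular window spanning one symbol
taps = np.convolve(gaussian_taps, sqwave)
print(len(taps))  # ntaps + sps - 1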