Example 1
def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):
    """Smooth images by applying a Gaussian filter.

    Apply a Gaussian filter along the first three dimensions of arr.

    Parameters
    ==========
    arr: numpy.ndarray
        4D array, with image number as last dimension. 3D arrays are also
        accepted.

    affine: numpy.ndarray
        (4, 4) matrix, giving affine transformation for image. (3, 3) matrices
        are also accepted (only these coefficients are used).

    fwhm: scalar or numpy.ndarray
        Smoothing strength, as a full-width at half maximum, in millimeters.
        If a scalar is given, the width is identical in all three directions.
        A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
        If fwhm is None, no filtering is performed (useful when only removal
        of non-finite values is needed).

    ensure_finite: bool
        if True, replace all non-finite values (such as NaNs) with zero before
        filtering.

    copy: bool
        if True (the default), the input array is not modified: the filtering
        is performed on a copy. If False, the filtering is done in-place.

    Returns
    =======
    filtered_arr: numpy.ndarray
        arr, filtered.

    Notes
    =====
    This function is most efficient with arr in C order.
    """

    if copy:
        arr = arr.copy()

    # Keep only the scale part.
    affine = affine[:3, :3]

    if ensure_finite:
        # SPM tends to put NaNs in the data outside the brain
        arr[np.logical_not(np.isfinite(arr))] = 0

    if fwhm is not None:
        # Convert from a FWHM to a sigma:
        # Do not use /=, fwhm may be a numpy scalar
        fwhm = fwhm / np.sqrt(8 * np.log(2))
        vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
        sigma = fwhm / vox_size
        for n, s in enumerate(sigma):
            ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)

    return arr
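A minimal usage sketch (the array and affine below are synthetic, and _smooth_array is assumed to be in scope together with numpy/ndimage, as in the snippet):

import numpy as np
from scipy import ndimage

# Synthetic 4D image: three volumes of shape (10, 10, 10) with 2 mm voxels.
arr = np.random.rand(10, 10, 10, 3)
affine = np.diag([2., 2., 2., 1.])

smoothed = _smooth_array(arr, affine, fwhm=6.)
# Internally: sigma = 6 / (sqrt(8 * ln 2) * 2 mm) ~ 1.27 voxels per axis.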
Example 2
def calc_hist(hsv, mask, figure=None, ax1=None, ax2=None):
    chans = cv2.split(hsv)
    # Or maybe I should use Numpy indexing for faster splitting: h=hsv[:,:,0] 
    hist_h = cv2.calcHist([chans[0]], [0], mask, [180], [0, 180])
    hist_s = cv2.calcHist([chans[1]], [0], mask, [256], [0, 256])
    #print hist_s
    hist_h = hist_h.flatten()
    hist_s = hist_s.flatten()
    # Apply Gaussian low pass for histogram (using scipy)
    hist_hg = ndi.gaussian_filter1d(hist_h, sigma=1.5, output=np.float64, mode='nearest')
    hist_sg = ndi.gaussian_filter1d(hist_s, sigma=1.5, output=np.float64, mode='nearest')
    hue_max = np.argmax(hist_hg)
    saturation_max = np.argmax(hist_sg) if np.argmax(hist_sg) >= 20 else 20
    #print hue_max, saturation_max
    #ax1.clear(), ax2.clear()
    #ax1.set_autoscale_on(False)
    # ax1.plot(range(180),hist_hg)
    # ax2.plot(range(256),hist_sg)
    # ax1.set_ylim([0,1200])
    # ax1.set_xlim([0,180])
    # ax2.set_xlim([0,256])
    # ax2.set_ylim([0,1200])
    # figure.canvas.draw()
    #plt.xlim([0, 180])
    # symmetric window around the histogram peaks
    lower = np.array([hue_max - 20, saturation_max - 20, 20])
    upper = np.array([hue_max + 20, saturation_max + 20, 255])
    mask_color = cv2.inRange(hsv, lower, upper)
    return hue_max, hist_hg, saturation_max, hist_sg, mask_color
Example 3
def createOrdered3D(centerPoints,edgeSets):
	f = open(name + 'TrackPointsTest.xyz', 'w')
	fig = plt.figure()
	ax = fig.add_subplot(111, projection='3d')
	ax.set_zbound(lower=0, upper=1400)
	for edgeSet in edgeSets:
		xArr = []
		yArr = []
		zArr = []
		for point in edgeSet:
			xArr.append(point[0])
			yArr.append(point[1])
			height = getHeight(point[0],point[1],centerPoints)*HEIGHT_SCALE
			zArr.append(height)
			f.write(str(point[0]) + " "+ str(point[1]) + " " + str(height)+"\n")
		t = np.linspace(0, 1, len(zArr))
		t2 = np.linspace(0, 1, len(zArr))

		x2 = np.interp(t2, t, xArr)
		y2 = np.interp(t2, t, yArr)
		z2 = np.interp(t2, t, zArr)
		sigma = 10
		x3 = gaussian_filter1d(x2, sigma)
		y3 = gaussian_filter1d(y2, sigma)
		z3 = gaussian_filter1d(z2, sigma)
		ax.plot(x3, y3, z3, color='b')  # plot the smoothed track
	f.close()
	plt.show()
Example 4
    def _homogenize_params(self, other, maxdiff=1):
        """
        Return a triple with a tuple of paired indices (into self and other,
        respectively), and the factors and constants at these frequencies.
        
        Parameters
        ----------
        other : CallistoSpectrogram
            Spectrogram to be homogenized with the current one.
        maxdiff : float
            Threshold for which frequencies are considered equal.
        """

        pairs_indices = [(x, y) for x, y, d in minimal_pairs(self.freq_axis, other.freq_axis) if d <= maxdiff]

        pairs_data = [(self[n_one, :], other[n_two, :]) for n_one, n_two in pairs_indices]

        # XXX: Maybe unnecessary.
        pairs_data_gaussian = [(gaussian_filter1d(a, 15), gaussian_filter1d(b, 15)) for a, b in pairs_data]

        # If we used integer arithmetic, we would accept more invalid
        # values.
        pairs_data_gaussian64 = np.float64(pairs_data_gaussian)
        least = [leastsq(self._to_minimize(a, b), [1, 0])[0] for a, b in pairs_data_gaussian64]

        factors = [x for x, y in least]
        constants = [y for x, y in least]

        return pairs_indices, factors, constants
Example 5
def test_multiple_modes_sequentially():
    # Test that filters with multiple-mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.minimum_filter(arr, size=5, mode=modes))
Example 6
def bootdensity(data, min, max, nboot, ci):
    """ Calculate density and confidence intervals on density
    for a 1D array of points.  Bandwidth is selected automatically.
    """
    r("""
      limdensity <- function(data, weights=NULL, bw="nrd0")
      {
        density(data, from=%f, to=%f, weights=weights, bw=bw)
      }
      """%(min, max))
    density = r.limdensity(data)
    xdens = N.array(density['x'])
    ydens = N.array(density['y'])
    bw = density['bw']
    #print 'bandwidth:', bw
    ydensboot = N.zeros((nboot, len(xdens)), float)
    ndata = len(data)
    ran = N.random.uniform(0, ndata, (nboot, ndata)).astype(int)
    for i in range(nboot):
        den = r.limdensity(data[ran[i]])
        y = N.array(den['y'])
        ydensboot[i] = y
    ydensbootsort = N.sort(ydensboot, axis=0)
    ydensbootsort = interp1d(N.arange(0, 1.000001, 1.0/(nboot-1)),
                             ydensbootsort, axis=0)
    ilow = (0.5-ci/2.0)
    ihigh = (0.5+ci/2.0)
    ydenslow, ydenshigh = ydensbootsort((ilow, ihigh))
    ydenslow = gaussian_filter1d(ydenslow, bw*512/10.0)
    ydenshigh = gaussian_filter1d(ydenshigh, bw*512/10.0)
    return xdens, ydens, ydenslow, ydenshigh, bw
Example 7
def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    assert_equal(0, sndi.gaussian_filter(arr, 1, order=0))
    assert_equal(0, sndi.gaussian_filter(arr, 1, order=3))
    assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1)
    assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0))
    assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3))
    assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1)
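As a side note (a sketch, not part of the test suite): order=1 combines smoothing and differentiation in one pass, so it closely matches differentiating the smoothed signal away from the boundaries.

import numpy as np
from scipy import ndimage as sndi

x = np.linspace(0, 2 * np.pi, 200)
signal = np.sin(x)

d1 = sndi.gaussian_filter1d(signal, sigma=3, order=1)       # smoothed derivative (per sample)
ref = np.gradient(sndi.gaussian_filter1d(signal, sigma=3))  # derivative of the smoothed signal
print(np.abs(d1[10:-10] - ref[10:-10]).max())               # small in the interior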
Example 8
 def extract_arrays(self):
     self.xs, self.ys = self.chromatogram.as_arrays()
     if self.smooth:
         self.ys = gaussian_filter1d(self.ys, 1)
     if len(self.xs) > MAX_POINTS:
         new_xs = np.linspace(self.xs.min(), self.xs.max(), MAX_POINTS)
         new_ys = np.interp(new_xs, self.xs, self.ys)
         self.xs = new_xs
         self.ys = new_ys
         self.ys = gaussian_filter1d(self.ys, 1)
Example 9
def fit_semiconductor(t, data, sav_n=11, sav_deg=4, mode='sav', tr=0.4):
    from scipy.signal import savgol_filter
    from scipy.ndimage import gaussian_filter1d
    from scipy.optimize import leastsq
    ger = data[..., -1].sum(2).squeeze()
    plt.subplot(121)
    plt.title('Germanium sum')
    plt.plot(t, ger[:,  0])
    plt.plot(t, ger[:,  1])
    if mode =='sav':
        plt.plot(t, savgol_filter(ger[:, 0], sav_n, sav_deg, 0))
        plt.plot(t, savgol_filter(ger[:, 1], sav_n, sav_deg, 0))
    plt.xlim(-1, 3)
    plt.subplot(122)
    plt.title('First derivative')
    if mode == 'sav':
        derv0 = savgol_filter(ger[:, 0], sav_n, sav_deg, 1)
        derv1 = savgol_filter(ger[:, 1], sav_n, sav_deg, 1)
    elif mode == 'gauss':
        derv0 = gaussian_filter1d(ger[:, 0], sav_n, order=1)
        derv1 = gaussian_filter1d(ger[:, 1], sav_n, order=1)
    plt.plot(t , derv0)
    plt.plot(t , derv1)
    plt.xlim(-.8, .8)
    plt.ylim(0, 700)
    plt.minorticks_on()
    plt.grid(1)

    def gaussian(p, ch, res=True):

        i, j = dv.fi(t, -tr), dv.fi(t, tr)
        w = p[0]
        A = p[1]
        x0 = p[2]
        fit = A*np.exp(-(t[i:j]-x0)**2/(2*w**2))
        if res:
            return fit-ch[i:j]
        else:
            return fit


    x0 = leastsq(gaussian, [.2, max(derv0), 0], derv0)
    plt.plot(t[dv.fi(t, -tr):dv.fi(t, tr)], gaussian(x0[0], 0, 0), '--k')
    plt.text(0.05, 0.9, 'x$_0$ = %.2f\nFWHM = %.2f\nA = %.1f\n'%(x0[0][2],2.35*x0[0][0], x0[0][1]),
             transform=plt.gca().transAxes, va='top')

    x0 = leastsq(gaussian, [.2, max(derv1), 0], derv1)
    plt.plot(t[dv.fi(t, -tr):dv.fi(t, tr)], gaussian(x0[0], 1, 0), '--b')

    plt.xlim(-.8, .8)
    plt.minorticks_on()
    plt.grid(0)
    plt.tight_layout()
    plt.text(0.5, 0.9, 'x$_0$ = %.2f\nFWHM = %.2f\nA = %.1f\n'%(x0[0][2],2.35*x0[0][0], x0[0][1]),
             transform=plt.gca().transAxes, va='top')
Example 10
File: trace.py Project: jni/lesion
def trace_profile(image, sigma=5., width_factor=1., check_vertical=False):
    """Trace the intensity profile of a tubular structure in an image.

    Parameters
    ----------
    image : array of int or float, shape (M, N[, P])
        The input image. If 3D, the first dimension is flattened by
        summing along that axis.
    sigma : float, optional
        Convolve the intensity with this sigma to estimate the start
        and end of the scan lines.
    width_factor : float, optional
        The width of the line profile is determined automatically, then
        multiplied by this factor.
    check_vertical : bool, optional
        Check whether the tube is arranged top-to-bottom in the image.
        If `False`, it is assumed to be vertical, otherwise, the
        orientation is automatically determined from the image.

    Returns
    -------
    profile : 1D array of float
        The intensity profile of the tube.

    Examples
    --------
    >>> edges = np.array([8, 16, 22, 16, 8])
    >>> middle = np.array([0, 0, 0, 0, 0])
    >>> image = np.vstack([edges, middle, edges])
    >>> trace_profile(image, sigma=0)
    array([ 18.,   0.,  18.])
    >>> image3d = np.array([image, image, image])
    >>> trace_profile(image3d, sigma=0)
    array([ 54.,   0.,  54.])
    >>> trace_profile(image.T, sigma=0, check_vertical=True)
    array([ 18.,   0.,  18.])
    """
    if image.ndim > 2:
        image = image.sum(axis=0)
    if check_vertical:
        top_bottom_mean = np.mean(image[[0, image.shape[0] - 1], :])
        left_right_mean = np.mean(image[:, [0, image.shape[1] - 1]])
        if top_bottom_mean < left_right_mean:
            image = image.T
    top_distribution = nd.gaussian_filter1d(image[0], sigma)
    bottom_distribution = nd.gaussian_filter1d(image[-1], sigma)
    top_loc, top_whm = estimate_mode_width(top_distribution)
    bottom_loc, bottom_whm = estimate_mode_width(bottom_distribution)
    angle = np.arctan(np.abs(float(bottom_loc - top_loc)) / image.shape[0])
    width = int(np.ceil(max(top_whm, bottom_whm) * np.cos(angle)))
    profile = profile_line(image,
                           (0, top_loc), (image.shape[0] - 1, bottom_loc),
                           linewidth=width, mode='nearest')
    return profile
Example 11
def computeSegmentGradients(segmentArray):
    xs, ys, zs, ii, ij = segmentArray
    
    xd = ndimage.gaussian_filter1d(xs, 1.0, order=1)
    yd = ndimage.gaussian_filter1d(ys, 1.0, order=1)
    zd = ndimage.gaussian_filter1d(zs, 1.0, order=1)
    
    mag = sqrt( xd**2 + yd**2 + zd**2 )
    mag[mag==0] = 1
    

    return c_[xd/mag, yd/mag, zd/mag].T
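A quick check of what this returns (hypothetical data; assumes the snippet's star-imports, i.e. numpy's sqrt and c_ plus scipy's ndimage, are in scope where the function is defined): the columns are unit tangent vectors along the segment.

import numpy as np

t = np.linspace(0, 4 * np.pi, 200)
segment = (np.cos(t), np.sin(t), 0.1 * t, None, None)  # xs, ys, zs, ii, ij

tangents = computeSegmentGradients(segment)   # shape (3, 200)
print(np.linalg.norm(tangents, axis=0))       # ~1 everywhere: unit tangents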
Example 12
def derivatives(flux, dx, s_factor):
    dxdxdx = dx * dx * dx
    # First derivative
    gf = ndimage.gaussian_filter1d(flux, sigma=s_factor, order=1, mode='wrap') / dx

    # Second derivative
    ggf = ndimage.gaussian_filter1d(flux, sigma=s_factor, order=2, mode='wrap') / (dx * dx)

    # Third derivative
    gggf = np.array(ndimage.gaussian_filter1d(flux, sigma=s_factor, order=3, mode='wrap') / dxdxdx)

    return gf, ggf, gggf
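A sanity check on a periodic signal (a sketch; mode='wrap' suits the periodic sine, which the Gaussian only mildly attenuates):

import numpy as np
from scipy import ndimage

n = 512
x = np.linspace(0, 2 * np.pi, n, endpoint=False)
flux = np.sin(x)

gf, ggf, gggf = derivatives(flux, dx=x[1] - x[0], s_factor=2)
print(np.abs(gf - np.cos(x)).max())  # small: gf ~ cos(x), ggf ~ -sin(x), gggf ~ -cos(x)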
Example 13
	def getBoundsInfo(self):
		'''
		Each variable needs an upper and a lower bound. This portion of the
		algorithm needs some fine-tuning... these bounds determine the feasible
		set. Early experiments show that defining a tight feasible set
		produces quite good results, so that is why the multipliers here 
		are quite small.

		The primal variables are ordered [mu, theta], and the constraints are
		ordered [L, R, S, T]. The fast index is over position (i.e. there are
		nWrap entries before the variable changes).
		'''

		# Extract single columns
		L = self.columns['%04d.target' % (self.column)]['L']
		R = self.columns['%04d.target' % (self.column)]['R']
		S = self.columns['%04d.target' % (self.column)]['S']
		T = self.columns['%04d.target' % (self.column)]['T']

		# Bounds
		lowerBound = np.zeros(self.optDict['nPrimal'], dtype=float)
		lowerBound[self.optDict['nWrap']:] = 0.33
		upperBound = np.empty(self.optDict['nPrimal'], dtype=float)
		upperBound[:self.optDict['nWrap']] = 30.0 * self.parameter_scale
		upperBound[self.optDict['nWrap']:] = 0.34 * self.parameter_scale #3.0 / 4.0

		# Constraints
		factor = 1e-2
		lowerConstraint = np.empty(self.optDict['nConstraint'], dtype=float)
		upperConstraint = np.empty(self.optDict['nConstraint'], dtype=float)
		for i, val in enumerate([L, R, S, T]):
			lowerConstraint[i*self.optDict['nWrap']:(i+1)*self.optDict['nWrap']] = \
			ndimage.gaussian_filter1d((val - factor * val), 10)
		for i, val in enumerate([L, R, S, T]):
			upperConstraint[i*self.optDict['nWrap']:(i+1)*self.optDict['nWrap']] = \
			ndimage.gaussian_filter1d((val + factor * val), 10)
		for i, name in enumerate(['L','R','S','T']):
			self.sliceDict['con%s' % (name)] = slice(
				i*self.optDict['nWrap'],(i+1)*self.optDict['nWrap'])

		# Save these values in a dict
		self.constraintDict['lowerBound'] = lowerBound
		self.constraintDict['upperBound'] = upperBound
		self.constraintDict['lowerConstraint'] = lowerConstraint
		self.constraintDict['upperConstraint'] = upperConstraint
		self.constraintDict['constraintArray'] = np.empty_like(lowerConstraint)

		# Get the model update from FWI
		self.fwiTheta = self.columns['%04d.gradient' % (self.column)]['theta']
		self.fwiMu = self.columns['%04d.gradient' % (self.column)]['mu']

		self.fwiTheta *= self.parameter_scale #ndimage.filters.gaussian_filter1d(self.fwiTheta, 10)
Example 14
def handler(points, mr, gofscale, gof, sigma):
    from pdf2py import readwrite
    from meg import density
    from mri import transform
    from scipy import ndimage
    from nifti import NiftiImage
    from numpy import float32, int16, array

    report = {}
    fids = eval(mr.description)
    lpa = fids[0]
    rpa = fids[1]
    nas = fids[2]
    # self.points = array([[0,0,0],[10,0,0],[0,20,0]])#DEBUG-----------------
    xyz = transform.meg2mri(lpa, rpa, nas, dipole=points)
    # readwrite.writedata(xyz, os.path.dirname(mripath)+'/'+'xyz')
    print "lpa, rpa, nas", lpa, rpa, nas
    print mr.pixdim

    # do some scaling of the dips using the GOF as a weight.
    VoxDim = mr.voxdim[::-1]
    xyzscaled = (xyz / VoxDim).T
    print xyzscaled
    d = density.calc(xyz)
    gofscale = float32(gofscale)
    print "gofscale", gofscale
    s = gof - gofscale
    sf = (1 / (1 - gofscale)) * s
    ds = d * sf

    # apply a 1D gaussian filter
    z = density.val2img(mr.data, ds, xyzscaled)
    # sigma = float32(self.sigmaval.GetValue())
    print "sigma", sigma
    # sigma = 3
    print "filtering 1st dimension"
    f = ndimage.gaussian_filter1d(z, sigma * 1 / VoxDim[0], axis=0)
    print "filtering 2nd dimension"
    f = ndimage.gaussian_filter1d(f, sigma * 1 / VoxDim[1], axis=1)
    print "filtering 3rd dimension"
    f = ndimage.gaussian_filter1d(f, sigma * 1 / VoxDim[2], axis=2)

    scaledf = int16((z.max() / f.max()) * f * 1000)
    print "writing nifti output image"
    overlay = NiftiImage(int16(scaledf))

    overlay.setDescription(mr.description)
    overlay.setFilename(mr.filename + "dd")
    overlay.setQForm(mr.getQForm())

    return overlay
Example 15
 def induced_voltage_generation(self, Beam, length = 'slice_frame'):
     '''
     *Method to calculate the induced voltage through the derivative of the
     profile; the impedance must be of inductive type.*
     '''
     
     index = self.current_turn[0]
         
     if self.periodicity:
         self.derivative_line_density_not_filtered = np.zeros(self.slices.n_slices)
         find_index_slice = np.searchsorted(self.slices.edges, self.t_rev[index])
         if self.smooth_before_after[0]:
             if self.filter_ind_imp == 'gaussian':
                 self.slices.n_macroparticles = ndimage.gaussian_filter1d(self.slices.n_macroparticles, sigma=self.filter_options, mode='wrap')
             elif self.filter_ind_imp == 'chebyshev':
                 nCoefficients, b, a = self.slices.beam_profile_filter_chebyshev(self.filter_options)
             else:
                 raise RuntimeError('filter method not recognised')
         if (self.t_rev[index]-self.slices.bin_centers[find_index_slice])>0: 
             temp = np.concatenate((np.array([self.slices.n_macroparticles[find_index_slice]]), self.slices.n_macroparticles[:find_index_slice+1], np.array([self.slices.n_macroparticles[0]])))
         else:
             temp = np.concatenate((np.array([self.slices.n_macroparticles[find_index_slice-1]]), self.slices.n_macroparticles[:find_index_slice], self.slices.n_macroparticles[:2]))
         self.derivative_line_density_not_filtered[: find_index_slice+1] = np.gradient(temp, self.slices.bin_centers[1]-self.slices.bin_centers[0])[1:-1] / (self.slices.bin_centers[1] - self.slices.bin_centers[0])
         if self.smooth_before_after[1]:
             if self.filter_ind_imp == 'gaussian':
                 self.derivative_line_density_filtered = ndimage.gaussian_filter1d(self.derivative_line_density_not_filtered, sigma=self.filter_options, mode='wrap')
             elif self.filter_ind_imp == 'chebyshev':
                 self.derivative_line_density_filtered = filtfilt(b, a, self.derivative_line_density_not_filtered)
                 self.derivative_line_density_filtered = np.ascontiguousarray(self.derivative_line_density_filtered)
             else:
                 raise RuntimeError('filter method not recognised')
             induced_voltage = - Beam.charge * e * Beam.ratio * \
             self.Z_over_n[0][index] * \
             self.derivative_line_density_filtered / (2 * np.pi * self.revolution_frequency[index])   
         else:
             induced_voltage = - Beam.charge * e * Beam.ratio * \
             self.Z_over_n[0][index] * \
             self.derivative_line_density_not_filtered / (2 * np.pi * self.revolution_frequency[index])
     else:
         induced_voltage = - Beam.charge * e / (2 * np.pi) * Beam.ratio * \
             self.Z_over_n[0][index] / self.revolution_frequency[index] * \
             self.slices.beam_profile_derivative(self.deriv_mode)[1] / \
             (self.slices.bin_centers[1] - self.slices.bin_centers[0])    
     
     self.induced_voltage = induced_voltage[0:self.slices.n_slices]
     
     if isinstance(length, int):
         max_length = len(induced_voltage)
         if length > max_length:
             induced_voltage = np.lib.pad(self.induced_voltage, (0, length - max_length), 'constant', constant_values=(0,0))
         return induced_voltage[0:length]
Example 16
def _smooth_data_array(arr, affine, fwhm, copy=True):
    """Smooth images with a a Gaussian filter.

    Apply a Gaussian filter along the three first dimensions of arr.

    Parameters
    ----------
    arr: numpy.ndarray
        3D or 4D array, with image number as last dimension.

    affine: numpy.ndarray
        Image affine transformation matrix for image.

    fwhm: scalar, numpy.ndarray
        Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
        If a scalar is given, kernel width is identical on all three directions.
        A numpy.ndarray must have 3 elements, giving the FWHM along each axis.

    copy: bool
        if True, make a copy of the input array; otherwise smooth the input
        array in place.

    Returns
    -------
    smooth_arr: numpy.ndarray
    """

    if arr.dtype.kind == 'i':
        if arr.dtype == np.int64:
            arr = arr.astype(np.float64)
        else:
            arr = arr.astype(np.float32)
    if copy:
        arr = arr.copy()

    # Zero out possible NaNs and Infs in the image.
    arr[np.logical_not(np.isfinite(arr))] = 0

    try:
        # Keep the 3D part of the affine.
        affine = affine[:3, :3]

        # Convert from FWHM in mm to a sigma.
        fwhm_sigma_ratio = np.sqrt(8 * np.log(2))
        vox_size         = np.sqrt(np.sum(affine ** 2, axis=0))
        sigma            = fwhm / (fwhm_sigma_ratio * vox_size)
        for n, s in enumerate(sigma):
            ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)
    except Exception:
        raise ValueError('Error smoothing the array.')
    else:
        return arr
Example 17
def gaussian_filter():
    shape = (40, 41, 42, 100)
    smooth_sigma = np.asarray((1., 2, 3.))

    data1 = random_matrix(shape, order='F')
    data2 = order_preserving_copy(data1)

    for img in np.rollaxis(data1, -1):
        img[...] = ndimage.gaussian_filter(img, smooth_sigma)

    for n, s in enumerate(smooth_sigma):
        ndimage.gaussian_filter1d(data2, s, output=data2, axis=n)

    np.testing.assert_almost_equal(data1, data2)
Example 18
def get_y_derivativemap(flat, flat_bpix, bg_std_norm,
                        max_sep_order=150, pad=50,
                        med_filter_size=(7, 7),
                        flat_mask=None):

    """
    flat
    flat_bpix : bpix'ed flat
    """

    # 1d-derivatives along y-axis : 1st attempt
    # im_deriv = ni.gaussian_filter1d(flat, 1, order=1, axis=0)

    # 1d-derivatives along y-axis : 2nd attempt. Median filter first.

    flat_deriv_bpix = ni.gaussian_filter1d(flat_bpix, 1,
                                           order=1, axis=0)

    # We also make a median-filtered one. This one will be used to make masks.
    flat_medianed = ni.median_filter(flat,
                                     size=med_filter_size)

    flat_deriv = ni.gaussian_filter1d(flat_medianed, 1,
                                      order=1, axis=0)

    # min/max filter

    flat_max = ni.maximum_filter1d(flat_deriv, size=max_sep_order, axis=0)
    flat_min = ni.minimum_filter1d(flat_deriv, size=max_sep_order, axis=0)

    # mask for aperture boundary
    if pad is None:
        sl = slice(None)
    else:
        sl = slice(pad, -pad)

    flat_deriv_masked = np.zeros_like(flat_deriv)
    flat_deriv_masked[sl, sl] = flat_deriv[sl, sl]

    if flat_mask is not None:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5) & flat_mask
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5) & flat_mask
    else:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5)
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5)

    return dict(data=flat_deriv, #_bpix,
                pos_mask=flat_deriv_pos_msk,
                neg_mask=flat_deriv_neg_msk,
                )
Example 19
def main():
    poly = load_file('monkey.ply')
    poly.BuildLinks(0)
    np = poly.GetNumberOfPoints()

    Ls = numpy.zeros((np, np))
    X, Y, Z = numpy_support.vtk_to_numpy(poly.GetPoints().GetData()).transpose()

    for i in xrange(np):
        for j in xrange(np):
            if i != j :
                if poly.IsEdge(i, j):
                    Ls[i, j] = -1
                    Ls[i, i] += 1

    Ls = numpy.matrix(Ls)
    Dx = X * Ls
    Dy = Y * Ls
    Dz = Z * Ls
    
    Dxg = gaussian_filter1d(Dx, 1)
    Dyg = gaussian_filter1d(Dy, 1)
    Dzg = gaussian_filter1d(Dz, 1)

    #Lsn = numpy.linalg.inv(Ls)

    #Xn = Lsn * Dxg
    #Yn = Lsn * Dyg
    #Zn = Lsn * Dzg

    X[:] = numpy.linalg.lstsq(Ls, Dxg.transpose())[0].ravel()
    Y[:] = Dyg[0, :]
    Z[:] = Dzg[0, :]

    print X.shape
    
    V = numpy.zeros((np, 3))
    V[:,0] = X
    V[:,1] = Y
    V[:,2] = Z

    poly.GetPoints().SetData(numpy_support.numpy_to_vtk(V))
    poly.BuildLinks(0)
    poly.BuildCells()

    w = vtk.vtkSTLWriter()
    w.SetFileName('/tmp/teste.stl')
    w.SetInput(poly)
    w.Write()
Example 20
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
                     data=None, epochs=None, sigma=None,
                     order=None, scalings=None, vline=None,
                     x_label=None, y_label=None, colorbar=False,
                     cmap='RdBu_r'):
    """Aux function to plot erfimage on sensor topography"""
    from scipy import ndimage
    import matplotlib.pyplot as plt
    this_data = data[:, ch_idx, :].copy()
    ch_type = channel_type(epochs.info, ch_idx)
    if ch_type not in scalings:
        raise KeyError('%s channel type not in scalings' % ch_type)
    this_data *= scalings[ch_type]

    if callable(order):
        order = order(epochs.times, this_data)

    if order is not None:
        this_data = this_data[order]

    if sigma > 0.:
        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)

    ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
              origin='lower', vmin=vmin, vmax=vmax, picker=True,
              cmap=cmap, interpolation='nearest')

    if x_label is not None:
        plt.xlabel(x_label)
    if y_label is not None:
        plt.ylabel(y_label)
    if colorbar:
        plt.colorbar()
Example 21
def convolve_to_resolution(flux, source_R, source_R_sampling, target_R):
    """
    Requires the grid to be in logarithmic wavelength scaling. Here we define
    the resolution R as lambda/delta_lambda

    Parameters
    ----------
    flux: numpy.ndarray
        flux in numpy ndarray
    source_R: float
        R of the input flux
    source_R_sampling: float
        pixels per resolution element for the input flux
    target_R: float
        R of the output flux
    Returns
    -------
        : numpy.ndarray
        flux convolved down to the new resolution target_R, on the same
        wavelength grid as the input
    """

    assert target_R < source_R, ("Requested resolution {target_R} must be "
                                 "smaller than the source resolution "
                                 "{source_R}".format(target_R=target_R,
                                                     source_R=source_R))

    rescale_R = 1 / np.sqrt((1 / target_R) ** 2 - (1 / source_R) ** 2)
    sigma = ((source_R / rescale_R) * source_R_sampling
             / (2 * np.sqrt(2 * np.log(2))))

    convolved_flux = nd.gaussian_filter1d(flux, sigma)

    return convolved_flux
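For instance (a hypothetical call, with numbers picked for illustration): degrading a spectrum from R = 100000 at 3 pixels per resolution element down to R = 20000:

import numpy as np
from scipy import ndimage as nd

flux = np.random.rand(10000)  # spectrum sampled on a log-wavelength grid
low_res = convolve_to_resolution(flux, source_R=100000,
                                 source_R_sampling=3, target_R=20000)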
Example 22
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
                     epochs=None, sigma=None, order=None, scalings=None,
                     vline=None, x_label=None, y_label=None, colorbar=False,
                     cmap='RdBu_r', vlim_array=None):
    """Plot erfimage on sensor topography."""
    from scipy import ndimage
    import matplotlib.pyplot as plt
    this_data = data[:, ch_idx, :]
    if vlim_array is not None:
        vmin, vmax = vlim_array[ch_idx]

    if callable(order):
        order = order(epochs.times, this_data)

    if order is not None:
        this_data = this_data[order]

    if sigma > 0.:
        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)

    img = ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)],
                    aspect='auto', origin='lower', vmin=vmin, vmax=vmax,
                    picker=True, cmap=cmap, interpolation='nearest')

    ax = plt.gca()
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_label is not None:
        ax.set_ylabel(y_label)
    if colorbar:
        plt.colorbar(mappable=img)
Example 23
def calc_fq(times, pulse_delay, on_time, off_time, pulse_number):
    '''This builds a set of basis functions shaped somewhat like our
       oscillating stimuli.  All inputs are in seconds'''
    # Basically, this is the standard experiment type we run.
    #                      Experiment 
    # /=======================^===========================================\
    # <--Delay--><--on--><--off--><--on--><--off--><--on--><--off--> . . . 

    answers = zeros_like(times)

    # Calculate the "on" windows in seconds
    openings = []               # Start of the n'th window
    closings = []               # End   of the n'th window

    # Populate the above arrays
    for pn in range(pulse_number):
        offset = pulse_delay + ((on_time + off_time) * pn)
        openings.append(offset)
        closings.append(offset + on_time)

    # Based on the start and stop times, alter the signal
    for opening_time, closing_time in zip(openings, closings):
        during_this_pulse = (times > opening_time) & (times < closing_time)
        answers[during_this_pulse] = 1

    return ndimage.gaussian_filter1d(answers, 1)
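For example (made-up timing; assumes zeros_like and ndimage are in scope where calc_fq is defined, as in the snippet): a 2 s delay followed by three 1 s-on / 2 s-off pulses, sampled at 100 Hz:

import numpy as np
from numpy import zeros_like
from scipy import ndimage

times = np.arange(0., 12., 0.01)  # 100 Hz sampling
basis = calc_fq(times, pulse_delay=2.0, on_time=1.0, off_time=2.0, pulse_number=3)
# basis is ~1 inside each on-window and ~0 elsewhere, with softened edges.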
Example 24
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
                             data=None, epochs=None, sigma=None, order=None,
                             scalings=None, vline=None, x_label=None,
                             y_label=None, colorbar=False, cmap='RdBu_r',
                             vlim_array=None):
    """Plot erfimage topography using a single axis."""
    from scipy import ndimage
    _compute_ax_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
    ax = bn.ax
    data_lines = bn.data_lines
    extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
              bn.y_t + bn.y_s * len(epochs.events))
    this_data = data[:, ch_idx, :]
    vmin, vmax = (None, None) if vlim_array is None else vlim_array[ch_idx]

    if callable(order):
        order = order(epochs.times, this_data)

    if order is not None:
        this_data = this_data[order]

    if sigma > 0.:
        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)

    data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
                                origin='lower', vmin=vmin, vmax=vmax,
                                picker=True, cmap=cmap,
                                interpolation='nearest'))
Example 25
 def _process_psth_data(self,begin,end,param_index):
     duration = 2.0
     binsize = 0.01 #binsize 10 ms
     bins = np.arange(0.,duration,binsize)
     for channel,channel_trains in self.spike_trains.iteritems():
         if channel not in self.histogram_data:
             self.histogram_data[channel] = {}
         for unit,unit_train in channel_trains.iteritems():
             if unit not in self.histogram_data[channel]:
                 self.histogram_data[channel][unit] = {}
             if param_index not in self.histogram_data[channel][unit]:
                 self.histogram_data[channel][unit][param_index] = {}
                 self.histogram_data[channel][unit][param_index]['trials'] = 0
                 self.histogram_data[channel][unit][param_index]['spikes'] = []
                 self.histogram_data[channel][unit][param_index]['means'] = []
             take = ((unit_train >= begin) & (unit_train < begin + duration) & (unit_train< end))
             trial_spikes = unit_train[take] - begin
             trial_mean = np.mean(np.array(np.histogram(trial_spikes, bins=bins)[0],dtype='float') / binsize)
             spikes = np.append(self.histogram_data[channel][unit][param_index]['spikes'], trial_spikes)
             trials = self.histogram_data[channel][unit][param_index]['trials'] + 1
             psth_data = np.array(np.histogram(spikes, bins=bins)[0],dtype='float') / (binsize*trials)
             smooth_psth = nd.gaussian_filter1d(psth_data, sigma=5)
             mean = np.mean(smooth_psth)
             self.histogram_data[channel][unit][param_index]['spikes'] = spikes
             self.histogram_data[channel][unit][param_index]['trials'] = trials
             self.histogram_data[channel][unit][param_index]['psth_data'] = psth_data
             self.histogram_data[channel][unit][param_index]['smooth_psth'] = smooth_psth
             self.histogram_data[channel][unit][param_index]['bins'] = bins
             self.histogram_data[channel][unit][param_index]['mean'] = mean
             self.histogram_data[channel][unit][param_index]['means'].append(trial_mean)
             self.histogram_data[channel][unit][param_index]['std'] = np.std(self.histogram_data[channel][unit][param_index]['means'])
Example 26
def plot_by_shell(shells, x, y, start=0, smooth=0, zoom=1, **plot_args):
    line_props = plot_args.get('line_props', [{}]*len(shells))
    unit = plot_args.get('unit', {x: 1, y: 1})
    ax = plot_args.get('ax')
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()

    for s, shell in shells.iteritems():
        if s < 0:
            continue
        shell = shell[np.where(np.isfinite(shell[x]) & np.isfinite(shell[y]))]
        if x == 'f':
            ys, xs = corr.bin_average(shell[x], shell[y], 1)
            xs = xs[:-1] - start
        if smooth:
            ys = gaussian_filter1d(ys, smooth, mode='nearest', truncate=2)
        ax.plot(xs*unit[x], ys*unit[y], **line_props[s])

    ax.legend(fontsize='small')
    ax.set_xlabel(plot_args['xylabel'][x])
    ax.set_ylabel(plot_args['xylabel'][y])

    xlim = ax.get_xlim()
    ax.set_xlim(-25, xlim[1]*zoom)
    ax.set_ylim(0, 1.1)

    return fig, ax
Example 27
def plot_by_config(prefix_pattern, smooth=1, side=1, fps=1):
    configs = ['inward', 'aligned', 'random', 'outward']
    stats = ['phi', 'psi', 'dens']
    stats = {stat: {config: np.load(prefix_pattern.format(config, stat)+'.npy')
                    for config in configs}
             for stat in stats}
    xlims = {'dens': 300, 'phi': 200, 'psi': 150}
    plt.rc('text', usetex=True)
    for stat, v in stats.iteritems():
        colors = ['orange', 'brown', 'magenta', 'cyan']
        fig, ax = plt.subplots(figsize=(4, 3))
        for conf in configs:
            ax.plot(np.arange(len(v[conf]))/fps,
                    gaussian_filter1d(v[conf], smooth, mode='constant', cval=1),
                    lw=2, label=conf, c=colors.pop())
        ax.set_xscale('log')
        ax.set_xlabel(r'$tf$')
        ax.set_xlim(1, xlims[stat])
        ax.set_ylim(0, 1)
        statlabels = {
            'dens': r'$\mathrm{density}\ \langle r_{ij}\rangle^{-2}$',
            'psi': r'$\mathrm{bond\ angle\ order}\ \Psi$',
            'phi': r'$\mathrm{molecular\ angle\ order}\ \Phi$'}
        ax.set_ylabel(statlabels[stat])
        ax.legend(loc='lower left', fontsize='small')
        fig.savefig(prefix_pattern.format('ALL', stat) + '.pdf')
Example 28
 def find_jumps(self,ds, threshold = 40000):
     self._prepare_find_jumps()
     ds = self._hf[ds]
     ds = gaussian_filter1d(ds,2)
     offset=ds[0]
     jpnh = 0
     for i in xrange(ds.shape[0]-3):
         #i +=3
         #df=(((ds[i+1]+ds[i+2]+ds[i+3])/3.)-ds[i])
         #df=(ds[i] - ((ds[i-1]+ds[i-2]+ds[i-3])/3.))
         df=((ds[i+1])-ds[i])
         if (abs(df)>threshold):
             self.qps_jpn_nr.append(1.)
             offset = offset-df
             jpnh = df
             #print df, offset
             self.qps_jpn_hight.append(abs(float(jpnh)))
             
             self.qps_jpn_spec.append(float(ds[i]+offset))
             jpnh = df
         
         else:
             self.qps_jpn_nr.append(0.)
             #self.qps_jpn_hight.append(float(jpnh))
             self.qps_jpn_spec.append(float(ds[i]+offset))
Example 29
 def _get_apex_time(self, chromatogram):
     try:
         x, y = chromatogram.as_arrays()
         y = gaussian_filter1d(y, 1)
         return x[np.argmax(y)]
     except AttributeError:
         return chromatogram.apex_time
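The sigma=1 smoothing just suppresses single-sample noise spikes before the argmax; a synthetic illustration (hypothetical data):

import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.linspace(0, 10, 500)
y = np.exp(-0.5 * ((x - 4.0) / 0.3) ** 2) + 0.05 * np.random.randn(500)

apex_time = x[np.argmax(gaussian_filter1d(y, 1))]  # ~4.0 despite the noise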
Example 30
def find_feature_mask(s_msk, sigma=1, ax=None, x_values=None):
    # find emission features from observed spec.

    filtered_spec = s_msk - ni.median_filter(s_msk, 15)
    filtered_spec = ni.gaussian_filter1d(filtered_spec, 0.5)
    smoothed_std = get_smoothed_std(filtered_spec,
                                    rad=3, smooth_length=3)
    emission_feature_msk_ = filtered_spec > sigma*smoothed_std
    #emission_feature_msk_ = ni.binary_closing(emission_feature_msk_)
    emission_feature_msk = ni.binary_opening(emission_feature_msk_,
                                             iterations=1)

    if ax is not None:
        if x_values is None:
            x_values = np.arange(len(s_msk))

        #ax.plot(x_values, s_msk)
        ax.plot(x_values, filtered_spec)
        ax.plot(x_values, smoothed_std)

        ax.plot(x_values[emission_feature_msk],
                emission_feature_msk[emission_feature_msk],
                "ys", mec="none")

    return emission_feature_msk
Example 31
def gaussian_filter(array, sigma):
    # sigma=0.25 -> Gaussian kernel of length 3
    # sigma=0.5  -> Gaussian kernel of length 5
    # sigma=1    -> Gaussian kernel of length 9
    return gaussian_filter1d(array, sigma)
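Those kernel lengths follow from scipy's default truncate=4.0: the kernel radius is int(truncate * sigma + 0.5), so the length is 2 * radius + 1. A quick check:

for sigma in (0.25, 0.5, 1.0):
    radius = int(4.0 * sigma + 0.5)  # truncate=4.0 is scipy's default
    print(sigma, 2 * radius + 1)     # prints 3, 5, 9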
Example 32
import numpy as num
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
from skimage import data, io, segmentation, color
from skimage.future import graph
from osgeo import gdal
from skimage.segmentation import random_walker
import skimage
from scipy import ndimage

adata = gdal.Open("*****.tif", GA_ReadOnly)
data1 = adata.GetRasterBand(1).ReadAsArray()
data1 = num.array(data1, dtype=num.float64)
data1 = ndimage.gaussian_filter1d(data1, 5)
markers = num.zeros(data1.shape, dtype=num.uint)
markers[data1 < 63.16895369] = 1
markers[data1 > 335.826927] = 2

# Run random walker algorithm
labels = random_walker(data1, markers, beta=10, mode='bf')

data = data1 * 1
data1[labels == 1] = 0
markers1 = num.zeros(data1.shape, dtype=num.uint)
markers1[data1 < 295.3176796] = 1
markers1[data1 > 433.0834366] = 2
labels1 = random_walker(data1, markers1, beta=10, mode='bf')
labels1 = num.array(labels1 - 1, dtype=num.uint)

com = labels + labels1
Example 33
 def _gaussian_filter_(self, crop, axis=1, sigma=2, order=0):
     """ Apply a gaussian filter along specified axis. """
     return gaussian_filter1d(crop, sigma=sigma, axis=axis, order=order)
Example 34
def nc_interploate(dataset_path, new_dir, suffix, method, rho_thres,
                   smooth_factor):
    nc_name = re.findall(r"\/(.+)\.netcdf", dataset_path)[0]
    os.system(
        "cp %s %s" %
        (dataset_path, os.path.join(new_dir, nc_name + suffix + ".netcdf")))
    nc_ds = nc.Dataset(os.path.join(new_dir, nc_name + suffix + ".netcdf"),
                       "a")

    #start = timeit.default_timer()
    Dp = np.array(nc_ds.variables["DifferentialPhase"])
    rho = np.array(nc_ds.variables["CrossPolCorrelation"])

    # set the mask area to determine which data used for interpolation
    # mask_tmp = ~np.isnan(rho)
    # mask_tmp[mask_tmp] &= rho[mask_tmp] < 0.4
    # rho_mask = np.ma.masked_where(mask_tmp, rho)
    rho_mask = np.ma.masked_where(rho < rho_thres, rho)
    grid_X, grid_Y = np.mgrid[0:rho.shape[0], 0:rho.shape[1]]
    X_use = grid_X[~rho_mask.mask]
    Y_use = grid_Y[~rho_mask.mask]
    pt_use = np.concatenate((np.reshape(X_use,
                                        (-1, 1)), np.reshape(Y_use, (-1, 1))),
                            axis=1)
    pt_inter = np.concatenate(
        (np.reshape(grid_X.flatten(),
                    (-1, 1)), np.reshape(grid_Y.flatten(), (-1, 1))),
        axis=1)

    # interpolate rho
    rho[rho <= -0.025] = np.NaN
    rho_use = rho[~rho_mask.mask]
    rho_new = interpolate.griddata(pt_use, rho_use, pt_inter, method=method)
    rho_new = np.reshape(rho_new, (rho.shape[0], rho.shape[1]))

    # interpolate Dp
    Dp[Dp == -2] = np.NaN
    Dp_use = Dp[~rho_mask.mask]
    Dp_new = interpolate.griddata(pt_use, Dp_use, pt_inter, method=method)
    Dp_new = np.reshape(Dp_new, (Dp.shape[0], Dp.shape[1]))

    # adjust the reserved area
    # larger erode kernel & iter and open kernel, more area would be reserved
    # larger close kernel, less area would be reserved
    kernel_erode = np.ones((2, 2), np.int8)
    kernel_open = np.ones((4, 4), np.int8)
    kernel_close = np.ones((10, 10), np.int8)
    mask_res = rho_mask.mask.astype(np.uint8)
    rho_inside_mask = cv2.erode(mask_res, kernel_erode, iterations=4)
    rho_inside_mask = cv2.morphologyEx(rho_inside_mask, cv2.MORPH_OPEN,
                                       kernel_open)
    rho_inside_mask = cv2.morphologyEx(rho_inside_mask, cv2.MORPH_CLOSE,
                                       kernel_close)

    # restrict rho's range
    rho_new[rho_inside_mask.astype(bool)] = np.NaN
    mask_tmp = ~np.isnan(rho_new)
    mask_tmp[mask_tmp] &= rho_new[mask_tmp] > 1.0
    rho_new[mask_tmp] = 1.0
    mask_tmp = ~np.isnan(rho_new)
    mask_tmp[mask_tmp] &= rho_new[mask_tmp] < -1.0
    rho_new[mask_tmp] = -1.0
    nc_ds.variables["CrossPolCorrelation"][:, :] = rho_new

    # smooth Dp by Gaussian filter and restrict Dp's range
    Dp_new[rho_inside_mask.astype(bool)] = np.NaN
    Dp_new = np.array(
        [ndimage.gaussian_filter1d(ax, sigma=smooth_factor) for ax in Dp_new])
    mask_tmp = ~np.isnan(Dp_new)
    mask_tmp[mask_tmp] &= Dp_new[mask_tmp] > 360
    Dp_new[mask_tmp] = 360.0
    mask_tmp = ~np.isnan(Dp_new)
    mask_tmp[mask_tmp] &= Dp_new[mask_tmp] < -360
    Dp_new[mask_tmp] = -360.0
    nc_ds.variables["DifferentialPhase"][:, :] = Dp_new

    # see the reserved area (px value==0)
    '''
    plt.imshow(rho_inside_mask, cmap='gray')
    plt.colorbar()
    plt.show()
    '''
    # see the interpolated result
    '''
    fig, ax = plt.subplots(1, figsize=(16, 8))
    ax1 = ax.imshow(X=Dp_new, cmap="rainbow", vmax=np.nanmax(Dp), vmin=np.nanmin(Dp))
    fig.colorbar(ax1)
    plt.savefig("Dp_GS_filter.png", dpi=400)
    '''
    #stop = timeit.default_timer()
    #print("Run-time of Interpolation: ", stop - start)
    nc_ds.close()
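Incidentally, the per-row list comprehension used to smooth Dp_new is equivalent to a single vectorized call with the axis argument (a sketch with synthetic data; both use the default 'reflect' boundary mode):

import numpy as np
from scipy import ndimage

a = np.random.rand(360, 1000)
rows = np.array([ndimage.gaussian_filter1d(r, sigma=2) for r in a])
vec = ndimage.gaussian_filter1d(a, sigma=2, axis=1)
assert np.allclose(rows, vec)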
Example 35
    detector = Crires("K/2/4", [1, 2, 3], orders=[2, 3, 4, 5, 6, 7])
    star = Star.load(join(medium_dir, "star.yaml"))
    planet = Planet.load(join(medium_dir, "planet.yaml"))

    transit_time = "2020-05-25T10:31:25.418"
    transit_time = Time(transit_time, format="fits")
    planet.time_of_transit = transit_time

    print("Loading data...")
    normalized = SpectrumArray.read(join(medium_dir, "spectra_normalized.npz"))
    telluric = SpectrumArray.read(join(medium_dir, "telluric.npz"))
    stellar = SpectrumArray.read(join(medium_dir, "stellar.npz"))
    intensities = SpectrumArray.read(join(medium_dir, "intensities.npz"))

    spec = solve_prepared(normalized, telluric, stellar, intensities, detector,
                          star, planet)

    print("Saving data...")
    spec.write("planet_noise_1.fits")

    print("Plotting results...")
    planet_model = SpectrumList.read(join(done_dir, "planet_model.fits"))

    plt.plot(spec.wavelength, spec.flux)
    plt.plot(
        np.concatenate(planet_model.wavelength),
        gaussian_filter1d(np.concatenate(planet_model.flux), 1),
    )
    plt.savefig(join(done_dir, "planet_spectrum_noise_1.png"))
    plt.show()
Example 36
                        'python', '../../../bin/iupred2a/iupred2a.py',
                        temp.name, 'long'
                    ],
                                             capture_output=True,
                                             text=True,
                                             check=True)

                # Extract scores from output
                scores = []
                for line in process.stdout.rstrip().split(
                        '\n'):  # Remove trailing newline to prevent empty line
                    if line.startswith('#'):
                        continue
                    fields = line.split('\t')
                    scores.append(float(fields[2]))
                scores = ndimage.gaussian_filter1d(scores, 2)

                # Find bounds
                bounds = []
                ordered_prev = scores[0] < thresh
                bound_start = 0
                for i, score in enumerate(scores):
                    ordered_curr = score < thresh
                    if ordered_curr != ordered_prev:
                        bounds.append(((bound_start, i), ordered_prev))
                        bound_start = i
                    ordered_prev = ordered_curr
                bounds.append(((bound_start, i + 1),
                               ordered_curr))  # Bound for final segment

                # Create dataframe rows
Example 37
    traj_smooth = pd.DataFrame(0, index=np.arange(len(traj_f)), columns=['x','y','dx','dy','frame','particle']) # initialize traj_smooth
    traj_smooth['frame'] = traj_f['frame']
    
    # smoothing trajectory --> Gaussian smoothing
    par = list(set(traj_f['particle'])) # get all unique particle index
    
    
    # loop particles
    for p in range(0,len(par)):
        
        print('Smoothing Act{}: {:.2f} %'.format(act,p*100/len(par)))
        #trajp = traj_f[traj_f['particle']== par[p]] # trajectory of one particle
        pos = traj_f.groupby('particle')[pos_columns].get_group(par[p])*mpp # trajectory of each particle (in micron)
    
        # smooth the traj
        xfil = gaussian_filter1d(pos['x'],sigma)
        yfil = gaussian_filter1d(pos['y'],sigma)
    
        # collect smoothed traj in a new dataframe
        traj_smooth.loc[pos.index,'x'] = xfil
        traj_smooth.loc[pos.index,'y'] = yfil
        
#==============================================================================
#         plt.figure()
#         plt.clf()
#         tp.plot_traj(traj_f[traj_f['particle']==par[p]]*mpp)
#         plt.plot(xfil,yfil)
#==============================================================================

        # Calc dx, dy
        dx = [xfil[x] - xfil[x-1] for x in range(1,len(xfil))]
Example 38
env_th7000 = threshold_AP(env_smoothed, 7000)

# rolling window standard deviation 
def sd_h(data, window):
    stdev = np.zeros((numberoflines,depth,length),dtype=float)
    window_width = window
    pad_width = int(window_width / 2)
    for i in range(numberoflines):
        for j in range(depth):
            s = pd.Series(data[i][j,:])
            stdev[i][j,:] = s.rolling(window_width).std(center=True)
        stdev[i] = np.pad(stdev[i][:,pad_width:-pad_width], ((0,0),(pad_width,pad_width)), mode='edge')
    return stdev   

sd10 = gaussian_filter1d(sd_h(envelope, 10), sigma=10, axis=2)

# Calculate discontinuity attribute 
"""Functions sourced from https://github.com/seg/tutorials-2015/blob/master/1512_Semblance_coherence_and_discontinuity/writeup.md"""

def moving_window(data, func, window): 
    wrapped = lambda x: func(x.reshape(window))
    return scipy.ndimage.generic_filter(data, wrapped, window)

def marfurt_semblance(region):
    # Stack traces in a 3D region into a 2D array
    region = region.reshape(-1, region.shape[-1])
    ntraces, nsamples = region.shape
    square_sums = (np.sum(region, axis=1))**2
    sum_squares = np.sum(region**2, axis=1)
    c = square_sums.sum() / sum_squares.sum()
Example 39
def lris_pipeline(prefix,
                  dir,
                  science,
                  arc,
                  flats,
                  out_prefix,
                  useflat=0,
                  usearc=0,
                  cache=0,
                  offsets=None):
    print "Processing mask", out_prefix

    scinums = science.split(",")
    flatnums = flats.split(",")

    for i in range(len(flatnums)):
        flatnums[i] = dir + prefix + flatnums[i] + ".fits"
    scinames = []
    for i in range(len(scinums)):
        name = dir + prefix + scinums[i] + ".fits"
        scinames.append(name)
    arcname = dir + prefix + arc + ".fits"

    nsci = len(scinums)

    print "Preparing flatfields"
    if useflat == 1:
        yforw, yback, slits, starboxes, flatnorm = flatload(out_prefix)
    else:
        yforw, yback, slits, starboxes, flatnorm = flatpipe(
            flatnums, out_prefix)
    axis1 = flatnorm.shape[0]
    axis2 = flatnorm.shape[1]

    print "Preparing arcs for line identification"
    if usearc == 1:
        arcname = out_prefix + "_arc.fits"
        arc_tmp = pyfits.open(arcname)
        arc_ycor = arc_tmp[0].data.astype(scipy.float32)
        lamps = arc_tmp[0].header['LAMPS']
        del arc_tmp
    else:
        arcname = dir + prefix + arc + ".fits"
        arc_tmp = pyfits.open(arcname)
        arcdata = arc_tmp[0].data.copy()
        lamps = arc_tmp[0].header['LAMPS']
        del arc_tmp
        arcdata = biastrim(arcdata)
        arc_ycor = spectools.resampley(arcdata, yforw).astype(scipy.float32)
        arcname = out_prefix + "_arc.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor)
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.writeto(arcname)
        del arc_hdu

    wide_stars = []
    for i, j in starboxes:
        mod = scipy.where((yback < j) & (yback > i))
        a = mod[0].min() - 3
        b = mod[0].max() + 3
        if a < 0:
            a = 0
        if b > axis1:
            b = axis1
        wide_stars.append([a, b])

    print "Bias trimming and flatfielding science data"
    scidata = scipy.zeros((nsci, axis1, axis2), 'f4')
    center = scipy.zeros((nsci, len(starboxes)), 'f4')
    flux = scipy.zeros((nsci), 'f4')
    airmass = []
    for i in range(nsci):
        filename = scinames[i]
        scitmp = pyfits.open(filename)

        scidatatmp = scitmp[0].data.copy()
        scidatatmp = biastrim(scidatatmp).astype(scipy.float32)

        #Remove screwed columns (this should already be done though...)
        bad = scipy.where(scidatatmp > 56000.)
        nbad = bad[0].size
        for k in range(nbad):
            y = bad[0][k]
            x = bad[1][k]
            scidatatmp[y,
                       x] = (scidatatmp[y, x - 1] + scidatatmp[y, x + 1]) / 2.
        # Don't flatfield blueside data
        scidatatmp = scidatatmp / flatnorm
        scidata[i, :, :] = scidatatmp.copy()

        try:
            mswave = scitmp[0].header['MSWAVE']
        except:
            mswave = 6500.
        if len(slits) == 1:
            try:
                mswave = scitmp[0].header['WAVELEN']
            except:
                pass
        disperser = scitmp[0].header['GRANAME']
        airmass.append(scitmp[0].header['AIRMASS'])

        # Old data mightn't have a dichroic keyword!
        try:
            dichroic = scitmp[0].header['DICHNAME']
        except:
            dichroic = None

        flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size / 4]
        for j in range(len(starboxes)):
            a, b = starboxes[j]
            m, n = wide_stars[j]
            a -= 4
            b += 4
            m -= 2
            n += 2
            center[i, j] = offset.findoffset(scidatatmp[m:n], yforw[a:b], m)

        del scitmp
        del scidatatmp
    del flatnorm

    if offsets is not None:
        center = scipy.asarray(offsets)
    else:
        center = stats.stats.nanmean(center, axis=1)

    center[scipy.isnan(center)] = 0.
    print "Normalizing Fluxes"
    cmax = center.max()
    fmax = flux.max()
    for i in range(center.size):
        center[i] -= cmax
        ratio = fmax / flux[i]
        scidata[i] *= ratio
    cmax = ceil(fabs(center.min()))

    if disperser == "150/7500":
        scale = 4.8
    elif disperser == "300/5000":
        scale = 2.45
    elif disperser == "400/8500":
        scale = 1.85
    elif disperser == "600/5000":
        scale = 1.25
    elif disperser == "600/7500":
        scale = 1.25
    elif disperser == "600/10000":
        scale = 1.25
    elif disperser == "831/8200":
        scale = 0.915
    elif disperser == "900/5500":
        scale = 0.85
    elif disperser == "1200/7500":
        scale = 0.64

    if dichroic == 'mirror':
        redcutoff = 4000.
        dich_file = ''
    elif dichroic == '460':
        redcutoff = 4600.
        dich_file = '460'
    elif dichroic == '500':
        redcutoff = 5000.
        dich_file = '500'
    elif dichroic == '560':
        redcutoff = 5500.
        dich_file = '560'
    elif dichroic == '680':
        redcutoff = 6700.
        dich_file = '680'
    else:
        redcutoff = 3500.
        dich_file = ''

    nsize = 0
    csize = 0
    wide_slits = []
    linewidth = []
    slits = [[1150, 1250]]
    for i, j in slits:
        csize += int(j - i + cmax) + 5
        nsize += j - i + 5
        mod = scipy.where((yback > i) & (yback < j))
        a = mod[0].min() - 4
        b = mod[0].max() + 4
        if a < 0:
            a = 0
        if b > axis1:
            b = axis1
        wide_slits.append([a, b])
        if len(wide_slits) % 7 == 0 or len(slits) == 1:
            linewidth.append(
                measure_width.measure(arc_ycor[(i + j) / 2, :], 15))
    csize -= 5
    nsize -= 5

    linewidth = scipy.median(scipy.asarray(linewidth))

    print "Loading wavelength model"
    lris_path = lris_red.__path__[0]

    filename = lris_path + "/uves_sky.model"
    infile = open(filename, "r")
    wavecalmodel = load(infile)
    infile.close()
    wave = scipy.arange(3400., 10400., 0.1)

    if dich_file != '':
        filename = lris_path + "/dichroics/dichroic_" + dich_file + "_t.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        spline = interpolate.splrep(input[:, 0], input[:, 1])
        dich = interpolate.splev(wave, spline)
        dich[wave < 4500.] = 1.
        dich[wave > 8800.] = 1.
        del input, spline
    else:
        dich = scipy.ones(wave.size)
    wavemodel = interpolate.splev(wave, wavecalmodel)
    finemodel = ndimage.gaussian_filter1d(wavemodel, linewidth * scale / 0.1)
    wavemodel = ndimage.gaussian_filter1d(finemodel, 5. / 0.1)
    finemodel *= dich
    finemodel = interpolate.splrep(wave, finemodel)
    wavemodel *= dich
    widemodel = interpolate.splrep(wave, wavemodel)
    goodmodel = finemodel
    del dich, wave, wavemodel

    extractwidth = 10

    print "Creating output arrays"
    outlength = int(axis2 * 1.6)
    out = scipy.zeros((nsci, nsize, outlength), scipy.float32) * scipy.nan
    out2 = scipy.zeros((2, csize, outlength), scipy.float32) * scipy.nan

    if cache:
        print "Caching..."
        strtfile = out_prefix + "_TMPSTRT.fits"
        bgfile = out_prefix + "_TMPBSUB.fits"
        try:
            os.remove(strtfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        if nsci > 1:
            outfile.header.update('CTYPE3', 'LINEAR')
            outfile.header.update('CRPIX3', 1)
            outfile.header.update('CRVAL3', 1)
            outfile.header.update('CD3_3', 1)
        outfile.writeto(strtfile)
        del outfile, out

        try:
            os.remove(bgfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out2)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        outfile.header.update('CTYPE3', 'LINEAR')
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
        outfile.writeto(bgfile)
        del outfile, out2

    posc = 0
    posn = 0
    count = 1
    for k in range(len(slits)):
        i, j = slits[k]
        a, b = wide_slits[k]
        ##
        if count < 1:
            count += 1
            continue
        ##
        print "Working on slit %d (%d to %d)" % (count, i, j)
        sky2x, sky2y, ccd2wave = wavematch(a, scidata[:, a:b], arc_ycor[i:j],
                                           yforw[i:j], widemodel, finemodel,
                                           goodmodel, scale, mswave, redcutoff)

        strt, bgsub, varimg = doskysub(i, j - i, outlength, scidata[:, a:b],
                                       yback[a:b], sky2x, sky2y, ccd2wave,
                                       scale, mswave, center, redcutoff,
                                       airmass)

        h = strt.shape[1]
        if cache:
            file = pyfits.open(strtfile, mode="update")
            out = file[0].data
        out[:, posn:posn + h] = strt.copy()
        if cache:
            file.close()
            del file, out
        posn += h + 5

        ##
        #		lris_red.skysub.RESAMPLE = 1
        #		count += 1
        #		continue
        ##

        h = bgsub.shape[0]
        if cache:
            file = pyfits.open(bgfile, mode="update")
            out2 = file[0].data
        out2[0, posc:posc + h] = bgsub.copy()
        out2[1, posc:posc + h] = varimg.copy()
        if cache:
            file.close()
            del file, out2
        posc += h + 5
        ##
        #		count += 1
        #		continue
        ##
        tmp = scipy.where(scipy.isnan(bgsub), 0., bgsub)
        filter = tmp.sum(axis=0)
        mod = scipy.where(filter != 0)
        start = mod[0][0]
        end = mod[0][-1] + 1
        del tmp
        slit = bgsub[:, start:end]
        spectra = extract(slit, varimg[:, start:end], extractwidth)
        num = 1
        crval = mswave - (0.5 * bgsub.shape[1] - start) * scale
        for spec in spectra:
            for item in spec:
                if item.size == 4:
                    hdu = pyfits.PrimaryHDU()
                    hdu.header.update('CENTER', item[2])
                    hdu.header.update('WIDTH', item[3])
                    hdulist = pyfits.HDUList([hdu])
                else:
                    thdu = pyfits.ImageHDU(item)
                    thdu.header.update('CRVAL1', crval)
                    thdu.header.update('CD1_1', scale)
                    thdu.header.update('CRPIX1', 1)
                    thdu.header.update('CRVAL2', 1)
                    thdu.header.update('CD2_2', 1)
                    thdu.header.update('CRPIX2', 1)
                    thdu.header.update('CTYPE1', 'LINEAR')
                    hdulist.append(thdu)
            outname = out_prefix + "_spec_%02d_%02d.fits" % (count, num)
            hdulist.writeto(outname)
            num += 1

        count += 1

##
#	file = pyfits.open(bgfile)
#	file.writeto(out_prefix+"_save.fits")
#	return
##

    if cache:
        file = pyfits.open(bgfile)
        out2 = file[0].data.copy()
        del file
    tmp = out2[0].copy()
    tmp = scipy.where(scipy.isnan(tmp), 0, 1)
    mod = scipy.where(tmp.sum(axis=0) != 0)
    start = mod[0][0]
    end = mod[0][-1] + 1
    del tmp

    outname = out_prefix + "_bgsub.fits"
    outfile = pyfits.PrimaryHDU(out2[0, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out2.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    outfile.writeto(outname)
    hdr = outfile.header.copy()

    outname = out_prefix + "_var.fits"
    outfile = pyfits.PrimaryHDU(out2[1, :, start:end])
    outfile.header = hdr
    outfile.writeto(outname)
    del out2, hdr

    if cache:
        file = pyfits.open(strtfile)
        out = file[0].data.copy()
        del file
    outname = out_prefix + "_straight.fits"
    outfile = pyfits.PrimaryHDU(out[:, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    if nsci > 1:
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
    outfile.writeto(outname)

    del out, outfile
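# A minimal, self-contained sketch of the fine/wide sky-model construction
# used above: a high-resolution template is smoothed once to the instrumental
# resolution (the "fine" model) and once more heavily (the "wide" model, for
# coarse wavelength matching), then refit as splines. The template here is a
# synthetic single line; the real pipeline unpickles a UVES sky model, and
# the linewidth/scale values below are assumptions.
import numpy as np
from scipy import interpolate, ndimage

wave = np.arange(3400., 10400., 0.1)      # 0.1-Angstrom sampling, as above
template = 1. + np.exp(-0.5 * ((wave - 5577.) / 0.5) ** 2)

linewidth, scale = 2.0, 1.25              # pixels and Angstroms/pixel (assumed)
fine = ndimage.gaussian_filter1d(template, linewidth * scale / 0.1)
wide = ndimage.gaussian_filter1d(fine, 5. / 0.1)

finemodel = interpolate.splrep(wave, fine)
widemodel = interpolate.splrep(wave, wide)
# either model can now be evaluated on an arbitrary wavelength grid:
print(interpolate.splev(np.array([5570., 5577., 5584.]), finemodel))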
Example n. 40
0
    def detectEdges(self):
        """
        Default to the non-illuminated image for edge detection.
        """

        ############# arbitrary decisions (by hand inspection) ###############
        # how many pixels to smooth the original data
        smoothPx = 3
        # how many pixels to smooth the gradient
        gradientSmoothPx = 8
        # for determining baselines above which to find a detection, these are
        # from the edge of the chip inward, where it's just noise and such
        backgroundPxRange = 100
        # how many stds above mean background to declare a "detection"
        # for left right gradient
        LRstdDetect = 4
        # how many stds above mean background to declare a "detection" for top
        TstdDetect = 2
        # how many pixels to grow the left, right, and top edge detections for
        # narrowing line by line detections of arm edge, ignoring everything
        # outside
        bboxBufferPx = 5
        #####################################################################

        data = self.mainData

        # images are noisy, smooth them a bit
        # (and smooth them again after gradients)
        data = gaussian(data, smoothPx)

        _cols = []
        _rows = []
        _side = []
        _prom = []
        # prom is basically signal (prominence above surroundings):
        # the height of the bump above the minimum on each side
        _width = []
        # _gradData = []

        # first determine bounding box ii1, ii2, jj1 are limits
        for axis in [1, 0]:
            grad = numpy.gradient(data, axis=axis)
            grad = numpy.abs(grad)  # try abs after smoothing?
            grad = gaussian_filter1d(grad, gradientSmoothPx, axis=axis)

            # _gradData.append(grad)

            if axis == 0:
                # iterate over columns instead of rows
                # save before transposing
                self.vertGrad = grad
                grad = grad.T
            else:
                self.horizGrad = grad

            meanVal = numpy.mean(grad, axis=0)
            stdVal = numpy.std(grad, axis=0)

            if axis == 1:
                rBG = numpy.mean(meanVal[:backgroundPxRange])
                rSD = numpy.mean(stdVal[:backgroundPxRange])
                aw = numpy.argwhere(
                    meanVal > rBG + LRstdDetect * rSD).flatten()
                ii1 = aw[0]  # left side region of interest
                ii2 = aw[-1]  # right side region of interest

                # left right limits in image where beta arm should be
                self.ii1 = ii1  # save in case wanted later
                self.ii2 = ii2  # save in case wanted later

            else:
                # top side background
                tBG = numpy.mean(meanVal[-backgroundPxRange:])
                tSD = numpy.mean(stdVal[-backgroundPxRange:])
                aw = numpy.argwhere(meanVal > tBG + TstdDetect * tSD).flatten()

                # top limit in image where beta arm should be
                #  ii1, ii2, jj1 form the rough frame of the beta arm
                jj1 = aw[-1]  # y limit region of interest
                self.jj1 = jj1  # save in case wanted later

        # find row by row individual peaks
        # only look inside bounding box
        for axis in [1, 0]:
            ### repeating this junk from before
            grad = numpy.gradient(data, axis=axis)
            grad = numpy.abs(grad)
            grad = gaussian_filter1d(grad, gradientSmoothPx, axis=axis)
            if axis == 1:
                for row in range(
                        jj1 + bboxBufferPx):  # only iterate up to top of robot
                    line = grad[row]

                    peaks = find_peaks(line)[0]
                    prom = peak_prominences(line, peaks)[0]
                    widths = peak_widths(line, peaks)[0]

                    # ignore peaks not inside bounding box
                    keep = (peaks > ii1 - bboxBufferPx) & (peaks <
                                                           ii2 + bboxBufferPx)
                    peaks = peaks[keep]
                    if len(peaks) == 0:
                        continue
                    prom = prom[keep]
                    widths = widths[keep]

                    # find first bump (left side)
                    _cols.append(peaks[0])
                    _rows.append(row)
                    _side.append("left")
                    _prom.append(prom[0])
                    _width.append(widths[0])

                    # find last bump (right side)
                    _cols.append(peaks[-1])
                    _rows.append(row)
                    _side.append("right")
                    _prom.append(prom[-1])
                    _width.append(widths[-1])

            else:
                grad = grad.T
                for col in range(ii1 - bboxBufferPx, ii2 + bboxBufferPx):
                    line = grad[col]
                    peaks = find_peaks(line)[0]
                    prom = peak_prominences(line, peaks)[0]
                    widths = peak_widths(line, peaks)[0]
                    # ignore any maxima outside the region of interest
                    keep = peaks < jj1 + bboxBufferPx
                    peaks = peaks[keep]
                    if len(peaks) == 0:
                        continue
                    prom = prom[keep]
                    widths = widths[keep]

                    # find last bump (top of arm)
                    _cols.append(col)
                    _rows.append(peaks[-1])
                    _side.append("top")
                    _prom.append(prom[-1])
                    _width.append(widths[-1])

        df = {}
        df["col"] = _cols
        df["row"] = _rows
        df["side"] = _side
        df["prom"] = _prom
        df["width"] = _width

        self.edgeDetections = pd.DataFrame(df)
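# A stand-alone sketch of the row-by-row edge detection above: take the
# absolute gradient along an axis, smooth it with a 1-D Gaussian, then pick
# the strongest peaks on a single line via their prominences. The image here
# is synthetic; detectEdges runs the same steps on camera data inside a
# detected bounding box.
import numpy
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks, peak_prominences

img = numpy.zeros((50, 200))
img[:, 60:140] = 1.0                         # a bright "arm" with two edges
img += numpy.random.default_rng(0).normal(0, 0.05, img.shape)

grad = numpy.abs(numpy.gradient(img, axis=1))
grad = gaussian_filter1d(grad, 8, axis=1)    # gradientSmoothPx = 8 above

line = grad[25]                              # one row
peaks = find_peaks(line)[0]
prom = peak_prominences(line, peaks)[0]
best = sorted(peaks[numpy.argsort(prom)[-2:]])
print(best)                                  # ~ [60, 140]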
Example n. 41
0
def plot_image_epochs(epochs,
                      picks,
                      sigma=0.3,
                      vmin=None,
                      vmax=None,
                      colorbar=True,
                      order=None,
                      show=True):
    """Plot Event Related Potential / Fields image

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs
    picks : int | array of int
        The indices of the channels to consider
    sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image.
    vmin : float
        The min value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers
    vmax : float
        The max value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers
    colorbar : bool
        Whether to display a colorbar.
    order : None | array of int | callable
        If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int it should have the same
        length as the number of good epochs. If it's a callable the
        arguments passed are the times vector and the data as a 2d array
        (data.shape[1] == len(times)).
    show : bool
        Whether to show the figure at the end.

    Returns
    -------
    figs : the list of matplotlib figures
        One figure per channel displayed
    """
    import pylab as pl

    units = dict(eeg='uV', grad='fT/cm', mag='fT')
    scaling = dict(eeg=1e6, grad=1e13, mag=1e15)

    picks = np.atleast_1d(picks)
    evoked = epochs.average()
    data = epochs.get_data()[:, picks, :]
    if vmin is None:
        vmin = data.min()
    if vmax is None:
        vmax = data.max()

    figs = list()
    for this_data, idx in zip(np.swapaxes(data, 0, 1), picks):
        this_fig = pl.figure()
        figs.append(this_fig)

        ch_type = channel_type(epochs.info, idx)
        this_data *= scaling[ch_type]

        this_order = order
        if callable(order):
            this_order = order(epochs.times, this_data)

        if this_order is not None:
            this_data = this_data[this_order]

        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)

        ax1 = pl.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
        im = pl.imshow(this_data,
                       extent=[
                           1e3 * epochs.times[0], 1e3 * epochs.times[-1], 0,
                           len(data)
                       ],
                       aspect='auto',
                       origin='lower',
                       vmin=vmin,
                       vmax=vmax)
        ax2 = pl.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
        if colorbar:
            ax3 = pl.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
        ax1.set_title(epochs.ch_names[idx])
        ax1.set_ylabel('Epochs')
        ax1.axis('auto')
        ax1.axis('tight')
        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
        ax2.plot(1e3 * evoked.times, scaling[ch_type] * evoked.data[idx])
        ax2.set_xlabel('Time (ms)')
        ax2.set_ylabel(units[ch_type])
        ax2.set_ylim([vmin, vmax])
        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
        if colorbar:
            pl.colorbar(im, cax=ax3)
        pl.tight_layout()

    if show:
        pl.show()

    return figs
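# A minimal sketch of the smoothing step in plot_image_epochs: epochs are
# stacked as image rows and a 1-D Gaussian runs along the epoch axis only
# (axis=0), leaving the time axis untouched. Synthetic data, no MNE needed;
# sigma is exaggerated here for visibility.
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt

n_epochs, n_times = 80, 200
times = np.linspace(-0.2, 0.8, n_times)
data = np.sin(2 * np.pi * 5 * times)[None, :] \
    + np.random.default_rng(0).normal(0, 1.0, (n_epochs, n_times))

smoothed = ndimage.gaussian_filter1d(data, sigma=3, axis=0)

fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.imshow(data, aspect='auto', origin='lower')
ax1.set_title('raw epochs')
ax2.imshow(smoothed, aspect='auto', origin='lower')
ax2.set_title('smoothed across epochs')
plt.show()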
Example n. 42
0
    def seg_process(self, Img):
        gray = cv2.cvtColor(Img, cv2.COLOR_BGR2GRAY)
        H, W = gray.shape
        #gray = gray [: ,100: W -100]
        #Img = Img [: ,100: W -100]
        ave_line = self.calculate_the_average_line(gray)
        peaks = self.aline.find_4peak(ave_line)

        H, W = gray.shape

        #gray = cv2.blur(gray,(3,3))
        #gray = cv2.medianBlur(gray,5)
        #gray = cv2.blur(gray,(5,5))

        gray = cv2.GaussianBlur(gray, (3, 3), 0)
        #gray = cv2.bilateralFilter(gray,15,75,75)
        #gray = cv2.bilateralFilter(gray,15,75,75)
        ave_line = self.calculate_the_average_line(gray)
        peaks = self.aline.find_4peak(ave_line)
        #gray = cv2.blur(gray,(5,5),0)

        if self.display_flag == True:
            cv2.imshow('ini', Img)

            cv2.imshow('blur', gray.astype(np.uint8))

        x_kernel = np.asarray([
            [-1, 0, 1],  # Sobel kernel for x-direction
            [-2, 0, 2],
            [-1, 0, 1]
        ])

        #y_kernel = np.asarray([[-2, -2, -2], # Sobel kernel for y-direction
        #                       [1,   1,  1],
        #                       [1,   1,  1]])
        y_kernel = np.asarray([
            [-1, -1, -1],  # custom difference kernel for y-direction (overwritten below)
            [-1, -1, -1],
            [-1, -1, -1],
            [6, 6, 6],
            [-1, -1, -1],
            [-1, -1, -1],
            [-1, -1, -1]
        ])
        #y_kernel = np.asarray([ # Sobel kernel for y-direction
        #                [-1,-1],

        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],

        #                [12,12],
        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],
        #                [-1,-1],

        #                ])
        y_kernel = np.asarray([  # custom difference kernel for y-direction (the one actually used)
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [28, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
            [-1, 0],
        ])
        y_kernel = y_kernel / 9
        gray = gray.astype(np.float64)  # np.float is deprecated in modern NumPy
        #sobel_x = signal.convolve2d(gray, x_kernel) #
        sobel_y = signal.convolve2d(gray,
                                    y_kernel)  # convolve kernels over images
        sobel_y = np.clip(sobel_y + 10, 1, 254)
        sobel_y = cv2.medianBlur(sobel_y.astype(np.uint8), 5)
        sobel_y = cv2.GaussianBlur(sobel_y, (5, 5), 0)
        sobel_y = cv2.blur(sobel_y, (5, 5))

        #sobel_y = cv2.GaussianBlur(sobel_y,(5,5),0)
        #sobel_y = cv2.bilateralFilter(sobel_y.astype(np.uint8),9,175,175)
        #find the start 4 point

        sobel_y = sobel_y[self.bias:H - self.bias, :]
        Img = Img[self.bias:H - self.bias, :]

        ave_line = self.calculate_the_average_line(sobel_y)
        peaks = self.aline.find_4peak(ave_line)

        Rever_img = 255 - sobel_y

        #start_point = 596
        peaks = np.clip(peaks, 1, Rever_img.shape[0] - 1)
        #new_peak = np.zeros(4)
        #new_peak=peaks
        #new_peak[3] = peaks[2]+35
        #peaks =  new_peak
        if Manual_start_flag == True:
            peaks[1] = peaks[0] + 472 - 293
            peaks[2] = peaks[0] + 500 - 293
            peaks[3] = peaks[0] + 677 - 293

        path1, path_cost1 = PATH.search_a_path(Rever_img, int(peaks[0]))
        path2, path_cost1 = PATH.search_a_path(Rever_img, int(peaks[1]))
        path3, path_cost1 = PATH.search_a_path(Rever_img, int(peaks[2]))
        path4, path_cost1 = PATH.search_a_path(Rever_img, int(peaks[3]))
        path1 = gaussian_filter1d(path1, 2)
        path2 = gaussian_filter1d(path2, 2)

        path3 = gaussian_filter1d(path3, 2)
        path4 = gaussian_filter1d(path4, 2)

        path4 = path3
        #path2 = path3
        path3 = path2 + 35
        path2 = path2 - 5
        path3, path_cost1 = PATH.search_a_path_based_on_path(Rever_img, path3)
        path3 = gaussian_filter1d(path3, 4)

        [path1, path2, path3, path4] = np.clip([path1, path2, path3, path4], 0,
                                               sobel_y.shape[0] - 1)
        for i in range(len(path1)):
            sobel_y[int(path1[i]), i] = 254
            sobel_y[int(path2[i]), i] = 254
            sobel_y[int(path3[i]), i] = 254
            sobel_y[int(path4[i]), i] = 254
        Dark_boundaries = sobel_y * 0
        [path1, path2, path3, path4] = np.clip([path1, path2, path3, path4], 0,
                                               Dark_boundaries.shape[0] - 2)
        for i in range(len(path1)):
            Dark_boundaries[int(path1[i]), i] = 254
            Dark_boundaries[int(path2[i]), i] = 220
            Dark_boundaries[int(path3[i]), i] = 199
            Dark_boundaries[int(path4[i]), i] = 180

        [path1, path2, path3, path4] = np.clip([path1, path2, path3, path4], 0,
                                               Img.shape[0] - 2)
        for i in range(Img.shape[1]):
            Img[int(path1[i]) + 1, i, :] = Img[int(path1[i]),
                                               i, :] = [254, 0, 0]
            Img[int(path2[i]) + 1, i, :] = Img[int(path2[i]),
                                               i, :] = [0, 254, 0]
            Img[int(path3[i]) + 1, i, :] = Img[int(path3[i]),
                                               i, :] = [0, 0, 254]
            Img[int(path4[i]) + 1, i, :] = Img[int(path4[i]), i, :] = [0, 0, 0]

        if self.display_flag == True:

            cv2.imshow('revert', Rever_img.astype(np.uint8))

            #cv2.imshow('path on blur',gray.astype(np.uint8))
            cv2.imshow('Seg2', Img)

            #sobel_y = sobel_y*0.1
            #edges = cv2.Canny(gray,50, 300,10)
            cv2.imshow('seg', sobel_y.astype(np.uint8))
            cv2.imwrite(self.savedir_path + str(1) + ".jpg",
                        sobel_y.astype(np.uint8))

            cv2.waitKey(1)

        return Img, sobel_y, Dark_boundaries, [path1, path2, path3, path4]
Example n. 43
0
def PT_Inversion(p, a1, a2, p1, p2, p3, T3, verb=False):
    '''
     Calculates the PT profile for the inversion case based on Equation (2)
     from Madhusudhan & Seager 2009.
     It takes a pressure array (e.g., extracted from a pressure file) and 6
     free parameters for the inversion case, and generates an inverted PT
     profile. The profile is then smoothed using a 1D Gaussian filter. The
     pressure array needs to be equally spaced in log space.

     Parameters
     ----------
     p:  1D array of floats
         Pressure array needs to be equally spaced in log space from bottom to top 
         of the atmosphere.
     a1: Float
         Model exponential factor in Layer 1, empirically determined to be within
         range (0.2, 0.6).
     a2: Float
         Model exponential factor in Layer 2, empirically determined to be within
         range (0.04, 0.5) 
     p1: Float
         Pressure boundary between Layers 1 and 2 (in bars).
     p2: Float
         Pressure of the inversion point within Layer 2 (in bars).
     p3: Float
         Pressure boundary between Layers 2 and 3 (in bars).
     T3: float
         Temperature in the Layer 3.
      
     Returns
     -------
     PT_Inver:  tuple of arrays that includes:
              - temperature and pressure arrays of every layer of the atmosphere 
                (PT profile)
              - concatenated array of temperatures, 
              - temperatures at point 1, 2 and 3 (see Figure 1, Madhusudhan & 
                Seager 2009)
          T_conc:   1D array of floats, temperatures concatenated for all levels 
          T_l1:     1D array of floats, temperatures for layer 1
          T_l2_pos: 1D array of floats, temperatures for layer 2 inversion part 
                    (pos-increase in temperatures)
          T_l2_neg: 1D array of floats, temperatures for layer 2 negative part
                    (neg-decrease in temperatures)
          T_l3:     1D array of floats, temperatures for layer 3 
                    (isothermal part)   
          p_l1:     1D array of floats, pressures for layer 1   
          p_l2_pos: 1D array of floats, pressures for layer 2 inversion part 
                    (pos-increase in temperatures)   
          p_l2_neg: 1D array of floats, pressures for layer 2 negative part 
                    (neg-decrease in temperatures)    
          p_l3:     1D array of floats, pressures for layer 3 (isothermal part)   
          T1:       float, temperature at point 1  
          T2:       float, temperature at point 2 
          T3:       float, temperature at point 3  
     T_smooth:  1D array of floats, Gaussian smoothed temperatures, 
                no kinks on Layer boundaries 

     Notes
     -----
     See model details in Madhusudhan & Seager (2009):
     http://adsabs.harvard.edu/abs/2009ApJ...707...24M
 
     Example
     -------    
     # array of pressures, equally spaced in log space 
     p = np.array([  1.00000000e-05,   1.17680000e-05,   1.38480000e-05,
                     1.62970000e-05,   1.91790000e-05,   2.25700000e-05,
                     2.65600000e-05,   3.12570000e-05,   3.67830000e-05,
                     4.32870000e-05,   5.09410000e-05,   5.99400000e-05,
                     7.05480000e-05,   8.30280000e-05,   9.77000000e-05,
                     1.14970000e-04,   1.35300000e-04,   1.59220000e-04,
                     1.87380000e-04,   2.20510000e-04,   2.59500000e-04,
                     3.05380000e-04,   3.59380000e-04,   4.22920000e-04,
                     4.97700000e-04,   5.85700000e-04,   6.89260000e-04,
                     8.11130000e-04,   9.54540000e-04,   1.12330000e-03,
                     1.32190000e-03,   1.55560000e-03,   1.83070000e-03,
                     2.15440000e-03,   2.53530000e-03,   2.98360000e-03,
                     3.51110000e-03,   4.13200000e-03,   4.86260000e-03,
                     5.72230000e-03,   6.73410000e-03,   7.92480000e-03,
                     9.32600000e-03,   1.09740000e-02,   1.29150000e-02,
                     1.51990000e-02,   1.78860000e-02,   2.10490000e-02,
                     2.47700000e-02,   2.91500000e-02,   3.43040000e-02,
                     4.03700000e-02,   4.75080000e-02,   5.59080000e-02,
                     6.57930000e-02,   7.74260000e-02,   9.11160000e-02,
                     1.07220000e-01,   1.26180000e-01,   1.48490000e-01,
                     1.74750000e-01,   2.05650000e-01,   2.42010000e-01,
                     2.84800000e-01,   3.35160000e-01,   3.94420000e-01,
                     4.64150000e-01,   5.46220000e-01,   6.42800000e-01,
                     7.56460000e-01,   8.90210000e-01,   1.04760000e+00,
                     1.23280000e+00,   1.45080000e+00,   1.70730000e+00,
                     2.00920000e+00,   2.36440000e+00,   2.78250000e+00,
                     3.27450000e+00,   3.85350000e+00,   4.53480000e+00,
                     5.33660000e+00,   6.28020000e+00,   7.39070000e+00,
                     8.69740000e+00,   1.02350000e+01,   1.20450000e+01,
                     1.41740000e+01,   1.66810000e+01,   1.96300000e+01,
                     2.31010000e+01,   2.71850000e+01,   3.19920000e+01,
                     3.76490000e+01,   4.43060000e+01,   5.21400000e+01,
                     6.13590000e+01,   7.22080000e+01,   8.49750000e+01,
                     1.00000000e+02])

     # random values imitate DEMC
     a1 = np.random.uniform(0.2  , 0.6 )
     a2 = np.random.uniform(0.04 , 0.5 )
     p3 = np.random.uniform(0.5  , 10  )
     p2 = np.random.uniform(0.01 , 1   )
     p1 = np.random.uniform(0.001, 0.01)
     T3 = np.random.uniform(1500 , 1700)

     # generates raw and smoothed PT profile
     PT_Inv, T_smooth = PT_Inversion(p, a1, a2, p1, p2, p3, T3)

     # returns full temperature array and temperatures at every point
     T, T0, T1, T2, T3 = PT_Inv[8], PT_Inv[9], PT_Inv[10], PT_Inv[11], PT_Inv[12]

     # sets plots in the middle 
     minT= min(T0, T2)*0.75
     maxT= max(T1, T3)*1.25

     # plots raw PT profile with equally spaced points in log space
     plt.figure(1)
     plt.clf()
     plt.semilogy(PT_Inv[0], PT_Inv[1], '.', color = 'r'     )
     plt.semilogy(PT_Inv[2], PT_Inv[3], '.', color = 'b'     )
     plt.semilogy(PT_Inv[4], PT_Inv[5], '.', color = 'orange')
     plt.semilogy(PT_Inv[6], PT_Inv[7], '.', color = 'g'     )
     plt.title('Thermal Inversion Raw', fontsize=14)
     plt.xlabel('T [K]'               , fontsize=14)
     plt.ylabel('logP [bar]'          , fontsize=14)
     plt.xlim(minT  , maxT)
     plt.ylim(max(p), min(p))
     #plt.savefig('ThermInverRaw.png', format='png')
     #plt.savefig('ThermInverRaw.ps' , format='ps' )

     # plots smoothed PT profile
     plt.figure(2)
     plt.clf()
     plt.semilogy(T       , p, color = 'r')
     plt.semilogy(T_smooth, p, color = 'k')
     plt.title('Thermal Inversion Smoothed', fontsize=14)
     plt.xlabel('T [K]'                    , fontsize=14)
     plt.ylabel('logP [bar]'               , fontsize=14)
     plt.xlim(minT  , maxT)
     plt.ylim(max(p), min(p) )
     #plt.savefig('ThermInverSmoothed.png', format='png')
     #plt.savefig('ThermInverSmoothed.ps' , format='ps' )

     Revisions
     ---------
     2013-11-14  Jasmina Blecic, [email protected]   Written by.
     2014-04-05  Jasmina Blecic, [email protected]   Revision
                     added T3 as free parameter instead of T0
                     changed boundary condition equations accordingly
     2014-09-24  Jasmina  Updated documentation.
     2019-02-13  mhimes   Added verb argument.
     '''

    # The following set of equations derived using Equation 2
    # Madhusudhan and Seager 2009

    # Set top of the atmosphere to p0 to have easy understandable equations:
    p0 = np.amin(p)
    if verb:
        print(p0)

    # Temperature at point 2
    # Calculated from boundary condition between layer 2 and 3
    T2 = T3 - (np.log(p3 / p2) / a2)**2

    # Temperature at the top of the atmosphere
    # Calculated from boundary condition between layer 1 and 2
    T0 = T2 + (np.log(p1 / p2) / -a2)**2 - (np.log(p1 / p0) / a1)**2

    # Temperature at point 1
    T1 = T0 + (np.log(p1 / p0) / a1)**2

    # Error message when temperatures at point 1, 2 or 3 are < 0
    if T0 < 0 or T1 < 0 or T2 < 0 or T3 < 0:
        if verb:
            print('T0, T1, T2 and T3 temperatures are: ', T0, T1, T2, T3)
        raise ValueError(
            'Input parameters give non-physical profile. Try again.')

    # Defining arrays of pressures for every part of the PT profile
    p_l1 = p[(np.where((p >= min(p)) & (p < p1)))]
    p_l2_pos = p[(np.where((p >= p1) & (p < p2)))]
    p_l2_neg = p[(np.where((p >= p2) & (p < p3)))]
    p_l3 = p[(np.where((p >= p3) & (p <= max(p))))]

    # Sanity check for total number of levels
    check = len(p_l1) + len(p_l2_pos) + len(p_l2_neg) + len(p_l3)
    if verb:
        print('Total number of levels in p: ', len(p))
        print(
            '\nLevels per levels in inversion case (l1, l2_pos, l2_neg, l3) are respectively: ',
            len(p_l1), len(p_l2_pos), len(p_l2_neg), len(p_l3))
        print('Checking total number of levels in inversion case: ', check)

    # The following set of equations derived using Equation 2
    # Madhusudhan and Seager 2009

    # Layer 1 temperatures
    T_l1 = (np.log(p_l1 / p0) / a1)**2 + T0

    # Layer 2 temperatures (inversion part)
    T_l2_pos = (np.log(p_l2_pos / p2) / -a2)**2 + T2

    # Layer 2 temperatures (decreasing part)
    T_l2_neg = (np.log(p_l2_neg / p2) / a2)**2 + T2

    # Layer 3 temperatures
    T_l3 = np.linspace(T3, T3, len(p_l3))

    # Concatenating all temperature arrays
    T_conc = np.concatenate((T_l1, T_l2_pos, T_l2_neg, T_l3))

    # PT profile
    PT_Inver = (T_l1, p_l1, T_l2_pos, p_l2_pos, T_l2_neg, p_l2_neg, T_l3, p_l3,
                T_conc, T0, T1, T2, T3)

    # Smoothing with Gaussian_filter1d
    sigma = 4
    T_smooth = gaussian_filter1d(T_conc, sigma, mode='nearest')

    return PT_Inver, T_smooth
Example n. 44
0
def lris_pipeline(prefix,dir,scinames,arcname,flatnames,out_prefix,useflat=0,usearc=0,cache=0,offsets=None):
	print "Processing mask",out_prefix


	nsci = len(scinames)

	print "Preparing flatfields"
	if useflat==1:
		yforw,yback,slits,starboxes,flatnorm = flatload(out_prefix)
	else:
		yforw,yback,slits,starboxes,flatnorm = flatpipe(flatnames,out_prefix)
	axis1 = flatnorm.shape[0]
	axis2 = flatnorm.shape[1]

	"""
	Read lamps data from the arclamp file; this is unnecessary for the red
	  side unless the line fitting is altered to better calibrate the blue
	  end (i.e., for 460 dichroic data).
	"""
	print "Preparing arcs for line identification"
	if usearc==1:
		arcdata = biastrim(pyfits.open(arcname)[0].data)
		arcname = out_prefix+"_arc.fits"
		arc_tmp = pyfits.open(arcname)
		arc_ycor = arc_tmp[0].data.astype(scipy.float32)
		lamps = arc_tmp[0].header['LAMPS']
		del arc_tmp
	else:
		arc_tmp = pyfits.open(arcname)
		arcdata = arc_tmp[0].data.copy()
		lamps = arc_tmp[0].header['LAMPS']
		del arc_tmp
		arcdata = biastrim(arcdata)
		arc_ycor = spectools.resampley(arcdata,yforw).astype(scipy.float32)
		arcname = out_prefix+"_arc.fits"
		arc_hdu = pyfits.PrimaryHDU(arc_ycor)
		arc_hdu.header.update('LAMPS',lamps)
		arc_hdu.writeto(arcname)
		del arc_hdu

	"""
	Skysubtraction, centering, &c. may work better if there is some slop on
	  the sides of the data (the slit definitions have been created to only
	  include good data, so 'bad' edges are rejected).
	"""
	wide_stars = []
	for i,j in starboxes:
		mod = scipy.where((yback<j)&(yback>i))
		a = mod[0].min()-3
		b = mod[0].max()+3
		if a<0:
			a = 0
		if b>axis1:
			b = axis1
		wide_stars.append([a,b])


	print "Bias trimming and flatfielding science data"
	scidata = scipy.zeros((nsci,axis1,axis2),'f4')
	center = scipy.zeros((nsci,len(starboxes)),'f4')
	flux = scipy.zeros((nsci),'f4')
	airmass = []
	for i in range(nsci):
		filename = scinames[i]
		scitmp = pyfits.open(filename)

		scidatatmp = scitmp[0].data.copy()
		scidatatmp = biastrim(scidatatmp).astype(scipy.float32)

		"""
		The biastrim routine should take care of bad columns, but this
		  is just in case; we do a simple linear interpolation over
		  bad columns.
		"""
		bad = scipy.where(scidatatmp>56000.)
		nbad = bad[0].size
		for k in range(nbad):
			y = bad[0][k]
			x = bad[1][k]
			if x==0:
				x1 = x+1
				x2 = x+1
			elif x == scidatatmp.shape[1]-1:
				x1 = x-1
				x2 = x-1
			else:
				x1 = x-1
				x2 = x+1
			scidatatmp[y,x] = \
			    (scidatatmp[y,x1]+scidatatmp[y,x2])/2.

		"""
		Apply the flatfield and copy the data into the working array.
		"""		  
		scidatatmp = scidatatmp/flatnorm
		scidata[i,:,:] = scidatatmp.copy()

		"""
		Copy key header keywords; note that old data might not have
		  MSWAVE or DICHNAME keywords.
		"""
		try:
			mswave = scitmp[0].header['MSWAVE']
		except:
			mswave = 6500.
		disperser = scitmp[0].header['GRANAME']
		airmass.append(scitmp[0].header['AIRMASS'])
		try:
			dichroic = scitmp[0].header['DICHNAME']
		except:
			dichroic = None

		"""
		This should give a reasonable estimate of the sky level; the
		  program does a dumb scaling (to the level of exposure with the
		  highest sky level)
		"""
		flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size/4]

		"""
		Centroid stars in starboxes to find shifts between mask
		  exposures.
		"""
		for j in range(len(starboxes)):
			a,b = starboxes[j]
			m,n = wide_stars[j]
			a -= 4
			b += 4
			m -= 2
			n += 2
			if m<0:
				m = 0
			if n>scidatatmp.shape[0]:
				n = scidatatmp.shape[0]
			if a<0:
				a = 0
			if b>yforw.shape[0]:
				b = yforw.shape[0]
			center[i,j] = offset.findoffset(scidatatmp[m:n],yforw[a:b],m)

		del scitmp
		del scidatatmp
	del flatnorm

	"""
	This implements the mechanism for manually entering offsets (if for
	  example we dithered the stars out of the starboxes).
	"""
	if offsets is not None:
		center = scipy.asarray(offsets)
	else:
		center = stats.stats.nanmean(center,axis=1)

	"""
	Perform the flux scaling and set the offsets relative to each other.
	"""
	print "Normalizing Fluxes"
	cmax = center.max()
	fmax = flux.max()
	for i in range(center.size):
		center[i] -= cmax
		ratio = fmax/flux[i]
		scidata[i] *= ratio
	cmax = ceil(fabs(center.min()))


	"""
	Set the output scale (and approximate input scale), as well as blue
	  cutoff limits.
	"""
	if disperser=="150/7500":
		scale = 4.8
	elif disperser=="300/5000":
		scale = 2.45
	elif disperser=="400/8500":
		scale = 1.85
	elif disperser=="600/5000":
		scale = 1.25
	elif disperser=="600/7500":
		scale = 1.25
	elif disperser=="600/10000":
		scale = 1.25
	elif disperser=="831/8200":
		scale = 0.915
	elif disperser=="900/5500":
		scale = 0.85
	elif disperser=="1200/7500":
		scale = 0.64

	if dichroic=='mirror':
		redcutoff = 4000.
		dich_file = ''
	elif dichroic=='460':
		redcutoff = 4600.  # I haven't checked this...
		dich_file = '460'
	elif dichroic=='500':
		redcutoff = 5000.  # I haven't checked this...
		dich_file = '500'
	elif dichroic=='560':
		redcutoff = 5500.
		dich_file = '560'
	elif dichroic=='680':
		redcutoff = 6700.
		dich_file = '680'
	else:
		redcutoff = 3500.
		dich_file = ''

	"""
	Determine the y-size of the output arrays. We also find an estimate of
	  the mask resolution while looping through. Only every seventh slit
	  is examined to expedite the process.
	"""
	nsize = 0
	csize = 0
	wide_slits = []
	linewidth = []
	for i,j in slits:
		csize += int(j-i+cmax) + 5
		nsize += j-i+5
		mod = scipy.where((yback>i)&(yback<j))
		a = mod[0].min()-4
		b = mod[0].max()+4
		if a<0:
			a = 0
		if b>axis1:
			b = axis1
		wide_slits.append([a,b])
		if len(wide_slits)%7==0:
			linewidth.append(measure_width.measure(arc_ycor[(i+j)/2,:],15))
	csize -= 5
	nsize -= 5

	linewidth = scipy.median(scipy.asarray(linewidth))

	print "Loading wavelength model"
	lris_path = lris.__path__[0]

	filename = lris_path+"/data/uves_sky.model"
	infile = open(filename,"r")
	wavecalmodel = pickle.load(infile)
	infile.close()
	wave = scipy.arange(3400.,10400.,0.1)

	"""
	We make the sky spectrum slightly more realistic by taking into account
	  the dichroic cutoff. This mainly helps with matching the 5577 line
	  for the 560 dichroic. It would be nice if the response of the
	  instrument was somewhat well characterized for all grating/dichroic
	  combinations....
	"""
	if dich_file!='':
		filename = lris_path+"/data/dichroics/dichroic_"+dich_file+"_t.dat"
		infile = open(filename,"r")
		#input = sio.read_array(infile)
		input = np.loadtxt(infile)
		infile.close()
		spline = interpolate.splrep(input[:,0],input[:,1],s=0)
		dich = interpolate.splev(wave,spline)
		dich[wave<4500.] = 1.
		dich[wave>8800.] = 1.
		del input,spline
	else:
		dich = scipy.ones(wave.size)

	"""
	Create two sky spectrum spline models. One is a 'fine' model matched
	  to the resolution of the instrumental setup. The other is a widened
	  model for coarse wavelength matching.
	"""
	wavemodel = interpolate.splev(wave,wavecalmodel)
	finemodel = ndimage.gaussian_filter1d(wavemodel,linewidth*scale/0.1)
	wavemodel = ndimage.gaussian_filter1d(finemodel,5./0.1)
	finemodel *= dich
	finemodel = interpolate.splrep(wave,finemodel,s=0)
	wavemodel *= dich
	widemodel = interpolate.splrep(wave,wavemodel,s=0)
	goodmodel = finemodel
	del dich,wave,wavemodel

	""" See extract.py; sets default extraction width. """
	extractwidth = 10

	print "Creating output arrays"

	"""
	We choose an output array size that *should* be large enough to contain
	  all of the valid data (given reasonable assumptions about how far
	  the slits are placed from the center of the mask). We could also
	  decrease the needed size by enforcing the blue limit....
	"""
	outlength = int(axis2*1.6)
	out = scipy.zeros((nsci,nsize,outlength))*scipy.nan
	out2 = scipy.zeros((2,csize,outlength))*scipy.nan

	"""
	For systems with limited RAM, it might make sense to cache the output
	  arrays to disk. This increases the time it takes to run but may be
	  necessary and also allows the progress of the reduction to be
	  monitored.
	"""
	if cache:
		import os
		print "Caching..."
		strtfile = out_prefix+"_TMPSTRT.fits"
		bgfile = out_prefix+"_TMPBSUB.fits"
		try:
			os.remove(strtfile)
		except:
			pass

		outfile = pyfits.PrimaryHDU(out)
		outfile.header.update('CTYPE1','LINEAR')
		outfile.header.update('CRPIX1',1)
		outfile.header.update('CRVAL1',mswave-(0.5*out2.shape[2])*scale)
		outfile.header.update('CD1_1',scale)
		outfile.header.update('CTYPE2','LINEAR')
		outfile.header.update('CRPIX2',1)
		outfile.header.update('CRVAL2',1)
		outfile.header.update('CD2_2',1)
		if nsci>1:
			outfile.header.update('CTYPE3','LINEAR')
			outfile.header.update('CRPIX3',1)
			outfile.header.update('CRVAL3',1)
			outfile.header.update('CD3_3',1)
		outfile.writeto(strtfile)
		del outfile,out

		try:
			os.remove(bgfile)
		except:
			pass

		outfile = pyfits.PrimaryHDU(out2)
		outfile.header.update('CTYPE1','LINEAR')
		outfile.header.update('CRPIX1',1)
		outfile.header.update('CRVAL1',mswave-(0.5*out2.shape[2])*scale)
		outfile.header.update('CD1_1',scale)
		outfile.header.update('CTYPE2','LINEAR')
		outfile.header.update('CRPIX2',1)
		outfile.header.update('CRVAL2',1)
		outfile.header.update('CD2_2',1)
		outfile.header.update('CTYPE3','LINEAR')
		outfile.header.update('CRPIX3',1)
		outfile.header.update('CRVAL3',1)
		outfile.header.update('CD3_3',1)
		outfile.writeto(bgfile)
		del outfile,out2

	"""
	Loop through all of the slits, determining the wavelength solution and
	  performing the background subtraction. It might be more robust to
	  determine all wavelength solutions, then jointly determine a 'master'
	  solution.... posc stores the current (starting) position of the
	  coadded array, and posn stores the current position of the straight
	  array.
	"""
	posc = 0
	posn = 0
	count = 1

	""" Debugging feature; set to 1 to skip background subtraction """
	lris.lris_red.skysub.RESAMPLE = 0
	""" Extract 1d spectra? """
	do_extract = False

	for k in range(len(slits)):
		i,j = slits[k]
		a,b = wide_slits[k]

		""" Debugging feature; change number to skip initial slits """
		if count<1:
			count += 1
			continue

		print "Working on slit %d (%d to %d)" % (count,i,j)
		# Determine the wavelength solution
		sky2x,sky2y,ccd2wave = wavematch(a,scidata[:,a:b],arc_ycor[i:j],yforw[i:j],widemodel,finemodel,goodmodel,scale,mswave,redcutoff)
		# Resample and background subtract
		print 'Doing background subtraction'
		#scidata[0,a:b] = arcdata[a:b] # This line may be a debugging step that MWA put in.  See what happens with it missing.
		strt,bgsub,varimg = doskysub(i,j-i,outlength,scidata[:,a:b],yback[a:b],sky2x,sky2y,ccd2wave,scale,mswave,center,redcutoff,airmass)

		# Store the resampled 2d spectra
		h = strt.shape[1]
		if cache:
			file = pyfits.open(strtfile,mode="update")
			out = file[0].data
		out[:,posn:posn+h] = strt.copy()
		if cache:
			file.close()
			del file,out
		posn += h+5

		if lris.lris_red.skysub.RESAMPLE:
			count += 1
			continue

		# Store the resampled, background subtracted 2d spectra
		h = bgsub.shape[0]
		if cache:
			file = pyfits.open(bgfile,mode="update")
			out2 = file[0].data
		out2[0,posc:posc+h] = bgsub.copy()
		out2[1,posc:posc+h] = varimg.copy()
		if cache:
			file.close()
			del file,out2
		posc += h+5


		# Find and extract object traces
		if do_extract:
			print '  Extracting object spectra'
			tmp = scipy.where(scipy.isnan(bgsub),0.,bgsub)
			filter = tmp.sum(axis=0)
			mod = scipy.where(filter!=0)
			start = mod[0][0]
			end = mod[0][-1]+1
			del tmp
			slit = bgsub[:,start:end]
			spectra = extract(slit,varimg[:,start:end],extractwidth)
			num = 1
			crval = mswave-(0.5*bgsub.shape[1]-start)*scale
			for spec in spectra:
				for item in spec:
					if item.size==4:
						hdu = pyfits.PrimaryHDU()
						hdu.header.update('CENTER',item[2])
						hdu.header.update('WIDTH',item[3])
						hdulist = pyfits.HDUList([hdu])
					else:
						thdu = pyfits.ImageHDU(item)
						thdu.header.update('CRVAL1',crval)
						thdu.header.update('CD1_1',scale)
						thdu.header.update('CRPIX1',1)
						thdu.header.update('CRVAL2',1)
						thdu.header.update('CD2_2',1)
						thdu.header.update('CRPIX2',1)
						thdu.header.update('CTYPE1','LINEAR')
						hdulist.append(thdu)
					outname = out_prefix+"_spec_%02d_%02d.fits" % (count,num)
					hdulist.writeto(outname)
					num += 1

		count += 1


	""" Output 2d spectra """
	if cache:
		file = pyfits.open(bgfile)
		out2 = file[0].data.copy()
		del file
	tmp = out2[0].copy()
	tmp = scipy.where(scipy.isnan(tmp),0,1)
	mod = scipy.where(tmp.sum(axis=0)!=0)
	start = mod[0][0]
	end = mod[0][-1]+1
	del tmp

	outname = out_prefix+"_bgsub.fits"
	outfile = pyfits.PrimaryHDU(out2[0,:,start:end])
	outfile.header.update('CTYPE1','LINEAR')
	outfile.header.update('CRPIX1',1)
	outfile.header.update('CRVAL1',mswave-(0.5*out2.shape[2]-start)*scale)
	outfile.header.update('CD1_1',scale)
	outfile.header.update('CRPIX2',1)
	outfile.header.update('CRVAL2',1)
	outfile.header.update('CD2_2',1)
	outfile.writeto(outname)
	hdr = outfile.header.copy()

	outname = out_prefix+"_var.fits"
	outfile = pyfits.PrimaryHDU(out2[1,:,start:end])
	outfile.header=hdr
	outfile.writeto(outname)
	del out2,hdr

	if cache:
		file = pyfits.open(strtfile)
		out = file[0].data.copy()
		del file
	for i in range(nsci):
		outname = out_prefix+"_straight_%d.fits" % (i+1)
		outfile = pyfits.PrimaryHDU(out[i,:,start:end])
		outfile.header.update('CTYPE1','LINEAR')
		outfile.header.update('CRPIX1',1)
		outfile.header.update('CRVAL1',mswave-(0.5*out.shape[2]-start)*scale)
		outfile.header.update('CD1_1',scale)
		outfile.header.update('CRPIX2',1)
		outfile.header.update('CRVAL2',1)
		outfile.header.update('CD2_2',1)
		#if nsci>1:
		#	outfile.header.update('CRPIX3',1)
		#	outfile.header.update('CRVAL3',1)
		#	outfile.header.update('CD3_3',1)	
		outfile.writeto(outname)
		del outfile

	del out
Example n. 45
0
def smooth_gauss(arr, var):
    # NB: gaussian_filter1d's second positional argument is sigma (a standard
    # deviation in samples), so the name 'var' is a misnomer, not a variance.
    return ndimage.gaussian_filter1d(arr, var)
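# Quick usage check: the second argument goes straight to gaussian_filter1d
# as sigma (a standard deviation in samples), so smoothing a unit impulse
# with width 2 leaves the total mass at 1 (the kernel is normalized).
import numpy as np
from scipy import ndimage

x = np.zeros(11)
x[5] = 1.0                                # unit impulse
y = ndimage.gaussian_filter1d(x, 2.0)     # equivalent to smooth_gauss(x, 2.0)
print(round(float(y.sum()), 6))           # 1.0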
Example n. 46
0
    def induced_voltage_generation(self, Beam, length='slice_frame'):
        '''
        *Method to calculate the induced voltage through the derivative of the
        profile; the impedance must be of inductive type.*
        '''

        index = self.current_turn[0]

        if self.periodicity:
            self.derivative_line_density_not_filtered = np.zeros(
                self.slices.n_slices)
            find_index_slice = np.searchsorted(self.slices.edges,
                                               self.t_rev[index])
            if self.smooth_before_after[0]:
                if self.filter_ind_imp == 'gaussian':
                    self.slices.n_macroparticles = ndimage.gaussian_filter1d(
                        self.slices.n_macroparticles,
                        sigma=self.filter_options,
                        mode='wrap')
                elif self.filter_ind_imp == 'chebyshev':
                    nCoefficients, b, a = self.slices.beam_profile_filter_chebyshev(
                        self.filter_options)
                else:
                    raise RuntimeError('filter method not recognised')
            temp = np.concatenate(
                (np.array([self.slices.n_macroparticles[find_index_slice - 1]
                           ]), self.slices.n_macroparticles[:find_index_slice],
                 np.array([self.slices.n_macroparticles[0]])))
            self.derivative_line_density_not_filtered[:find_index_slice] = np.gradient(
                temp, self.slices.bin_centers[1] - self.slices.bin_centers[0]
            )[1:-1] / (self.slices.bin_centers[1] - self.slices.bin_centers[0])
            if self.smooth_before_after[1]:
                if self.filter_ind_imp == 'gaussian':
                    self.derivative_line_density_filtered = ndimage.gaussian_filter1d(
                        self.derivative_line_density_not_filtered,
                        sigma=self.filter_options,
                        mode='wrap')
                elif self.filter_ind_imp == 'chebyshev':
                    self.derivative_line_density_filtered = filtfilt(
                        b, a, self.derivative_line_density_not_filtered)
                    self.derivative_line_density_filtered = np.ascontiguousarray(
                        self.derivative_line_density_filtered)
                else:
                    raise RuntimeError('filter method not recognised')
                induced_voltage = - Beam.charge * e * Beam.ratio * \
                self.Z_over_n[index] * \
                self.derivative_line_density_filtered / (2 * np.pi * self.revolution_frequency[index])
            else:
                induced_voltage = - Beam.charge * e * Beam.ratio * \
                self.Z_over_n[index] * \
                self.derivative_line_density_not_filtered / (2 * np.pi * self.revolution_frequency[index])
        else:
            induced_voltage = - Beam.charge * e / (2 * np.pi) * Beam.ratio * \
                self.Z_over_n[index] / self.revolution_frequency[index] * \
                self.slices.beam_profile_derivative(self.deriv_mode)[1] / \
                (self.slices.bin_centers[1] - self.slices.bin_centers[0])

        self.induced_voltage = induced_voltage[0:self.slices.n_slices]

        if isinstance(length, int):
            max_length = len(induced_voltage)
            if length > max_length:
                induced_voltage = np.lib.pad(self.induced_voltage,
                                             (0, length - max_length),
                                             'constant',
                                             constant_values=(0, 0))
            return induced_voltage[0:length]
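# A rough, self-contained sketch of the periodic branch above: smooth the
# (noisy) line density with a wrapped Gaussian, differentiate it, and scale
# by an inductive impedance. All constants and the profile are made up here;
# the real method also handles the revolution period and Chebyshev filtering.
import numpy as np
from scipy import ndimage

n_slices = 200
t = np.linspace(0., 1., n_slices, endpoint=False)   # one revolution, arbitrary units
density = np.exp(-0.5 * ((t - 0.5) / 0.05) ** 2)
density += np.random.default_rng(0).normal(0, 0.01, n_slices)

smooth = ndimage.gaussian_filter1d(density, sigma=2, mode='wrap')
deriv = np.gradient(smooth, t[1] - t[0])

Z_over_n = 1.0                                      # assumed inductive impedance
induced_voltage = -Z_over_n * deriv                 # up to charge/frequency factors
print(induced_voltage.min(), induced_voltage.max())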
Example n. 47
0
plt.grid(b=True, which='major', axis='x')
plt.show()

# In[ ]:

############################################################
# Plot the S(q,w)
############################################################
fig = plt.figure(figsize=[8, 5])
ax = plt.subplot(111)

hbar = 4.135667662e-15  # Planck's constant h in eV*s (despite the variable name)
emax = 0.5 * float(hbar) / (float(timestep) * float(sc_step)) * 1e3
sqw_x[:, 0] = sqw_x[:, 0] / 100.0
sqw_x = sqw_x.T / sqw_x.T.max(axis=0)
sqw_x = ndimage.gaussian_filter1d(sqw_x, sigma=1, axis=1, mode='constant')
sqw_x = ndimage.gaussian_filter1d(sqw_x, sigma=5, axis=0, mode='reflect')
plt.imshow(sqw_x,
           cmap=cmap.gist_ncar_r,
           interpolation='nearest',
           origin='lower',
           extent=[axidx_abs[0], axidx_abs[-1], 0, emax])
plt.plot(ams[:, 0] / ams[-1, 0] * axidx_abs[-1], ams[:, 1:ams_dist_col], 'r')
ala = plt.xticks()

plt.xticks(axidx_abs, axlab)
plt.xlabel('q')
plt.ylabel('Energy (meV)')

plt.autoscale(tight=False)
ax.set_aspect('auto')
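# Side note on the two filter calls above: applying gaussian_filter1d twice
# with different sigmas and axes yields an anisotropic (separable) 2-D blur,
# here broader along energy (axis=0) than along q (axis=1). A tiny demo:
import numpy as np
from scipy import ndimage

img = np.zeros((64, 64))
img[32, 32] = 1.0                                           # point source
img = ndimage.gaussian_filter1d(img, sigma=1, axis=1, mode='constant')
img = ndimage.gaussian_filter1d(img, sigma=5, axis=0, mode='reflect')
print(img[28:37, 32].round(4))   # slow falloff along axis 0 (sigma=5)
print(img[32, 28:37].round(4))   # fast falloff along axis 1 (sigma=1)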
Example n. 48
0
def peak_extract(sg_window,
                 sg_poly,
                 dev_order,
                 data,
                 plot=False,
                 plot_title=''):
    lg.function_log()

    f_min_list = []
    f_max_list = []

    f_inc_list = []

    peaks_array = []

    file_list = []

    if isinstance(data, str):
        file_list.append(data)
    else:
        if isinstance(data, list):
            file_list = data
        else:
            file_list = data['file_path'].tolist()
    """create list of all peaks found in the datasets"""
    for i in range(0, len(file_list)):

        file = load_clean_data(1, file_list[i])

        f = file['frequency']
        imag_z = file['imag_z']

        min_f = round(min(f), 1)
        max_f = round(max(f), 1)

        f_min_list.append(min_f)
        f_max_list.append(max_f)

        f_inc = round(abs((f[1]) - (f[0])), 2)

        f_inc_list.append(f_inc)
        """extraction of peaks from interpolation curve"""
        #d_Z = interp_derivative(f, imag_z, min_f, max_f, f_inc, sg_window, sg_poly, dev_order, "data")
        peaks_d1 = interp_derivative(f, imag_z, min_f, max_f, f_inc, sg_window,
                                     sg_poly, 1, "peaks")
        peaks_dn = interp_derivative(f, imag_z, min_f, max_f, f_inc, sg_window,
                                     sg_poly, dev_order, "peaks")

        peaks_d1_list = (peaks_d1["peak_x"].values.tolist())
        peaks_dn_list = (peaks_dn["peak_x"].values.tolist())
        peaks_array = peaks_array + peaks_d1_list + peaks_dn_list

    hist_start = min(f_min_list) - 0.5
    hist_end = max(f_max_list) + 1
    hist_bins = max(f_inc_list)
    """create histogram"""
    peak_hist, peak_bins = np.histogram(peaks_array,
                                        bins=np.arange(hist_start, hist_end,
                                                       hist_bins))
    plot_bins = (peak_bins[:len(peak_bins) - 1])
    """smoothing of histogram via gaussian filter"""

    hist_filter_1 = savgol_filter(peak_hist, 7, 5)
    hist_filter = gaussian_filter1d(hist_filter_1, 2.5)
    """interpolation of histogram"""
    hist_spline = UnivariateSpline(plot_bins, hist_filter, k=4, s=0)
    """higher number of bins for analysis of interpolation curve"""
    new_bins = np.arange(0, hist_end, f_inc * 0.1)
    """calculation of derivatives to find local maxima"""
    d_hist_spline = hist_spline.derivative()
    d2_hist_spline = hist_spline.derivative(2)
    d_hist_roots = d_hist_spline.roots()
    """only extract roots with positive values for d2/d2x(root)"""
    find_peaks_result = []

    for n in range(0, len(d_hist_roots)):
        if d_hist_roots[n] > min(new_bins):
            if d_hist_roots[n] < max(new_bins):
                if d2_hist_spline(d_hist_roots[n]) < 0:
                    find_peaks_result.append(d_hist_roots[n])

    cen_list = []

    for n in range(0, len(find_peaks_result)):
        if hist_spline(find_peaks_result[n]) > 0:
            cen_list.append(find_peaks_result[n])
    """refinement with gaussian peak fitting"""
    sigma_peak_find = 0.2
    params = []

    amp_list = list(hist_spline(cen_list))

    if len(cen_list) > 0:
        params, bnds = fit_params(amp_list, cen_list, hist_start, hist_end,
                                  0.1, 1.25, 0.5, 2)

        hist_gauss, errs_gauss = scipy.optimize.curve_fit(multiple_gauss,
                                                          plot_bins,
                                                          hist_filter,
                                                          p0=params,
                                                          bounds=bnds)
        g_hist = multiple_gauss(new_bins, hist_gauss)
        """plot peak search result"""

        if plot == True:
            plt.rc('legend', fontsize=10)
            plt.plot(plot_bins, peak_hist, label='histogram')
            plt.plot(plot_bins, hist_filter, label='histogram smoothed')
            plt.plot(new_bins, d_hist_spline(new_bins), label='1. derivative')
            plt.title(plot_title)
            plt.legend()
            plt.show()
            #plt.plot(new_bins, multiple_gauss(new_bins, *params), label = 'gauss')
            #plt.plot(new_bins,g_hist)

            folder_text, sample_text = get_sample_name(file_list[i])

        n_peaks = len(cen_list)
        gauss_peaks = []
        gauss_sigma = []

        errs_peaks = []
        errs_sigma = []

        for a in range(0, n_peaks):
            if hist_gauss[a] > (0.2 * np.mean(hist_gauss[0:n_peaks])):
                gauss_peaks.append(hist_gauss[a + n_peaks])
                gauss_sigma.append(abs(hist_gauss[a + n_peaks * 2]))

                errs_peaks.append(errs_gauss[a + n_peaks])
                errs_sigma.append(abs(errs_gauss[a + n_peaks * 2]))

    else:
        print('no local peaks found')
        gauss_peaks = []
        gauss_sigma = []

    return gauss_peaks, gauss_sigma
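# A stand-alone sketch of the histogram trick in peak_extract: pool peak
# positions from many sweeps, histogram and smooth them, then take local
# maxima as roots of the spline's first derivative that have a negative
# second derivative. All inputs are synthetic; the amplitude cut is an
# added safeguard against spurious roots in empty regions.
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.interpolate import UnivariateSpline

rng = np.random.default_rng(0)
peaks_array = np.concatenate([rng.normal(3.0, 0.1, 200),
                              rng.normal(7.5, 0.1, 150)])

hist, bins = np.histogram(peaks_array, bins=np.arange(0., 10., 0.1))
hist_smooth = gaussian_filter1d(hist.astype(float), 2.5)

spline = UnivariateSpline(bins[:-1], hist_smooth, k=4, s=0)
d1, d2 = spline.derivative(), spline.derivative(2)
maxima = [float(r) for r in d1.roots()
          if d2(r) < 0 and spline(r) > 0.1 * hist_smooth.max()]
print(np.round(maxima, 2))       # ~ [3.0, 7.5]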
Example n. 49
0
        show1 = gray2.astype(float)
        #path2 = mydata_loader.input_path[0,:]/71*(Resample_size-2)
        #path2  = signal.resample(path2, Resample_size)

        #for i in range ( len(path2)):
        #    path2[i]= min(path2[i],Resample_size-1)
        #    path2[i]= max(path2[i],0)
        #    show1[int(path2[i]),i]=254

        show2 = gray2.astype(float)
        save_out = save_out.cpu().detach().numpy()

        save_out = save_out * (Resample_size)
        #save_out = gaussian_filter1d(save_out,5)
        save_out = signal.resample(save_out, Resample_size)
        save_out = gaussian_filter1d(save_out, 5)

        for i in range(len(save_out)):
            save_out[i] = min(save_out[i], Resample_size - 1)
            save_out[i] = max(save_out[i], 0)
            show2[int(save_out[i]), i] = 254
            #show2[int(path2[i]),i]=254
        ori_len = right - left
        save_out = signal.resample(save_out, ori_len)
        save_out = save_out / Resample_size * ini_H
        save_out = numpy.clip(save_out, 0, ini_H - 1)
        for i in range(left, right):

            long[int(save_out[i - left]), i] = 254

        #show3 = numpy.append(show1,show2,axis=1) # cascade
Example n. 50
0
plt.rc('font', **font)

# Set input number of timestamps and training days
n_timestamp = 10
train_days = 4000  # number of days to train from
testing_days = 500  # number of days to be predicted
n_epochs = 50
filter_on = 1

#import and filtering the data set

dataset = pd.read_csv('Colo.csv')
dataset.head()
if filter_on == 1:
    # median filter to remove spikes, then Gaussian smoothing of the result
    dataset['Temperature'] = medfilt(dataset['Temp C'], 3)
    dataset['Temperature'] = gaussian_filter1d(dataset['Temperature'], 1.2)

# Set number of training and testing data
train_set = dataset[0:train_days].reset_index(drop=True)
test_set = dataset[train_days:train_days + testing_days].reset_index(drop=True)
training_set = train_set.iloc[:, 1:2].values
testing_set = test_set.iloc[:, 1:2].values

# Normalize data
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
# use transform (not fit_transform) so the test data reuses the scaling
# fitted on the training data
testing_set_scaled = sc.transform(testing_set)


# Split data into n_timestamp
def data_split(sequence, n_timestamp):
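    # (The example is truncated here in the source; a typical sliding-window
    # completion matching this signature and the n_timestamp setup above
    # would be:)
    X, y = [], []
    for i in range(len(sequence) - n_timestamp):
        X.append(sequence[i:i + n_timestamp])  # n_timestamp past values
        y.append(sequence[i + n_timestamp])    # next value as the target
    return np.array(X), np.array(y)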
Esempio n. 51
0
def get_t0(fname: str,
           sigma: float = 1,
           scan: Union[int, slice] = -1,
           display_result: bool = True,
           plot: bool = True,
           t_range: Tuple[float, float] = (-2, 2),
           invert: bool = False,
           no_slope: bool = True) -> TzResult:
    """Determine t0 from a semiconductor messuarement in the IR. For that, it opens
    the given file, takes the mean of all channels and fits the resulting curve with
    a step function.

    Note that the parameter

    Parameters
    ----------
    fname : str
        Filename of the messpy file containing the data
    sigma : float, optional
        Used for calculating the displayed numerical derivative, by default 1.
    scan: int or slice
        Which scan to use, by default -1, the last scan. If given a slice,
        it takes the mean of the scan slice.
    display_result : bool, optional
        If true, show the fitting results, by default True
    plot : bool, optional
        If true, plot the result, by default True
    t_range : (float, float)
        The range which is used to fit the data.
    invert : bool
        If true, invert data.
    no_slope : bool
        Determines if a variable slope is added to the fit model.

    Returns
    -------
    TzResult
        Result and presentation of the fit.
    """
    a = np.load(fname, allow_pickle=True)
    if not fname.endswith('messpy1.npz'):
        if 'data' in a:
            data = a['data']
            if isinstance(scan, slice):
                sig = np.nanmean(data[0, ..., scan], axis=-1)
            else:
                sig = data[0, ..., scan]
            sig = np.nanmean(sig[:, :, 1], axis=1)
        else:
            sig = np.nanmean(a['signal'], 1)

        t = a['t'] / 1000.
    else:
        data = a['data_Remote IR 32x2']
        if isinstance(scan, slice):
            sig = np.nanmean(data[scan, ...], axis=0)
        else:
            sig = data[scan, ...]
        sig = np.nanmean(sig[0, :, 1, :], axis=-1)
        t = a['t']
    if invert:
        sig = -sig

    idx = (t > t_range[0]) & (t < t_range[1])
    sig = sig.squeeze()[idx]
    #from scipy.signal import savgol_filter
    #dsig = savgol_filter(sig, 11, 2, 1)
    # order=1 returns a smoothed first derivative; sigma controls the width
    dsig = gaussian_filter1d(sig, sigma=sigma, order=1)

    GaussStep = lmfit.Model(gauss_step)

    model = GaussStep + lmfit.models.LinearModel()
    max_diff_idx = np.argmax(abs(dsig))

    params = model.make_params(amp=np.ptp(sig),
                               center=t[idx][max_diff_idx],
                               sigma=0.2,
                               slope=0,
                               intercept=sig.min())
    if no_slope:
        params['slope'].vary = False
    params.add(lmfit.Parameter('FWHM', expr="sigma*2.355"))
    result = model.fit(params=params, data=sig, x=t[idx])
    fig = None
    if display_result:
        import IPython.display
        IPython.display.display(result.params)
    if plot:
        fig, axs = plt.subplots(2, 1, figsize=(5, 7))

        axs[0].plot(t[idx], sig)
        tw = axs[0].twinx()
        tw.plot(t[idx], dsig, c='r', label='Numeric Diff')
        tw.legend()
        #axs[1].plot(t[idx], dsig, color='red')
        axs[1].set_xlabel('t')
        plt.sca(axs[1])
        result.plot_fit()
        axs[1].axvline(result.params['center'].value)

    res = TzResult(
        x0=result.params['center'],
        sigma=result.params['sigma'],
        fwhm=result.params['FWHM'],
        data=(t[idx], sig),
        fit_result=result,
        fig=fig,
    )
    return res
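The step model gauss_step is not shown in this example; a minimal sketch of a Gaussian-broadened step (an error function) consistent with the amp, center, and sigma parameters used above could be (hypothetical helper):

import numpy as np
from scipy.special import erf

def gauss_step(x, amp, center, sigma):
    # cumulative Gaussian: ~0 for x << center, ~amp for x >> center
    return 0.5 * amp * (1 + erf((x - center) / (np.sqrt(2) * sigma)))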
Esempio n. 52
0
def PT_NoInversion(p, a1, a2, p1, p3, T3, verb=False):
    '''
     Calculates PT profile for non-inversion case based on Equation (2) from
     Madhusudhan & Seager 2009.
     It takes a pressure array (e.g., extracted from a pressure file) and 5
     free parameters for the non-inversion case, and generates a non-inverted
     PT profile. The profile is then smoothed using a 1D Gaussian filter. The
     pressure array needs to be equally spaced in log space.

     Parameters
     ----------
     p:  1D array of floats
         Pressure array needs to be equally spaced in log space from bottom to top 
         of the atmosphere.
     a1: Float
         Model exponential factor in Layer 1, empirically determined to be within
         range (0.2, 0.6).
     a2: Float
         Model exponential factor in Layer 2, empirically determined to be within
         range (0.04, 0.5) 
     p1: Float
         Pressure boundary between Layers 1 and 2 (in bars).
     p3: Float
         Pressure boundary between Layers 2 and 3 (in bars).
     T3: float
         Temperature in the Layer 3.
     verb: Boolean
         If True, print some info to screen.
   
     Returns
     -------
     PT_NoInver:  tuple of arrays that includes:
           - temperature and pressure arrays of every layer of the atmosphere 
             (PT profile)
           - concatenated array of temperatures, 
           - temperatures at point 1 and 3 (see Figure 1, Madhusudhan & 
             Seager 2009)
       T_conc:   1D array of floats, temperatures concatenated for all levels
       T_l1:     1D array of floats, temperatures for layer 1
       T_l2_neg: 1D array of floats, temperatures for layer 2
       T_l3:     1D array of floats, temperatures for layer 3 (isothermal part)  
       p_l1:     1D array of floats, pressures for layer 1   
       p_l2_neg: 1D array of floats, pressures for layer 2     
       p_l3:     1D array of floats, pressures for layer 3 (isothermal part)     
       T1:       float, temperature at point 1  
       T3:       float, temperature at point 3  
     T_smooth:  1D array of floats, Gaussian smoothed temperatures, 
             no kinks on layer boundaries 

     Notes
     -----
     The code uses just one equation for layer 2, assuming that decrease 
     in temperature in layer 2 is the same from point 3 to point 2 
     as is from point 2 to point 1.

     Example
     -------
     # array of pressures, equally spaced in log space 
     p = np.array([  1.00000000e-05,   1.17680000e-05,   1.38480000e-05,
                  1.62970000e-05,   1.91790000e-05,   2.25700000e-05,
                  2.65600000e-05,   3.12570000e-05,   3.67830000e-05,
                  4.32870000e-05,   5.09410000e-05,   5.99400000e-05,
                  7.05480000e-05,   8.30280000e-05,   9.77000000e-05,
                  1.14970000e-04,   1.35300000e-04,   1.59220000e-04,
                  1.87380000e-04,   2.20510000e-04,   2.59500000e-04,
                  3.05380000e-04,   3.59380000e-04,   4.22920000e-04,
                  4.97700000e-04,   5.85700000e-04,   6.89260000e-04,
                  8.11130000e-04,   9.54540000e-04,   1.12330000e-03,
                  1.32190000e-03,   1.55560000e-03,   1.83070000e-03,
                  2.15440000e-03,   2.53530000e-03,   2.98360000e-03,
                  3.51110000e-03,   4.13200000e-03,   4.86260000e-03,
                  5.72230000e-03,   6.73410000e-03,   7.92480000e-03,
                  9.32600000e-03,   1.09740000e-02,   1.29150000e-02,
                  1.51990000e-02,   1.78860000e-02,   2.10490000e-02,
                  2.47700000e-02,   2.91500000e-02,   3.43040000e-02,
                  4.03700000e-02,   4.75080000e-02,   5.59080000e-02,
                  6.57930000e-02,   7.74260000e-02,   9.11160000e-02,
                  1.07220000e-01,   1.26180000e-01,   1.48490000e-01,
                  1.74750000e-01,   2.05650000e-01,   2.42010000e-01,
                  2.84800000e-01,   3.35160000e-01,   3.94420000e-01,
                  4.64150000e-01,   5.46220000e-01,   6.42800000e-01,
                  7.56460000e-01,   8.90210000e-01,   1.04760000e+00,
                  1.23280000e+00,   1.45080000e+00,   1.70730000e+00,
                  2.00920000e+00,   2.36440000e+00,   2.78250000e+00,
                  3.27450000e+00,   3.85350000e+00,   4.53480000e+00,
                  5.33660000e+00,   6.28020000e+00,   7.39070000e+00,
                  8.69740000e+00,   1.02350000e+01,   1.20450000e+01,
                  1.41740000e+01,   1.66810000e+01,   1.96300000e+01,
                  2.31010000e+01,   2.71850000e+01,   3.19920000e+01,
                  3.76490000e+01,   4.43060000e+01,   5.21400000e+01,
                  6.13590000e+01,   7.22080000e+01,   8.49750000e+01,
                  1.00000000e+02])

     # random values imitate DEMC
     a1 = np.random.uniform(0.2  , 0.6 )
     a2 = np.random.uniform(0.04 , 0.5 )
     p3 = np.random.uniform(0.5  , 10  )
     p1 = np.random.uniform(0.001, 0.01)
     T3 = np.random.uniform(1500 , 1700)

     # generates raw and smoothed PT profile
     PT_NoInv, T_smooth = PT_NoInversion(p, a1, a2, p1, p3, T3)

     # returns full temperature array and temperatures at every point
     T, T0, T1, T3 = PT_NoInv[6], PT_NoInv[7], PT_NoInv[8], PT_NoInv[9]

     # sets plots in the middle 
     minT= T0*0.75
     maxT= max(T1, T3)*1.25

     # plots raw PT profile with equally spaced points in log space
     plt.figure(3)
     plt.clf()
     plt.semilogy(PT_NoInv[0], PT_NoInv[1], '.', color = 'r'     )
     plt.semilogy(PT_NoInv[2], PT_NoInv[3], '.', color = 'b'     )
     plt.semilogy(PT_NoInv[4], PT_NoInv[5], '.', color = 'orange')
     plt.title('No Thermal Inversion Raw', fontsize=14)
     plt.xlabel('T [K]'                  , fontsize=14)
     plt.ylabel('logP [bar]'             , fontsize=14)
     plt.xlim(minT  , maxT)
     plt.ylim(max(p), min(p))
     #plt.savefig('NoThermInverRaw.png', format='png')
     #plt.savefig('NoThermInverRaw.ps' , format='ps' )

     # plots smoothed PT profile
     plt.figure(4)
     plt.clf()
     plt.semilogy(T       , p, color = 'r')
     plt.semilogy(T_smooth, p, color = 'k')
     plt.title('No Thermal Inversion Smoothed', fontsize=14)
     plt.xlabel('T [K]'                       , fontsize=14)
     plt.ylabel('logP [bar]'                  , fontsize=14)
     plt.xlim(minT  , maxT)
     plt.ylim(max(p), min(p))
     #plt.savefig('NoThermInverSmoothed.png', format='png')
     #plt.savefig('NoThermInverSmoothed.ps' , format='ps' )

     Revisions
     ---------
     2013-11-16  Jasmina   Written by.
     2014-04-05  Jasmina   Added T3 as free parameter instead of T0
                           Changed boundary condition equations accordingly
     2014-08-15  Patricio  Cleaned-up the code. Added verb argument.
     2014-09-24  Jasmina   Updated documentation.
     '''

    if verb:
        print("Pressure range: {} -- {} bar\n"
              "PT params: {} {} {} {} {}\n".format(p[0], p[-1], a1, a2, p1, p3,
                                                   T3))

    # The following set of equations derived using Equation 2
    # Madhusudhan and Seager 2009

    # Set p0 (top of the atmosphere):
    p0 = np.amin(p)

    # Calculate temperature at layer boundaries:
    T1 = T3 - (np.log(p3 / p1) / a2)**2.0
    T0 = T1 - (np.log(p1 / p0) / a1)**2.0
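    # These follow from the layer relations used below: setting
    # T3 = T1 + (ln(p3/p1)/a2)**2 and solving for T1, then
    # T1 = T0 + (ln(p1/p0)/a1)**2 and solving for T0.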

    # Error message for negative Temperatures:
    if T0 < 0 or T1 < 0 or T3 < 0:
        raise ValueError("Input parameters give non-physical profile:\n"
                         "  T0={:.1f},  T1={:.1f},  T3={:.1f}".format(
                             T0, T1, T3))

    # Defining arrays for every part of the PT profile:
    p_l1 = p[np.where((p >= p0) & (p < p1))]
    p_l2_neg = p[np.where((p >= p1) & (p < p3))]
    p_l3 = p[np.where((p >= p3) & (p <= np.amax(p)))]

    # sanity check for total number of levels:
    check = len(p_l1) + len(p_l2_neg) + len(p_l3)
    if verb:
        print('Total number of layers: {:d}'.format(len(p)))
        print(
            'Number of levels per Layer: Nl1={:d},  Nl2={:d}, Nl3={:d}'.format(
                len(p_l1), len(p_l2_neg), len(p_l3)))
        print('Sum of levels per layer: {:d}'.format(check))

    # Layer 1 temperatures
    T_l1 = (np.log(p_l1 / p0) / a1)**2 + T0

    # Layer 2 temperatures decreasing part
    T_l2_neg = (np.log(p_l2_neg / p1) / a2)**2 + T1

    # Layer 3 temperatures
    T_l3 = np.full(len(p_l3), T3)

    # Concatenate all temperature arrays:
    T_conc = np.concatenate((T_l1, T_l2_neg, T_l3))

    # PT profile info:
    PT_NoInver = (T_l1, p_l1, T_l2_neg, p_l2_neg, T_l3, p_l3, T_conc, T0, T1,
                  T3)

    # Smoothed PT profile:
    sigma = 4
    T_smooth = gaussian_filter1d(T_conc, sigma, mode='nearest')

    return PT_NoInver, T_smooth
Esempio n. 53
0
from scipy.io.wavfile import read
import scipy.signal as signal
from scipy.ndimage import gaussian_filter1d
from scipy.ndimage import median_filter
from scipy.io.wavfile import write
import numpy as np
import matplotlib.pyplot as plt
import convert_formant_to_list
import statistics
import convert_pitch_to_list

data = convert_pitch_to_list.func1("test_mor.PitchTier")
f1 = data[0, 0, :]
f2 = data[1, 0, :]

fy1 = gaussian_filter1d(f1, 3)
fy2 = gaussian_filter1d(f2, 3)
lenn = len(fy2) // 10 - 1
array = np.empty(lenn)
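# variance of overlapping 20-sample windows taken with a stride of 10 samples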
for i in range(lenn):
    array[i] = statistics.variance(fy2[i * 10:i * 10 + 20])

dev = np.zeros(len(fy2))
# central difference over the smoothed track (loop over fy2, not data)
for i in range(1, len(fy2) - 1):
    dev[i] = fy2[i + 1] - fy2[i - 1]
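# (The loop above is an unnormalized central difference; np.gradient(fy2)
#  would compute the normalized equivalent in a single vectorized call.)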

plt.subplot(211)
plt.title("source")
#plt.plot(data[0,1,:],f1,".", label='original data f1',linestyle = " ",color = "r")
# plt.plot(fy1, '.', label='filtered data f1',linestyle = " ",color = "b")
# plt.plot(data[0,1,:],f2, '.', label='original data f2',linestyle = " ",color = "r")
Esempio n. 54
0
def setup_spectral_library(velscale, FWHM_gal):

    # Read the list of filenames from the Single Stellar Population library
    # by Vazdekis et al. (2010, MNRAS, 404, 1639) http://miles.iac.es/.
    #
    # For this example I downloaded from the above website a set of
    # model spectra with default linear sampling of 0.9A/pix and default
    # spectral resolution of FWHM=2.51A. I selected a Salpeter IMF
    # (slope 1.30) and a range of population parameters:
    #
    #     [M/H] = [-1.71, -1.31, -0.71, -0.40, 0.00, 0.22]
    #     Age = np.linspace(np.log10(1), np.log10(17.7828), 26)
    #
    # This leads to a set of 156 model spectra with the file names like
    #
    #     Mun1.30Zm0.40T03.9811.fits
    #
    # IMPORTANT: the selected models form a rectangular grid in [M/H]
    # and Age: for each Age the spectra sample the same set of [M/H].
    #
    # We assume below that the model spectra have been placed in the
    # directory "miles_models" under the current directory.
    #
    vazdekis = glob.glob('miles_models/Mun1.30*.fits')
    vazdekis.sort()
    FWHM_tem = 2.51  # Vazdekis+10 spectra have a resolution FWHM of 2.51A.

    # Extract the wavelength range and logarithmically rebin one spectrum
    # to the same velocity scale of the SDSS galaxy spectrum, to determine
    # the size needed for the array which will contain the template spectra.
    #
    hdu = pyfits.open(vazdekis[0])
    ssp = hdu[0].data
    h2 = hdu[0].header
    lamRange_temp = h2['CRVAL1'] + np.array(
        [0., h2['CDELT1'] * (h2['NAXIS1'] - 1)])
    sspNew, logLam2, velscale = util.log_rebin(lamRange_temp,
                                               ssp,
                                               velscale=velscale)

    # Create a three dimensional array to store the
    # two dimensional grid of model spectra
    #
    nAges = 26
    nMetal = 6
    templates = np.empty((sspNew.size, nAges, nMetal))

    # Convolve the whole Vazdekis library of spectral templates
    # with the quadratic difference between the SDSS and the
    # Vazdekis instrumental resolution. Logarithmically rebin
    # and store each template as a column in the array TEMPLATES.

    # Quadratic sigma difference in pixels Vazdekis --> SDSS
    # The formula below is rigorously valid if the shapes of the
    # instrumental spectral profiles are well approximated by Gaussians.
    #
    FWHM_dif = np.sqrt(FWHM_gal**2 - FWHM_tem**2)
    sigma = FWHM_dif / 2.355 / h2['CDELT1']  # Sigma difference in pixels

    # These are the array where we want to store
    # the characteristics of each SSP model
    #
    logAge_grid = np.empty((nAges, nMetal))
    metal_grid = np.empty((nAges, nMetal))

    # These are the characteristics of the adopted rectangular grid of SSP models
    #
    logAge = np.linspace(np.log10(1), np.log10(17.7828), nAges)
    metal = [-1.71, -1.31, -0.71, -0.40, 0.00, 0.22]

    # Here we make sure the spectra are sorted in both [M/H]
    # and Age along the two axes of the rectangular grid of templates.
    # A simple alphabetical ordering of Vazdekis's naming convention
    # does not sort the files by [M/H], so we do it explicitly below
    #
    metal_str = ['m1.71', 'm1.31', 'm0.71', 'm0.40', 'p0.00', 'p0.22']
    for k, mh in enumerate(metal_str):
        files = [s for s in vazdekis if mh in s]
        for j, filename in enumerate(files):
            hdu = pyfits.open(filename)
            ssp = hdu[0].data
            ssp = ndimage.gaussian_filter1d(ssp, sigma)
            sspNew, logLam2, velscale = util.log_rebin(lamRange_temp,
                                                       ssp,
                                                       velscale=velscale)
            templates[:, j, k] = sspNew  # Templates are *not* normalized here
            logAge_grid[j, k] = logAge[j]
            metal_grid[j, k] = metal[k]

    return templates, lamRange_temp, logAge_grid, metal_grid
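A quick numeric check of the quadratic FWHM difference computed above, assuming an SDSS-like FWHM_gal of 2.76 A (a value not given in this snippet):

# FWHM_dif = sqrt(2.76**2 - 2.51**2) ~ 1.15 A
# sigma    = 1.15 / 2.355 / 0.9     ~ 0.54 pixels, for 0.9 A/pix sampling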
Esempio n. 55
0
def FitCon1(
    wave,
    flux,
    deg=3,
    niter=10,
    snr=200,
    sig=None,
    swin=7,
    k1=1,
    k2=3,
    mask=None,
    plot=False,
):
    """
    Continuum normalize a spectrum
    based on IDL code by Oleg Kochukhov    Parameters
    ----------
    wave : array
        wavelength grid
    flux : arrau
        spectrum flux
    niter : int
        number of iterations
    deg : int
        polynomial degree of the continuum fit
    swin : int
        smoothing window
    sig : array, optional
        uncertainties on the spectral flux, by default None
    plot : bool, optional
        Whether to plot the results, by default False
    k1 : float, optional
        lower sigma clipping cutoff, by default 1
    k2 : float, optional
        upper sigma clipping cutoff, by default 3
    mask : array, optional
        mask for the points in the spectrum, mask == 1 is always used,
        mask == 2 is never used, mask == 0 is always used, by default None
    snr : float, optional
        signal to noise ratio, by default 200    Returns
    -------
    coeff: array
        polynomial coefficients of the continuum
    con : array
        Continuum points    Raises
    ------
    ValueError
        If poly_type is not a valid value
    """    # flag array
    if mask is None:
        mask = np.full(wave.shape, 0)    # choice of polynomial fitting routine
    if sig is not None:
        sig = 1 / sig
    else:
        sig = np.ones_like(wave)    # initial set of points to fit
    wave = wave - np.mean(wave)
    fmean = np.median(flux)
    flux = flux - fmean
    idx = np.where(mask != 2)
    #idx = np.where( ((flux > con - k1 * rms) & (flux < con + k2 * rms) & (mask != 2)) | (mask == 1))
    smooth = gaussian_filter1d(flux[idx], swin)
    coeff = np.polyfit(wave[idx], smooth, deg, w=sig[idx])
    con = np.polyval(coeff, wave)
    rms = np.sqrt(np.mean((con[idx] - flux[idx]) ** 2))

    # iterate niter times
    for _ in range(niter):
        idx = np.where(
            ((flux > con - k1 * rms) & (flux < con + k2 * rms) & (mask != 2))
            | (mask == 1)
        )
        coeff = np.polyfit(wave[idx], flux[idx], deg, w=sig[idx])
        con = np.polyval(coeff, wave)
    rms = np.sqrt(np.mean((con[idx] - flux[idx]) ** 2))

    # Re-add mean flux value
    con += fmean
    # optional plot
    if plot:
        pass

    return coeff, con
Esempio n. 56
0
def weighting(freq, num_bins):
    # Smooth the per-bin frequencies, blend with a uniform distribution
    # (lam controls the mixing), and weight each bin by the inverse.
    lam = 0.5
    p = gaussian_filter1d(freq, sigma=5)
    w = ((1 - lam) * p + (lam / num_bins))**(-1)
    w_norm = w / w.sum()
    return w_norm
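A minimal usage sketch for the helper above, assuming freq holds per-bin counts:

import numpy as np
from scipy.ndimage import gaussian_filter1d  # used inside weighting

freq = np.histogram(np.random.randn(10000), bins=50)[0].astype(float)
w = weighting(freq, num_bins=50)
assert np.isclose(w.sum(), 1.0)  # the weights are normalized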
Esempio n. 57
0
def eval_pop_responses(protocol, trial):
    # Abbreviations:
    # bl = before learning
    # al = after learning
    #  p = preferred
    # np = non-preferred
    # na = non-associated

    dt = trial.dt
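    # gaussian_filter1d takes sigma in samples, so 0.1 / dt converts a 100 ms
    # smoothing kernel into sample units (e.g. dt = 1 ms gives sigma = 100)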
    f_spikes = gaussian_filter1d(trial.spikes, 0.1 / dt, axis=0)

    mask = protocol.mask_screen(trial.trange)
    sliced_bl = slice_by_stim(trial.trange[mask], trial.spikes[mask],
                              protocol.screen_stimuli, protocol.onsets_screen)

    mask = protocol.mask_test(trial.trange)
    sliced_al = slice_by_stim(trial.trange[mask], trial.spikes[mask],
                              protocol.test_stimuli, protocol.onsets_test)

    frs_bl = {
        k: xr.DataArray([[x.mean(dim='t') for x in intervals]
                         for intervals in onsets],
                        dims=('onsets', 'intervals', 'fr'))
        for k, onsets in sliced_bl.items()
    }
    frs_al = {
        k: xr.DataArray([[x.mean(dim='t') for x in intervals]
                         for intervals in onsets],
                        dims=('onsets', 'intervals', 'fr'))
        for k, onsets in sliced_al.items()
    }

    median_frs_bl = {k: v.median(dim='onsets') for k, v in frs_bl.items()}
    vis_responsive = {
        k: [
            ranksums(*fr)[1] < 0.05 and bool(median_fr > 2.)
            for fr, median_fr in zip(frs_bl[k].T, median_frs_bl[k][1])
        ]
        for k in median_frs_bl
    }

    stimuli = set(protocol.screen_stimuli)
    pair_coding = {}
    for stim in stimuli:
        np_stim = {'L': 'P', 'P': 'L'}[stim[0]] + stim[1]
        na_stims = {s for s in stimuli if s not in (stim, np_stim)}
        sel = vis_responsive[stim]
        fr_bl_np = frs_bl[np_stim].isel(intervals=1, fr=sel)
        fr_al_np = frs_al[np_stim].isel(intervals=1, fr=sel)
        inc_np = fr_al_np - fr_bl_np
        fr_bl_np_before_stim = frs_bl[np_stim].isel(intervals=0, fr=sel)
        pair_coding_ = [
            ranksums(al, bl)[1] < 0.05 and bool(np.median(al) > np.median(bl))
            and ranksums(bl, bl_before)[1] >= 0.05
            for bl, al, bl_before in zip(fr_bl_np.T, fr_al_np.T,
                                         fr_bl_np_before_stim.T)
        ]
        for na_stim in na_stims:
            fr_bl_na = frs_bl[na_stim].isel(intervals=1, fr=sel)
            fr_al_na = frs_al[na_stim].isel(intervals=1, fr=sel)
            inc_na = fr_al_na - fr_bl_na
            keep = [
                ranksums(inp, ina)[1] < 0.05
                and bool(np.mean(inp) > np.mean(ina))
                for inp, ina in zip(inc_np.T, inc_na.T)
            ]
            pair_coding_ = [p and e for p, e in zip(pair_coding_, keep)]
        pair_coding[stim] = np.arange(trial.spikes.shape[1])[sel][pair_coding_]

    fr_response = xr.DataArray(np.mean(f_spikes.reshape(
        (-1, int(0.05 / dt), trial.spikes.shape[1])),
                                       axis=1),
                               dims=('bins', 'spikes'))
    t_bins = np.mean(trial.trange.reshape((-1, int(0.05 / dt))), axis=1)

    eta = 0.1
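    # z-score each binned response against the pre-stimulus baseline interval;
    # eta regularizes the denominator when the baseline variability is ~0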
    z_bl = {
        k: (fr_response - xr.concat(
            (x[0].mean(dim='t')
             for x in v), dim='trial').mean(dim='trial')) / (xr.concat(
                 (x[0].mean(dim='t')
                  for x in v), dim='trial').std(dim='trial') + eta)
        for k, v in sliced_bl.items()
    }
    z_al = {
        k: (fr_response - xr.concat(
            (x[0].mean(dim='t')
             for x in v), dim='trial').mean(dim='trial')) / (xr.concat(
                 (x[0].mean(dim='t')
                  for x in v), dim='trial').std(dim='trial') + eta)
        for k, v in sliced_al.items()
    }

    return dict(
        p_bl=pop_response(protocol, pair_coding, t_bins, z_bl, 'screen',
                          lambda k, s: k == s),
        p_al=pop_response(protocol, pair_coding, t_bins, z_al, 'test',
                          lambda k, s: k == s),
        np_bl=pop_response(protocol, pair_coding, t_bins, z_bl, 'screen',
                           lambda k, s: k[0] != s[0] and k[1] == s[1]),
        np_al=pop_response(protocol, pair_coding, t_bins, z_al, 'test',
                           lambda k, s: k[0] != s[0] and k[1] == s[1]),
        na_bl=pop_response(protocol, pair_coding, t_bins, z_bl, 'screen',
                           lambda k, s: k[1] != s[1]),
        na_al=pop_response(protocol, pair_coding, t_bins, z_al, 'test',
                           lambda k, s: k[1] != s[1]),
        pair_coding=pair_coding,
    )
Esempio n. 58
0
xnew = np.linspace(-lim_xnew, lim_xnew, npoints_interp)

# %%

#exit()


# %%

from scipy.ndimage import gaussian_filter1d

if 'Curvature' in what2do:

    for data in listOfArrays:
        data[:, profilenumber] = gaussian_filter1d(data[:, profilenumber], 50)

# %%

plt.figure(figsize=(12, 8))

listInterpFunc = []

for data, fname in zip(listOfArrays, listOfFiles):

    f = interp1d(data[:, 0], data[:, profilenumber], kind='cubic')
    listInterpFunc.append(f)

    label = fname.rsplit('/', 1)[1].split('.')[0]

    plt.plot(data[:, 0]*1e6, data[:, profilenumber], 'o', label=label)
Esempio n. 59
0
    # (The start of this snippet is missing in the source; since `cf` is
    # later passed to colorbar(), the call presumably opened with something
    # like `cf = ax.contourf(K, ...` -- a hypothetical reconstruction:)
    cf = ax.contourf(K,
                     dateo,
                     ma.masked_invalid(log10(sla_ps_denoised[:, 1:Nk])),
                     arange(1.5, 5.1, 0.25),
                     cmap='CMRmap_r',
                     extend='both',
                     rasterized=True)
    ax.set_xlim([7e-4, 1e-1])
    #ylim([1e1,1e5])
    # xlim([250**-1, 70**-1])
    ax.set_xscale('log')
    ax.yaxis_date()
    ax.grid()
    colorbar(cf, cax=axcb, orientation='horizontal')
    ax.set_xlabel('K (cpkm)')
    ax.set_title('SLA Spectrogram - %s' % rname)
    axcb.set_title(r'log$_{10}$ power density (cm$^2$ / cpkm)')
    subplots_adjust(bottom=0.04, hspace=0.75)
    savefig('figures/sla_ps_full_%s.pdf' % short_name)
    #tight_layout()

    bigfig = figure(figsize=(6.5, 2.8))
    axf = bigfig.add_subplot(111)
    K0 = 25
    axf.semilogy(
        dateo, gaussian_filter1d(sla_ps_denoised[:, K0:K0 + 5].mean(axis=1),
                                 1))
    axf.xaxis_date()
    axf.grid()
    axf.set_ylabel(r'power density (cm$^2$ / cpkm)')
    axf.set_title('SLA Spectral Power at 100 km - %s' % rname)
    savefig('figures/sla_ps_100km_timeseries_%s.pdf' % short_name)
Esempio n. 60
0
def smooth(x, y, sigma=1.0):
    y_new = ndimage.gaussian_filter1d(y, sigma, mode='reflect')
    return x, y_new
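A minimal usage sketch for this last helper, assuming 1-D NumPy inputs:

import numpy as np
from scipy import ndimage  # used inside smooth

x = np.linspace(0, 10, 200)
y = np.sin(x) + 0.1 * np.random.randn(200)
x_s, y_s = smooth(x, y, sigma=2.0)  # x unchanged, y Gaussian-smoothed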