Example #1
def SFR_vs_time():
    E_sfr, E_time = np.loadtxt('Enzo_SFRvstime.txt', skiprows=1, unpack=True)
    R_sfr, R_time = np.loadtxt('Ramses_SFRvstime.txt', skiprows=1, unpack=True)
    A_sfr, A_time = np.loadtxt('Arepo_SFRvstime.txt', skiprows=1, unpack=True)
    Art_sfr, Art_time = np.loadtxt('Art_SFRvstime.txt',
                                   skiprows=1,
                                   unpack=True)
    F_sfr, F_time = np.loadtxt('Fire_SFRvstime.txt', skiprows=1, unpack=True)

    z_tick_values = [6, 5, 4, 3, 2, 1.5, 1.25,
                     1.0]  #max and min of this are limits of the plot.
    z_tick_value_labels = [' ', '5', '4', '3', '2', '1.5', '1.25', '1.0']
    z_tick_locations = cosmo.lookback_time(z_tick_values).value
    LBT_max = cosmo.lookback_time(np.max(z_tick_values)).value
    LBT_min = cosmo.lookback_time(np.min(z_tick_values)).value

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_ylabel(r'SFR  [M$_\odot$ yr$^{-1}$]')
    ax1.set_ylim(0, 90)
    ax2 = ax1.twiny()
    ax1.set_xlabel('Lookback Time [Gyr]')
    ax1.set_xlim(LBT_max, LBT_min)
    ax2.set_xlabel('Redshift')
    ax2.set_xlim(LBT_max, LBT_min)
    ax2.set_xticks(z_tick_locations)
    ax2.set_xticklabels(z_tick_value_labels)

    E_LBtime = cosmo.age(0).value - E_time / 1e9
    ax1.plot(E_LBtime, E_sfr, linewidth=2)
    R_LBtime = cosmo.age(0).value - R_time / 1e9
    ax1.plot(R_LBtime, R_sfr, ':', linewidth=3)
    Art_LBtime = cosmo.age(0).value - Art_time / 1e9
    ax1.plot(Art_LBtime,
             convolve(Art_sfr, Gaussian1DKernel(1)),
             '-',
             linewidth=2)
    A_LBtime = cosmo.age(0).value - A_time / 1e9
    ax1.plot(A_LBtime,
             convolve(A_sfr, Gaussian1DKernel(0.5)),
             '-.',
             linewidth=2)
    F_LBtime = cosmo.age(0).value - F_time / 1e9
    ax1.plot(F_LBtime, F_sfr, linewidth=2)

    #str_z=str(abs(round(E_pf.current_redshift, 1)))
    ax1.legend(("Enzo", "Ramses", "Art", "Arepo", "Gizmo-pSPH"), frameon=False)
    plt.savefig("ALL_SFR_vs_time.png", bbox_inches='tight')
Example #2
def pixConvolve(wldata,
                specdata,
                pathstd,
                namestd,
                lam,
                Rdat,
                Rstd,
                headCount,
                band,
                wlstd=None):
    if not isinstance(wlstd, np.ndarray):
        wlstd, specstd = loadspec(pathstd, namestd, band, headCount)
    else:
        a, specstd = loadspec(pathstd, namestd, band, headCount)
    dellamdata = lam / Rdat
    dellamstd = lam / Rstd

    medpixdata = medianNpix(wldata, specdata, wldata[0],
                            wldata[len(wldata) - 1])
    clipwlstd, specstd, medpixstd = medianNpix(wlstd,
                                               specstd,
                                               wldata[0],
                                               wldata[len(wldata) - 1],
                                               clipspec=True)

    dellamK = np.sqrt(np.abs(dellamstd**2 - dellamdata**2))
    NpixK = dellamK / medpixdata
    Kernel = Gaussian1DKernel(NpixK / 2.355)  #switch to sigma
    smoothed = convolve(specdata, Kernel, boundary='extend')
    #print(NpixK)

    interpsm = interpolate.interp1d(wldata, smoothed, fill_value='extrapolate')
    intsmoothed = interpsm(clipwlstd)
    return clipwlstd, intsmoothed, specstd
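Examples #2-#4 build their smoothing kernel the same way: the extra broadening needed to bring the data to the standard's resolution is the quadrature difference of the two resolution elements, and that width is converted from a FWHM in pixels to the Gaussian sigma that Gaussian1DKernel expects by dividing by 2*sqrt(2*ln 2) (about 2.355). A minimal, self-contained sketch of that arithmetic; all numbers below are made up for illustration:

import numpy as np
from astropy.convolution import Gaussian1DKernel, convolve

# Illustrative values only: a 1.3 micron band observed at R_data = 3800,
# degraded to a comparison spectrum at R_std = 2000 (cf. pixConvolve above).
lam, R_data, R_std = 1.3, 3800.0, 2000.0
dellam_data = lam / R_data                          # resolution element of the data
dellam_std = lam / R_std                            # resolution element of the standard
dellam_K = np.sqrt(dellam_std**2 - dellam_data**2)  # kernel FWHM in wavelength units

median_pix = 2.9e-4                                 # assumed median pixel width (made up)
fwhm_pix = dellam_K / median_pix                    # kernel FWHM in pixels
sigma_pix = fwhm_pix / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # FWHM -> sigma (~ /2.355)

kernel = Gaussian1DKernel(sigma_pix)
smoothed = convolve(np.ones(200), kernel, boundary='extend')  # dummy flat spectrum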
Example #3
def smoothSpec(wl, spec):
    dellamdata = 1.3 / 3800
    dellamBDSS = 1.3 / 2000
    wlBDSS, specBDSS = np.loadtxt('BDSSlowres_compositeset/2mass0015.dat',
                                  skiprows=6,
                                  unpack=True)
    medpixdata = medianNpix(wl, spec, wl[0], wl[len(wl) - 1])

    dellamK = np.sqrt(np.abs(dellamBDSS**2 - dellamdata**2))
    NpixK = dellamK / medpixdata
    Kernel = Gaussian1DKernel(NpixK / 2.355)  #switch to sigma
    smoothed = convolve(spec, Kernel, boundary='extend')
    #print(NpixK)
    low, hi = wl[0], wl[len(wl) - 1]
    clipstd = specBDSS[(np.around(wlBDSS, decimals=4) >= low)
                       & (np.around(wlBDSS, decimals=4) <= hi)]
    wlclip = wlBDSS[(np.around(wlBDSS, decimals=4) >= low)
                    & (np.around(wlBDSS, decimals=4) <= hi)]
    interpsm = interpolate.interp1d(wl, smoothed, fill_value='extrapolate')
    intsmoothed = interpsm(wlclip)
    plt.figure(1)
    plt.plot(wlBDSS, specBDSS)
    plt.plot(wlclip, intsmoothed)
    #plt.show()
    return wlclip, intsmoothed
Example #4
def fullspecConvolve(pathstd,
                     namestd,
                     wldat,
                     specdat,
                     headCount,
                     res,
                     lam,
                     wlstd=None):
    if not isinstance(wlstd, np.ndarray):
        wlstd, specstd = loadspec(pathstd, namestd, 'H', headCount)
    else:
        a, specstd = loadspec(pathstd, namestd, 'H', headCount)
    dellamdata = lam / 3800
    dellamstd = lam / res

    medpixdata = medianNpix(wldat, specdat, wldat[0], wldat[len(wldat) - 1])

    dellamK = np.sqrt(np.abs(dellamstd**2 - dellamdata**2))
    NpixK = dellamK / medpixdata
    Kernel = Gaussian1DKernel(NpixK / 2.355)  #switch to sigma
    smoothed = convolve(specdat, Kernel, boundary='extend')
    #print(NpixK)
    low, hi = wldat[0], wldat[len(wldat) - 1]
    if low == 1.18:
        hi = 1.35
    clipstd = specstd[(np.around(wlstd, decimals=4) >= low)
                      & (np.around(wlstd, decimals=4) <= hi)]
    wlclip = wlstd[(np.around(wlstd, decimals=4) >= low)
                   & (np.around(wlstd, decimals=4) <= hi)]
    interpsm = interpolate.interp1d(wldat, smoothed, fill_value='extrapolate')
    intsmoothed = interpsm(wlclip)
    return intsmoothed, clipstd, wlclip
Example #5
def fit_specfits(file_name, smooth=True, sp=2):
    """
    Fits a blackbody (planckian) to a FITS file containing 1-D spectra.
    Args:
        file_name   : Name of the 1-D spectrum FITS file to fit
        smooth      : Whether or not to smooth the spectra
        sp          : Smoothing parameter to be used if smooth=True
    Returns:
        data_df     : Pandas DataFrame containing 1-D spectra
        popt        : Optimal fit parameters
        pcov        : Covariance of the fit parameters
    """
    wave_data, flux_data = read_1dspec(file_name)
    if smooth:
        flux_data = convolve(flux_data, Gaussian1DKernel(int(sp)))

    popt, pcov = curve_fit(calc_flux,
                           wave_data,
                           flux_data,
                           p0=[guess_amp, guess_temp])

    print("\nBest-Fit Parameters:")
    print("Temp = {0:.2f}+/- {1:.2f}".format(popt[1], pcov[1, 1]))

    data_df = pd.DataFrame()
    data_df['Flux'] = flux_data
    data_df['Wavelength'] = wave_data
    data_df['BBFitFlux'] = calc_flux(data_df['Wavelength'], *popt)
    data_df.to_csv(name_SN + '_BlackBodyFit.dat',
                   sep=' ',
                   index=False,
                   header=True)

    return data_df, popt, pcov
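fit_specfits relies on calc_flux, guess_amp, guess_temp and name_SN being defined elsewhere in the original module. For orientation only, a plausible stand-in for calc_flux is a Planck function scaled by a free amplitude; the sketch below is an assumption about what that helper might look like, not the author's implementation:

import numpy as np

def calc_flux(wave, amp, temp):
    """Hypothetical blackbody model: wavelength in Angstrom, temperature in K."""
    h = 6.62607015e-27    # Planck constant [erg s]
    c = 2.99792458e10     # speed of light [cm/s]
    k_b = 1.380649e-16    # Boltzmann constant [erg/K]
    wave_cm = np.asarray(wave, dtype=float) * 1e-8
    bb = (2.0 * h * c**2 / wave_cm**5) / (np.exp(h * c / (wave_cm * k_b * temp)) - 1.0)
    return amp * bb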
Example #6
def ssp_cvd(linelist):
    path = '/Users/alexa/Dropbox (ConroyAstro)/alf/empirical SSPs'
    models = glob.glob('{}/*0.ssp'.format(path))
    models.append('{}/CvD_t13.5.ssp'.format(path))
    fname = 'ssp_ews_cvd_v4.txt'

    with open(fname, 'w+') as f:
        for i, model in enumerate(models):
            age = model.split('/')[-1].replace('CvD_t', '').replace('.ssp', '')
            spec = np.loadtxt(model)
            gauss = Gaussian1DKernel(stddev=4.8)
            dspec = convolve(spec[:, 3], gauss)

            model = Star(None, None, None, dspec, spec[:, 0])
            line_names, line_strengths, upper, lower = get_equiv_widths(
                linelist, model)

            if (i == 0):
                f.write("# Age ")
                for name in line_names:
                    f.write("{:15}".format(name))
                f.write("\n")
            f.write("{0:1.4}".format(age))
            for value in line_strengths:
                f.write("{0:15.5f}".format(value))
            f.write("\n")

    subprocess.call(
        ["mv", fname, "/Users/alexa/NonSolarModels/TheModels/LickIndices"])
Example #7
def get_coarser_wavelength_fsps(wave, spec, redwave=1e5):
    '''
    smooth the spectrum with a gaussian kernel to improve 
    computational efficiency

    only affects the wavelength grid
    (the age and metallicity grids remain unchanged)

    Parameters
    ----------
    wave : numpy array (1d)
        initial wavelength grid
    spec : numpy array
        initial SSP grid over (wave, age, metallicity)
    redwave : float
        red wavelength cutoff (in Angstroms)
    '''
    sel = np.where((wave > 500) * (wave < redwave))[0]
    spec = spec[sel, :]
    wave = wave[sel]
    G = Gaussian1DKernel(25)
    nsel = np.where(np.abs(np.diff(wave) - 0.9) < 0.5)[0]
    for i in np.arange(spec.shape[1]):
        spec[nsel, i] = convolve(spec[nsel, i], G)
    ndw = 12.
    nw = np.arange(wave[nsel[0]], wave[nsel[-1] + 1], ndw)
    nwb = np.hstack([nw, wave[nsel[-1] + 1]])
    nwave = np.hstack([wave[:nsel[0]], nw, wave[(nsel[-1] + 1):]])
    nspec = np.zeros((len(nwave), spec.shape[1]))
    for i, sp in enumerate(spec.swapaxes(0, 1)):
        hsp = (np.histogram(wave[nsel], nwb, weights=sp[nsel])[0] /
               np.histogram(wave[nsel], nwb)[0])
        nspec[:, i] = np.hstack([sp[:nsel[0]], hsp, sp[(nsel[-1] + 1):]])
    return nwave, nspec
Example #8
def acf(times, yvals):
    """ 
    computes the autocorrelation function for an evenly-sampled time-series 
    """
    cadence = np.median(np.diff(times))
    N = len(yvals)
    max_lag = N // 2

    median_yval = np.median(yvals)
    norm_term = np.sum((yvals - median_yval)**2)
    lags = np.arange(max_lag)

    #print median_yval,norm_term,max_lag

    ACF0 = [
        np.sum((yvals[:N - j] - median_yval) * (yvals[j:] - median_yval))
        for j in lags
    ]
    ACF1 = ACF0 / norm_term

    # smooth the ACF
    gauss_kernel = Gaussian1DKernel(18, x_size=55)
    ACF = ap_convolve(ACF1, gauss_kernel, boundary="extend")
    #ACF = ACF1

    periods = cadence * lags

    return periods, ACF
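A quick illustrative call of acf on a synthetic, evenly sampled sinusoid, assuming acf and its imports (numpy, Gaussian1DKernel, and the ap_convolve alias for astropy's convolve) are in scope as above; the variability period is read off as the highest ACF peak away from zero lag, and the 1-day cut is an arbitrary choice for this made-up signal:

import numpy as np

times = np.arange(0.0, 80.0, 0.02)                   # evenly sampled, cadence 0.02 d
yvals = np.sin(2 * np.pi * times / 3.7) + 0.1 * np.random.randn(times.size)
periods, smoothed_acf = acf(times, yvals)

mask = periods > 1.0                                 # ignore the zero-lag peak
best_period = periods[mask][np.argmax(smoothed_acf[mask])]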
Example #9
def plot_epoch(ax_obj, file_name, index, master_df, offset=0.8e-15, smooth=False, sp=2):
    """
    Plots the spectrum with line identified and labelled accordingly.
    Args:
        ax_obj      : Axes object to be used for plotting and setting plot parameters
        file_name   : Name of the 1-D Spectrum FITS file to be plotted
        index       : Index of the file 'file_name' which is to be plotted
        master_df   : Master Pandas DataFrame from which the label is to be read
        offset      : Offset in Flux units to be applied to the spectra
        smooth      : Should the spectrum be smoothened before plotting?
        sp          : Smoothing parameter to be used for Gaussian Kernel
    Returns:
        None
    """
    data_df = pd.read_csv(file_name, names=['Wavelength', 'Flux'], sep=r'\s+', dtype='float64')
    data_df['Flux'] -= offset * index

    if smooth:
        data_df['Flux'] = convolve(data_df['Flux'].tolist(), Gaussian1DKernel(int(sp)))

    if data_df['Wavelength'].max() >= int(upper_lim):
        data_df = data_df[(data_df['Wavelength'] >= int(lower_lim)) & (data_df['Wavelength'] <= int(upper_lim))]
    else:
        data_df = data_df[(data_df['Wavelength'] >= int(lower_lim)) &
                          (data_df['Wavelength'] <= data_df['Wavelength'].max() - 30)]

    ax_obj.plot(data_df['Wavelength'], data_df['Flux'], linewidth=1, label=None)
    ax_obj.text(x=data_df['Wavelength'].values[-1] + 50, y=data_df['Flux'].values[-1],
                s=master_df.loc[index, 'Label'], fontsize=10)
Example #10
def filter_1D(data, std, dim="time", dtype=None):
    if dtype is None:
        dtype = detect_dtype(data)

    kernel = Gaussian1DKernel(std)

    def smooth_raw(data):
        raw_data = getattr(data, "values", data)
        result = convolve_fft(raw_data, kernel, boundary="wrap")
        result[np.isnan(raw_data)] = np.nan
        return result

    def temporal_smoother(data):
        dims = [dim]
        return xr.apply_ufunc(
            smooth_raw,
            data,
            vectorize=True,
            dask="parallelized",
            input_core_dims=[dims],
            output_core_dims=[dims],
            output_dtypes=[dtype],
        )

    return temporal_smoother(data)
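An illustrative call of filter_1D on a synthetic xarray time series; dtype is passed explicitly here so the external detect_dtype helper is not needed (assumes numpy, xarray, Gaussian1DKernel and convolve_fft are imported as in the example above):

import numpy as np
import xarray as xr

da = xr.DataArray(np.random.rand(365), dims=["time"], name="sst")
smoothed = filter_1D(da, std=5.0, dim="time", dtype=float)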
Example #11
def gsmooth(data,
            fwhm,
            mask=None,
            boundary='extend',
            fill=0.0,
            truncate=4.0,
            squared=False):
    # astropy.convolve automatically ignores NaNs
    # Create kernel
    xsize = int(np.ceil(fwhm / 2.35 * truncate * 2))
    if xsize % 2 == 0:
        xsize += 1  # must be odd
    g = Gaussian1DKernel(stddev=fwhm / 2.35, x_size=xsize)
    if squared is False:
        return convolve(data,
                        g.array,
                        mask=mask,
                        boundary=boundary,
                        fill_value=fill)
        #return gaussian_filter1d(data,fwhm/2.35,axis=axis,mode=mode,cval=cval,truncate=truncate)
    else:
        return convolve(data,
                        g.array**2,
                        mask=mask,
                        boundary=boundary,
                        fill_value=fill,
                        normalize_kernel=False)
Example #12
def get_gaussian_blur(centroid_vector, blur, sigma):
    """
        Apply gaussian smoothing to tonal model centroids

        Parameters
        ----------
        centroid_vector: list
        tonal centroids of the tonal model

        blur: string
        smoothing mode, either 'full' or '17-points'

        sigma: number (scalar > 0) optional
        sigma of the gaussian smoothing. Default 11

        Returns
        -------
        list
        centroids blurred by gaussian smoothing
    """
    if blur == 'full':
        centroid_vector = gaussian_filter(centroid_vector, sigma=sigma)
    elif blur == '17-points':
        gauss_kernel = Gaussian1DKernel(17)
        for i, centroid in enumerate(centroid_vector):
            centroid_vector[i] = convolve(centroid, gauss_kernel)
    return numpy.array(centroid_vector)
Example #13
def voigt_abs(pars, t):
    """
    f_obs = f_source * e ^-tau
    """
    N, sigma, z, osc, l0, gamma, resolution_fwhm = pars
    # Move wavelength array to rest
    t = t.copy() / (1 + z)
    # Center wavelength array at line
    t_rest = t - l0
    # Convert to km/s
    t = c * t_rest / t
    # Get optical depth
    tau = voigt_tau(t, N, osc, l0, sigma, gamma)
    # Absorb flux
    flux = np.exp(-tau)
    # Correct for instrumental resolution
    if resolution_fwhm > 0.:
        # Get transformation from km/s to pixels for convolution
        mask = (t > -5000) & (t < 5000)
        transform = np.median(np.diff(t)[mask[:-1]])
        if np.isnan(transform):
            transform = 1e3
        # print(transform)
        # Convolve with linespread-function
        flux = convolve(
            flux,
            Gaussian1DKernel(stddev=resolution_fwhm / (sigfwhm * transform),
                             mode="oversample"))
    return flux
Example #14
def plot_modspec(ax_obj, file_name, phase='0d', offset=0, z=r'1 $\rm Z_{\odot}$'):
    data_df = pd.read_csv(file_name, sep=r'\s+', engine='python', header=None, comment='#')
    data_df = data_df[(data_df[0] > 3800)]
    data_df[1] = data_df[1] / data_df[1].mean()

    if file_name[0:3] != 'rfz':
        label = file_name.split('/')[-2]
        color = 'k'
        ax_obj.text(5250, offset + 0.4, s=label + ' +' + phase, color=color, fontsize=10)
        ax_obj.text(7000, offset + 0.3, s=z, color=color, fontsize=10)

    else:
        label = name_SN
        color = 'r'
        ax_obj.text(7250, 0.3, s=label + ' +' + phase, color=color, fontsize=10)
        data_df[1] = convolve(data_df[1].tolist(), Gaussian1DKernel(3))

        for index, row in data_df.iterrows():
            if 6500 < row[0] < 7500:
                data_df.loc[index, 1] = row[1] * 1.15
            if 8200 > row[0] >= 7500:
                data_df.loc[index, 1] = row[1] * 0.8
            elif row[0] >= 8200:
                data_df.loc[index, 1] = row[1] * 0.5

    ax_obj.plot(data_df[0], np.log10(data_df[1]) + offset, linewidth=1, c=color, alpha=0.8, label=label + ' ' + phase)
Example #15
def smooth_1dspec(common_text, sp=1.2, kernel='gaussian', prefix_str='z_', plot=False):
    """
    Smooths a 1-D spectrum based on the smoothing parameter. The smoothing parameter
    is the standard deviation in the case of the Gaussian kernel and the width in the
    case of the Box kernel.
    Args:
        common_text : Common text of the 1-D spectra files which are to be smoothed
        sp          : Smoothing parameter
        kernel      : Convolution kernel used for smoothing (Gaussian or Box)
        prefix_str  : Prefix to distinguish the smoothed 1-D spectra from the originals
        plot        : Boolean describing whether the smoothed spectra are to be plotted
    Returns:
        None
    """
    list_spectra = group_similar_files('', common_text=common_text)
    usable_kernel = Gaussian1DKernel(int(sp))

    if kernel.lower() != 'gaussian':
        if kernel.lower() == 'box':
            usable_kernel = Box1DKernel(int(sp))
        else:
            print ("Error: Kernel '{0}' Not Recognised".format(kernel))
            sys.exit(1)

    for file_name in list_spectra:
        wav_data, flux_data = read_1dspec(file_name)
        smoothed_data = convolve(flux_data, usable_kernel)
        write_1dspec(ref_filename=file_name, flux_array=smoothed_data, prefix_str=prefix_str)

        if plot:
            plt.plot(wav_data, flux_data, 'g', label='Original Spectrum')
            plt.plot(wav_data, smoothed_data, 'r', label='Smooth Spectrum')
            plt.legend()
            plt.show()
            plt.close()
Example #16
 def generate_observed_kin_map(self):
     self.vel_obs = np.zeros((self.xsize, self.ysize)) * np.nan
     self.evel_obs = np.zeros((self.xsize, self.ysize)) * np.nan
     self.disp_obs = np.zeros((self.xsize, self.ysize)) * np.nan
     self.edisp_obs = np.zeros((self.xsize, self.ysize)) * np.nan
     self.ha_obs = np.zeros((self.xsize, self.ysize)) * np.nan
     for i in arange(self.blrcube.shape[1]):
         for j in arange(self.blrcube.shape[2]):
             krnl = Gaussian1DKernel(1)
             spec = convolve_fft(self.blrcube[:, i, j], krnl)
             try:
                 c_a, v_a = curve_fit(gauss,
                                      self.vscale,
                                      spec,
                                      p0=[
                                          nanmax(spec),
                                          self.vscale[nanargmax(spec)], 30,
                                          median(spec[0:15])
                                      ])
                 if (isfinite(sqrt(v_a[1,1]))) & (c_a[2] > 0.) & (isfinite(sqrt(v_a[2,2]))) & \
                 (c_a[0] > 0) & (c_a[2] > 10) & (sqrt(v_a[2,2]) < 30) & (sqrt(v_a[1,1]) < 30):
                     self.vel_obs[i, j] = c_a[1]
                     self.evel_obs[i, j] = sqrt(v_a[1, 1])
                     self.disp_obs[i, j] = c_a[2]
                     self.edisp_obs[i, j] = sqrt(v_a[2, 2])
                     self.ha_obs[i, j] = c_a[0] * c_a[2] * sqrt(2 * pi)
             except:
                 pass
     self.vel_obs[~isnan(self.vel_obs)] += np.random.normal(
         0, 5, shape(self.vel_obs[~isnan(self.vel_obs)]))
     self.disp_obs[~isnan(self.disp_obs)] += np.random.normal(
         0, 5, shape(self.disp_obs[~isnan(self.disp_obs)]))
Example #17
def smooth_magseries_gaussfilt(mags, windowsize, windowfwhm=7):
    '''This smooths the magseries with a Gaussian kernel.

    Parameters
    ----------

    mags : np.array
        The input mags/flux time-series to smooth.

    windowsize : int
        This is an odd integer containing the smoothing window size.

    windowfwhm : int
        This is an odd integer containing the FWHM of the applied Gaussian
        window function.

    Returns
    -------

    np.array
        The smoothed mag/flux time-series array.

    '''

    convkernel = Gaussian1DKernel(windowfwhm, x_size=windowsize)
    smoothed = convolve(mags, convkernel, boundary='extend')
    return smoothed
Example #18
    def processTemplate(self,temp_file,instru_fwhm_nm=4.998446e-04):
        """
        Performs broadening of the template based on instrumental fwhm.

        Parameters
        ----------
        temp_file (string):
            Full absolute path of the template file 
        instru_fwhm_nm (float):
            The instrument wavelength resolution (in same units as temp_file)
        
        Returns
        --------
            array, array:
                Returns the broadened flux and the corresponding wavelengths from temp_file
        """
        print("Now reading {}".format(str(temp_file)))
        #f=str(f)
        temp=fits.open(temp_file)
        wave_temp=temp[1].data['Wavelength']
        flux=temp[1].data['flux']
        #flux=(flux-np.min(flux))/(np.max(flux)-np.min(flux))
        Teff=temp_file.split('/')[-1].split('-')[0][-4:]
        logg=temp_file.split('/')[-1].split('-')[1]

        BT_SETTL_res = wave_temp[1]-wave_temp[0]#0.005   #mum
        instru_fwhm_BTSETTL = instru_fwhm_nm/BT_SETTL_res
        gaus_BTSETTL = Gaussian1DKernel(stddev=instru_fwhm_BTSETTL*gaussian_fwhm_to_sigma)

        kernel = np.array(gaus_BTSETTL)
        temp_conv = np.convolve(flux,kernel,'same') / sum(kernel)
        #filt=savgol_filter(temp_conv,101,polyorder=3)
        return temp_conv,wave_temp
Example #19
def _smooth_acf(acf, windowfwhm=7, windowsize=21):
    '''This returns a smoothed version of the ACF.

    Convolves the ACF with a Gaussian of given `windowsize` and `windowfwhm`.

    Parameters
    ----------

    acf : np.array
        The auto-correlation function array to smooth.

    windowfwhm : int
        The smoothing window Gaussian kernel's FWHM.

    windowsize : int
        The number of input points to apply the smoothing over.

    Returns
    -------

    np.array
        Smoothed version of the input ACF array.

    '''

    convkernel = Gaussian1DKernel(windowfwhm, x_size=windowsize)
    smoothed = convolve(acf, convkernel, boundary='extend')

    return smoothed
Example #20
def convolve_1d_spectrum(input_lambda, input_flux, output_spec_res):
    '''Function that convolves a sky spectrum with a Gaussian to 
	match the input cube spectral resolution.
	
	Inputs:
		input_lambda: input sky spectrum lambda
		input_flux: input sky spectrum flux
		output_spec_res: Spectral resolution of the convolved sky spectrum output 
		
	Outputs:
		convolved sky spectrum
	'''

    sky_resolution = np.abs(input_lambda[1] - input_lambda[0])

    if output_spec_res > sky_resolution:
        logging.info("Convolve sky to input cube spectral resolution")

        new_res_pix = np.sqrt(output_spec_res**2 - sky_resolution**2) / np.abs(
            input_lambda[1] - input_lambda[0])
        sigma_LSF_pix = new_res_pix / 2.35482
        if sigma_LSF_pix < 1.:  # avoid convolution with a kernel narrower than 1 pixel
            return input_flux
        npix_LSF = int(sigma_LSF_pix * config_data['LSF_kernel_size'])
        kernel_LSF = Gaussian1DKernel(stddev=sigma_LSF_pix, x_size=npix_LSF)
        return np.convolve(input_flux, kernel_LSF, mode="same")

    else:
        logging.warning(
            "Sky spectral resolution (R = {:.0f}) is lower than the input cube spectral resolution (R = {:.0f})"
            .format(
                np.median(input_lambda) / sky_resolution,
                np.median(input_lambda) /
                output_spec_res if output_spec_res != 0 else np.inf))
        return input_flux
Example #21
def ySZ_convolved(r, z, M500, P0, c500, fwhm_beam, cosmo=None, Dv=500):
    """ 
    ySZ_convolved(r, z, M500, P0, c500, fwhm_beam, cosmo=None, Dv=500)
    ---------
    observed y profile - Equation (3)
    the real y is convolved with the beam. When done in angular units it's simple;
    when done in physical units, should we just convert the psf size from arcmin to Mpc?
    """
    #from scipy.ndimage.filters import gaussian_filter1d
    from astropy.convolution import Gaussian1DKernel, convolve
    if cosmo is None:
        cosmo = FlatLambdaCDM(
            H0=70,
            Om0=0.3,
        )
    # convert the beam from angular (theta) to comoving (r). This is likely wrong but what's been used.
    psf = fwhm_beam / np.sqrt(8. * np.log(2.))  # in arcmin
    psf_r = (psf * cosmo.kpc_comoving_per_arcmin(z)).to(u.Mpc)  # in Mpc
    # make the y profile
    y = ySZ_r(r, z, M500, P0, c500, cosmo=cosmo, Dv=Dv)
    ind = np.isfinite(y)
    y_filtered = np.zeros(y.shape) * np.nan
    # convolve y and beam. convolution is done unitless
    dr = (r.max() - r.min()) / (len(r) - 1)  # this is only good for linear binning?
    g = Gaussian1DKernel(stddev=psf_r / dr)
    y_filtered[ind] = convolve(y[ind], g, 'fill', fill_value=np.nan)
    #y_filtered[ind] = gaussian_filter1d(y[np.isfinite(y)],psf_r.value,mode='constant')
    return y_filtered
Example #22
def filter_1D(data, std, dim="time", dtype=None):
    if astropy is None:
        raise RuntimeError(
            "Module `astropy` not found. Please install optional dependency with `conda install -c conda-forge astropy"
        )

    if dtype is None:
        dtype = detect_dtype(data)

    kernel = Gaussian1DKernel(std)

    def smooth_raw(data):
        raw_data = getattr(data, "values", data)
        result = convolve_fft(raw_data, kernel, boundary="wrap")
        result[np.isnan(raw_data)] = np.nan
        return result

    def temporal_smoother(data):
        dims = [dim]
        return xr.apply_ufunc(
            smooth_raw,
            data,
            vectorize=True,
            dask="parallelized",
            input_core_dims=[dims],
            output_core_dims=[dims],
            output_dtypes=[dtype],
        )

    return temporal_smoother(data)
Example #23
    def multVoigt(vv=velocity, a_1=a_1, a1_1=a1_1, a2_1=a2_1, a_2=a_2, a1_2=a1_2, a2_2=a2_2,
                  f=f, gamma=gamma, l0=l0,
                  nvoigts=nvoigts, vars_dic=vars_dic):

        model_matrix = []

        for i in [0, 1]:
            conv_val = RES / (2 * np.sqrt(2 * np.log(2)) * tf[i])
            gauss_k = Gaussian1DKernel(stddev=conv_val, mode="oversample")

            if i == 0:
                flux = np.ones(len(vv[i])) * a_1  # (a_1 + a1_1 * vv[i] + a2_1 * (power_lst(vv[i], 2)))
            if i == 1:
                flux = np.ones(len(vv[i])) * a_2  # (a_2 + a1_2 * vv[i] + a2_2 * (power_lst(vv[i], 2)))

            for j in range(1, nvoigts + 1):
                v = vv[i] - vars_dic["v0" + str(j)]
                flux *= add_abs_velo(v, vars_dic["N" + str(j)],
                                     vars_dic["b" + str(j)], gamma[i], f[i], l0[i])

            # model_matrix.append(flux)
            model_matrix.append(np.convolve(flux, gauss_k, mode='same'))


        #print a_1, a1_1, a2_1, a_2, a1_2, a2_2
        #print vars_dic
        #print model_matrix
        return model_matrix
Example #24
def smooth_days(x, y, kernel, ksize, fix):
    """Smooth data (x, y) given parameters kernel, ksize, fix"""
    kernel = kernel.lower()
    if kernel == 'none' or not ksize:
        return x, y
    if kernel == 'box':
        from astropy.convolution import convolve, Box1DKernel
        ysm = convolve(y, Box1DKernel(ksize), boundary='extend')
    elif kernel == 'gaussian':
        from astropy.convolution import convolve, Gaussian1DKernel
        ysm = convolve(y, Gaussian1DKernel(ksize), boundary='extend')
    elif kernel == 'trapezoid':
        from astropy.convolution import convolve, Trapezoid1DKernel
        ysm = convolve(y, Trapezoid1DKernel(ksize), boundary='extend')
    elif kernel == 'triangle':
        from astropy.convolution import convolve, CustomKernel
        f = triangle(ksize)
        ysm = convolve(y, CustomKernel(f), boundary='extend')
    else:
        raise ValueError('{} not supported.'.format(kernel))
    redo = int(np.floor(ksize / 2))
    xsm = x
    if fix == 'cull':
        _sfs = slice(0, len(ysm) - redo)
        xsm = x[_sfs]
        ysm = ysm[_sfs]
    elif fix == 'redo':
        for i in range(len(y) - redo, len(y)):
            ave = 0.0
            cnt = 0
            for j in range(i, len(y)):
                ave += y[j]
                cnt += 1
            ysm[i] = (ave / cnt + ysm[i]) / 2.0
    return xsm, ysm
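A short illustrative call of smooth_days using the Gaussian branch, which needs only astropy (the triangle() helper referenced in the 'triangle' branch is assumed to be defined elsewhere); the input series below is made up:

import numpy as np

x = np.arange(120, dtype=float)                      # days
y = np.sin(x / 10.0) + 0.3 * np.random.randn(x.size)
xsm, ysm = smooth_days(x, y, kernel='gaussian', ksize=5, fix='cull')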
Example #25
def model_profile(theta, wave, line):
    voigt_one, voigt_Full = create_model_simple(theta, wave, line)
    COS_kernel = (6.5 / 2.355) / 3.  # 6.5 pixels
    g = Gaussian1DKernel(stddev=COS_kernel)
    # Convolve data
    fmodel = convolve(voigt_Full, g, boundary='extend')
    return fmodel, voigt_one
Example #26
    def set_spectral_resolution(self, spectral_resolution):
        """
        Set the spectral resolution by convolution with a Gaussian.

        Parameters
        ----------
        spectral_resolution : `~astropy.units.Quantity`
            The spectral resolution.

        Returns
        -------
        spec_new : `~speclib.Spectrum`
            A spectrum with the desired spectral resolution.
        """
        from astropy.convolution import convolve, Gaussian1DKernel

        # # First, check if grid spacing is regular
        # delta_lambdas = np.unique(self.wavelength.diff())
        delta_lambda = np.unique(self.wavelength.diff()).min()

        kernel_size = (spectral_resolution /
                       delta_lambda).value  # resolution elements
        kernel = Gaussian1DKernel(kernel_size)
        convolved_flux = convolve(self.flux, kernel, boundary="extend")
        spec_new = Spectrum(spectral_axis=self.wavelength, flux=convolved_flux)

        return spec_new
Example #27
def smooth_flux(flux, width=10, kernel="boxcar"):

    if kernel == "boxcar" or kernel == "Boxcar":
        kernel = Box1DKernel(width)
    elif kernel == "gaussian" or kernel == "Gaussian":
        kernel = Gaussian1DKernel(width)

    return convolve(flux, kernel)
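For reference, a minimal call of smooth_flux; note that width is the box width for the boxcar kernel but the standard deviation (in pixels) for the Gaussian kernel, since it is passed straight to the kernel constructors (assumes Box1DKernel, Gaussian1DKernel and convolve are imported from astropy.convolution as above; the input is made up):

import numpy as np

flux = np.sin(np.linspace(0, 6 * np.pi, 500)) + 0.2 * np.random.randn(500)
flux_box = smooth_flux(flux, width=10, kernel="boxcar")
flux_gauss = smooth_flux(flux, width=3, kernel="gaussian")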
Example #28
def calc_conc(fe, ofe, age, dt=0.2, n_width=0.68, w_width=0.999, stddev=10, **kw):

    g = Gaussian1DKernel(stddev=stddev)

    age_space = np.arange(0, 14, 2 * dt)
    age_space_cent = 0.5 * (age_space[1:] + age_space[:-1])
    width_fe_n = np.zeros_like(age_space_cent)
    width_fe_w = np.zeros_like(age_space_cent)
    width_ofe_n = np.zeros_like(age_space_cent)
    width_ofe_w = np.zeros_like(age_space_cent)
    conc_fe = np.zeros_like(age_space_cent)
    conc_ofe = np.zeros_like(age_space_cent)
    disp_sfh = np.zeros_like(age_space_cent)
    limits_fe_n = np.zeros((2, len(age_space_cent)))
    limits_fe_w = np.zeros((2, len(age_space_cent)))
    limits_ofe_n = np.zeros((2, len(age_space_cent)))
    limits_ofe_w = np.zeros((2, len(age_space_cent)))
    # limits_sfh = np.zeros((2, len(age_space_cent)))

    for t, i in zip(age_space_cent, range(len(age_space_cent))):
        idxs_age = np.where(abs(age - t) < dt)

        # limits_fe_n[:, i] = limits(fe[idxs_age], low=0.5-n_width/2., high=0.5+n_width/2., bins=500)[:]
        # limits_fe_w[:, i] = limits(fe[idxs_age], low=0.5-w_width/2., high=0.5+w_width/2., bins=500)[:]
        try:
            limits_fe_n[:, i] = np.percentile(fe[idxs_age], [50.-n_width*50., 50.+n_width*50.])
            limits_fe_w[:, i] = np.percentile(fe[idxs_age], [50.-w_width*50., 50.+w_width*50.])
        except:
            limits_fe_n[:, i] = [0., 0.]
            limits_fe_w[:, i] = [0., 0.]
        width_fe_n[i] = limits_fe_n[1, i] - limits_fe_n[0, i]
        width_fe_w[i] = limits_fe_w[1, i] - limits_fe_w[0, i]
        conc_fe[i] = width_fe_n[i]/width_fe_w[i]

        # limits_ofe_n[:, i] = limits(ofe[idxs_age], low=0.5-n_width/2., high=0.5+n_width/2., bins=500)[:]
        # limits_ofe_w[:, i] = limits(ofe[idxs_age], low=0.5-w_width/2., high=0.5+w_width/2., bins=500)[:]
        try:
            limits_ofe_n[:, i] = np.percentile(ofe[idxs_age], [50.-n_width*50., 50.+n_width*50.])
            limits_ofe_w[:, i] = np.percentile(ofe[idxs_age], [50.-w_width*50., 50.+w_width*50.])
        except:
            limits_ofe_n[:, i] = [0., 0.]
            limits_ofe_w[:, i] = [0., 0.]
        width_ofe_n[i] = limits_ofe_n[1, i] - limits_ofe_n[0, i]
        width_ofe_w[i] = limits_ofe_w[1, i] - limits_ofe_w[0, i]
        conc_ofe[i] = width_ofe_n[i]/width_ofe_w[i]

        if 'sfh' in kw:
            z = convolve(kw['sfh'], g)
            idxs_sfh = np.where(abs(kw['sfh_age'] - t) < dt)
        # limits_sfh[:, i] = limits(sfh[idxs_sfh], low=0.1, high=0.9, bins=500)[:]
            disp_sfh[i] = np.std(kw['sfh'][idxs_sfh]-z[idxs_sfh])  #/np.mean(z[idxs_sfh])

    if 'sfh' in kw:
        return conc_fe, conc_ofe, disp_sfh, z, limits_fe_n[1,:], (limits_ofe_n[0,:]+limits_ofe_n[1,:])/2., width_fe_w, width_ofe_w

    else: 
        return conc_fe, conc_ofe, limits_fe_n[1,:], (limits_ofe_n[0,:]+limits_ofe_n[1,:])/2., width_fe_w, width_ofe_w
Example #29
def make_test_cube(shape=(30, 9, 9), outfile='test.fits', sigma=None, seed=0):
    """
    Generates a simple gaussian cube with noise of
    given shape and writes it as a fits file.
    """
    from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel
    if sigma is None:
        sigma1d, sigma2d = shape[0] / 10., np.mean(shape[1:]) / 5.
    else:
        sigma1d, sigma2d = sigma

    gauss1d = Gaussian1DKernel(stddev=sigma1d, x_size=shape[0])
    gauss2d = Gaussian2DKernel(sigma2d,
                               x_size=shape[1],
                               y_size=shape[2])
    test_cube = gauss1d.array[:, None, None] * gauss2d.array
    test_cube = test_cube / test_cube.max()
    # adding noise:
    np.random.seed(seed)
    noise_cube = ((np.random.random(test_cube.shape) - 0.5) *
                  np.median(test_cube.std(axis=0)))
    test_cube += noise_cube
    true_rms = noise_cube.std()

    # making a simple header for the test cube:
    test_hdu = fits.PrimaryHDU(test_cube)
    # the strange cdelt values are a workaround
    # for what seems to be a bug in wcslib:
    # https://github.com/astropy/astropy/issues/4555
    cdelt1, cdelt2, cdelt3 = -(4e-3 + 1e-8), 4e-3 + 1e-8, -0.1
    keylist = {
        'CTYPE1': 'RA---GLS',
        'CTYPE2': 'DEC--GLS',
        'CTYPE3': 'VRAD',
        'CDELT1': cdelt1,
        'CDELT2': cdelt2,
        'CDELT3': cdelt3,
        'CRVAL1': 0,
        'CRVAL2': 0,
        'CRVAL3': 5,
        'CRPIX1': 9,
        'CRPIX2': 0,
        'CRPIX3': 5,
        'CUNIT1': 'deg',
        'CUNIT2': 'deg',
        'CUNIT3': 'km s-1',
        'BUNIT': 'K',
        'EQUINOX': 2000.0
    }
    # write out some values used to generate the cube:
    keylist['SIGMA'] = abs(sigma1d * cdelt3), 'in units of CUNIT3'
    keylist['RMSLVL'] = true_rms
    keylist['SEED'] = seed

    test_header = fits.Header()
    test_header.update(keylist)
    test_hdu = fits.PrimaryHDU(data=test_cube, header=test_header)
    test_hdu.writeto(outfile, overwrite=True, checksum=True)
Example #30
def smoothing(blue_path, red_path, tell_wave, tell_flux, hi, lo, mid, mid2, pre):
    #Import co-added target spectra
    blue_coadd1d = fits.open(blue_path)
    blue_dat = blue_coadd1d[1].data
    blue_coadd1d.close()

    red_coadd1d = fits.open(red_path)
    red_dat = red_coadd1d[1].data
    red_coadd1d.close()

    #Get wavelengths and fluxes
    blue_wave = blue_dat['wave']
    red_wave = red_dat['wave']
    blue_ivar = blue_dat['ivar']
    red_ivar = red_dat['ivar']

    flux = np.copy(tell_flux)
    total_wave = np.concatenate((blue_wave, red_wave))
    noise = 1/np.sqrt(np.concatenate((blue_ivar, red_ivar)))

    #Get resolution
    resb, resr = tell.lris_res(blue_wave, red_wave)
    total_res = np.concatenate((resb, resr))

    #Get pixel positions of noisy telluric spikes
    spikes_hi, spikes_lo, spikes_mid, spikes_mid2, spikes_early = spikes(total_wave, flux, hi, lo, mid, mid2, pre)
    bad_pixels = []
    for i in range(len(spikes_hi)):
        for j in range(len(spikes_hi[i])):
            bad_pixels.append(spikes_hi[i][j])
    for i in range(len(spikes_lo)):
        for j in range(len(spikes_lo[i])):
            bad_pixels.append(spikes_lo[i][j])
    for i in range(len(spikes_mid)):
        bad_pixels.append(spikes_mid[i])
    for i in range(len(spikes_mid2)):
        bad_pixels.append(spikes_mid2[i])
    for i in range(len(spikes_early)):
        bad_pixels.append(spikes_early[i])

    bad_pixels = np.sort(list(set(bad_pixels))) #Remove duplicates

    #Interpolate over noisy spikes
    flux[bad_pixels] = np.nan
    noise[bad_pixels] = np.nan

    kernel = Gaussian1DKernel(stddev=1)
    intp_flux = interpolate_replace_nans(flux, kernel, convolve=convolve_fft)
    intp_noise = interpolate_replace_nans(noise, kernel, convolve=convolve_fft)

    #Smooth by 200km/s
    c = 299792.458 #speed of light
    in_sigma_kms = 200
    sigma_aa_desired = in_sigma_kms/c*total_wave
    smoothed_flux = utils.smoothing.smoothspec(total_wave, intp_flux, outwave=total_wave, smoothtype='lsf', resolution=sigma_aa_desired)
    smoothed_noise = utils.smoothing.smoothspec(total_wave, intp_noise, outwave=total_wave, smoothtype='lsf', resolution=sigma_aa_desired)
    return total_wave, smoothed_flux, smoothed_noise, bad_pixels