Example #1
 def get_windows(self):
     rectangle_windows = [
         boxcar(self.rc_lengths[0]),
         boxcar(self.rc_lengths[1])
     ]
     hanning_windows = hanning(self.hn_length)
     hamming_windows = hamming(self.hm_length)
     self.windows = [rectangle_windows, hanning_windows, hamming_windows]
Example #2
def lperiodgram(psd, dofw = 1, alpha = 0.05, Nens = 2, Nband = 1, smoo = True, ax = -1):
    """
    Computes a smothed or binned late periodgram with the no-overlap Welch method and/or Band averaging
    for a given array of PSD, and outputs an average PSD along axis ax and its statistics in a tuple.
    """
    if smoo:
        #N = np.floor(psd.shape[ax]/int(Nens))
        N = int(Nens)
        win = windows.boxcar(N)
        win = win/win.sum()
        win.resize((N,) + tuple(np.int8(np.ones(psd.ndim - 1))))
        if ax != 0 and ax != -1:
            win = np.rollaxis(win, 0, start = ax + 1)
        elif ax != 0 and ax == -1:
            win = np.rollaxis(win, 0, start = psd.ndim)
        elif ax == 0:
            pass
        else:
            raise ValueError, "Pick your axis better."

        mpsd = sci_fftconvolve(psd, win, mode = 'same')
    else:
        mpsd = binav(psd, bins = Nens, ax = ax)
    
    if Nband > 1:
        if Nband % 2 != 1:
            Nband = Nband + 1
        
        wbd = windows.boxcar(Nband)
        wbd = wbd / wbd.sum()
        wbd.resize((Nband,) + tuple(np.int8(np.ones(mpsd.ndim - 1))))
        if ax != 0 and ax != -1:
            wbd = np.rollaxis(wbd, 0, start = ax + 1)
        elif ax != 0 and ax == -1:
            wbd = np.rollaxis(wbd, 0, start = mpsd.ndim)
        elif ax == 0:
            pass
        else:
            raise ValueError("Pick your axis better.")
        
        mpsd = sci_fftconvolve(mpsd, wbd, mode = 'same')
    
    dof = 2*Nens*Nband*dofw # for non-overlaping segments
    psd_hi = (dof * mpsd) / (stats.chi2.ppf(.5 * alpha, dof))
    psd_lo = (dof * mpsd) / (stats.chi2.ppf(1-(alpha/2), dof))
    loci = np.log10(dof / stats.chi2.ppf(1-(alpha/2), dof))
    hici = np.log10(dof / stats.chi2.ppf(.5 * alpha, dof))
    Stats = (psd_lo, psd_hi, loci, hici, dof)
    return mpsd, Stats
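
A minimal usage sketch for lperiodgram (not from the source; the segment layout, sampling rate and Nens value are assumptions, and the module-level numpy/scipy imports implied above are taken as given):

import numpy as np
from scipy.signal import periodogram

x = np.random.randn(16, 1024)               # 16 non-overlapping data segments
f, psd = periodogram(x, fs=1.0, axis=-1)    # raw periodogram of each segment
# smooth/average the segment periodograms with a length-Nens boxcar along axis 0
# and get chi-squared confidence statistics for the averaged estimate
mpsd, (psd_lo, psd_hi, loci, hici, dof) = lperiodgram(psd, Nens=4, ax=0)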
Example #3
def rolling_mean_np(arr, win, center=True, win_type='boxcar'):
    import scipy.signal.windows as spwin
    import pandas as pd
    import matplotlib.pyplot as plt

    df = pd.DataFrame(data=arr.reshape((arr.shape[0], arr[0].size)))

    if win_type == 'gaussian':
        w_std = win / 3.
        print('Performing {} day rolling mean with gaussian window (std={})'
              ' to get better interannual statistics'.format(win, w_std))
        fig, ax = plt.subplots(figsize=(3, 3))
        ax.plot(range(-int(win / 2), +round(win / 2 + .49)),
                spwin.gaussian(win, w_std))
        plt.title('window used for rolling mean')
        plt.xlabel('timesteps')
        rollmean = df.rolling(win,
                              center=center,
                              min_periods=1,
                              win_type='gaussian').mean(std=w_std)
    elif win_type == 'boxcar':
        fig, ax = plt.subplots(figsize=(3, 3))
        plt.plot(spwin.boxcar(win))
        plt.title('window used for rolling mean')
        plt.xlabel('timesteps')
        rollmean = df.rolling(win,
                              center=center,
                              min_periods=1,
                              win_type='boxcar').mean()

    return rollmean.values.reshape((arr.shape))
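
A minimal usage sketch for rolling_mean_np (illustrative only; the array shape and window length are assumptions):

import numpy as np

arr = np.random.randn(365, 4, 5)   # e.g. (time, lat, lon); trailing axes are flattened internally
smoothed = rolling_mean_np(arr, win=15, center=True, win_type='gaussian')
print(smoothed.shape)              # (365, 4, 5), same shape as the input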
Example #4
    def CalcSteerVect(num_channels: int = 48, beam_center: float = 0.0, sll: float = None, fc: float = 76.5e9,
                      dx: float = None):
        dx = 4.2e-3 if dx is None else GlobalConstants.dx  # element spacing
        wavelength = C / fc
        steer_vect = np.ones([num_channels, 1], dtype='complex')
        if sll is None:
            win = windows.boxcar(num_channels)
        else:
            if num_channels == 48:
                win = MathHelper.taylor_48_5_35
            elif num_channels == 96:
                win = MathHelper.taylor_96_5_35
            elif num_channels == 144:
                win = MathHelper.taylor_144_5_35
            elif num_channels == 192:
                win = MathHelper.taylor_192_5_35
            else:
                print("Invalid number of channels")
                win = MathHelper.taylor_48_5_35

        if beam_center != 0:  # steer_vect is already initialized to complex ones, so no need to handle that
            dphi = 2 * np.pi * dx * np.sin(beam_center * np.pi / 180) / wavelength
            phi_vect = np.linspace(0, dphi * (num_channels - 1), num_channels)
        else:
            phi_vect = np.zeros([num_channels, ])

        steer_vect = np.exp(1j * phi_vect) * win

        return steer_vect
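
A minimal usage sketch (assumed, not from the source): CalcSteerVect appears to live on a MathHelper class, as the taylor_* weight tables suggest, and the module-level constant C (speed of light) and GlobalConstants are taken as given.

sv = MathHelper.CalcSteerVect(num_channels=48, beam_center=10.0, fc=76.5e9)
print(sv.shape)   # (48,) complex steering vector with uniform (boxcar) weighting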
Example #5
def findpeaks(x, y, wid, sth, ath, pkg=None, verbose=False):
    """Find peaks in spectrum"""
    # derivative
    grad = np.gradient(y)
    # smooth derivative
    win = boxcar(wid)
    d = sp.signal.convolve(grad, win, mode='same') / sum(win)
    # size
    nx = len(x)
    # set up windowing
    if not pkg:
        pkg = wid
    hgrp = int(pkg/2)
    hgt = []
    pks = []
    sgs = []
    # loop over spectrum
    # limits to avoid edges given pkg
    for i in np.arange(pkg, (nx - pkg)):
        # find zero crossings
        if np.sign(d[i]) > np.sign(d[i+1]):
            # pass slope threshold?
            if (d[i] - d[i+1]) > sth * y[i]:
                # pass amplitude threshold?
                if y[i] > ath or y[i+1] > ath:
                    # get subvectors around peak in window
                    xx = x[(i-hgrp):(i+hgrp+1)]
                    yy = y[(i-hgrp):(i+hgrp+1)]
                    if len(yy) > 3:
                        try:
                            # gaussian fit
                            res, _ = curve_fit(gaus, xx, yy,
                                               p0=[y[i], x[i], 1.])
                            # check offset of fit from initial peak
                            r = abs(x - res[1])
                            t = r.argmin()
                            if abs(i - t) > pkg:
                                if verbose:
                                    print(i, t, x[i], res[1], x[t])
                            else:
                                hgt.append(res[0])
                                pks.append(res[1])
                                sgs.append(abs(res[2]))
                        except RuntimeError:
                            continue
    # clean by sigmas
    cvals = []
    cpks = []
    sgmn = None
    if len(pks) > 0:
        cln_sgs, low, upp = sigmaclip(sgs, low=3., high=3.)
        for i in range(len(pks)):
            if low < sgs[i] < upp:
                cpks.append(pks[i])
                cvals.append(hgt[i])
        sgmn = cln_sgs.mean()
        # sgmd = float(np.nanmedian(cln_sgs))
    else:
        print("No peaks found!")
    return cpks, sgmn, cvals
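
A minimal usage sketch for findpeaks (illustrative only; the synthetic spectrum and thresholds are made up, and the module-level helpers it relies on — gaus, boxcar, sp, curve_fit, sigmaclip — are assumed to be imported as in its home module):

import numpy as np

x = np.linspace(4000., 5000., 2000)                      # wavelength grid
y = (100. * np.exp(-0.5 * ((x - 4300.) / 2.) ** 2) +     # two emission lines
     80. * np.exp(-0.5 * ((x - 4700.) / 2.) ** 2) +
     np.random.normal(0., 0.5, x.size))                  # noise
centers, mean_sigma, heights = findpeaks(x, y, wid=5, sth=0.01, ath=20.)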
Example #6
    def test_extremes(self):
        # Test extremes of alpha correspond to boxcar and hann
        tuk0 = windows.tukey(100, 0)
        box0 = windows.boxcar(100)
        assert_array_almost_equal(tuk0, box0)

        tuk1 = windows.tukey(100, 1)
        han1 = windows.hann(100)
        assert_array_almost_equal(tuk1, han1)
Example #7
def smooth(data, window_type='hann', filter_width=11, sigma=2, plot_on=1):
    """
    Smooth 1d data with moving window (uses filtfilt to have zero phase distortion)
    Wrapper for scipy.signal.filtfilt
    To do: consider replacing with sosfiltfilt

    Inputs:
        data: numpy array
        window_type ('hann'): string ('boxcar', 'gaussian', 'hann', 'bartlett', 'blackman')
        filter_width (11): int; wider is smoother, an odd width is ideal
        sigma (2.): scalar std deviation only used for gaussian
        plot_on (1): int determines plotting. 0 none, 1 plot signal, 2: also plot filter
    Outputs
        data_smoothed: signal after being smoothed
        filter_window: the window used for smoothing

    Notes:
        Uses Gustafsson's method to handle edge artifacts
        Currently accepted window_type options:
            hann (default) - cosine bump filter_width is only param
            blackman - more narrowly peaked bump than hann
            boxcar - flat-top of length filter_width
            bartlett - triangle
            gaussian - sigma determines width

    """
    if window_type == 'boxcar':
        filter_window = windows.boxcar(filter_width)
    elif window_type == 'hann':
        filter_window = windows.hann(filter_width)
    elif window_type == 'bartlett':
        filter_window = windows.bartlett(filter_width)
    elif window_type == 'blackman':
        filter_window = windows.blackman(filter_width)
    elif window_type == 'gaussian':
        filter_window = windows.gaussian(filter_width, sigma)
    else:
        raise ValueError(f"Unknown window_type: {window_type}")
    filter_window = filter_window / np.sum(filter_window)
    data_smoothed = signal.filtfilt(filter_window, 1, data,
                                    method="gust")  # pad

    if plot_on:
        if plot_on > 1:
            plt.plot(filter_window)
            plt.title(f'{window_type} filter')
        plt.figure('signal', figsize=(10, 5))
        plt.plot(data,
                 color=(0.7, 0.7, 0.7),
                 label='noisy signal',
                 linewidth=1)
        plt.plot(data_smoothed, color='r', label='smoothed signal')
        plt.xlim(0, len(data_smoothed))
        plt.xlabel('sample')
        plt.grid(True)
        plt.legend()

    return data_smoothed, filter_window
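
A minimal usage sketch for smooth (illustrative only; the test signal is made up, and the numpy/scipy/matplotlib module-level imports are assumed as in the function's home module):

import numpy as np

t = np.linspace(0., 1., 500)
noisy = np.sin(2. * np.pi * 5. * t) + 0.3 * np.random.randn(t.size)
clean, win = smooth(noisy, window_type='hann', filter_width=21, plot_on=0)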
Example #8
    def __init__(self, N, fs, init_frame=[], init_level=False, tol=0, N_fft=128, tau_up=10, tau_down=40e-3, T_up=3, T_down=1.2):

        """
        Constructor for VAD (Voice Activity Detector) class.
        Parameters
        -----------
        N : int
            Length of frame.
        fs : float or int
            Sampling frequency
        tau_up : float
            Time in seconds.
        tau_down : float
            Time in seconds.
        T_up : float
            Time in seconds.
        T_down : float
            Time in seconds.
        """

        self.tol = tol
        self.N = N
        self.N_fft = N_fft
        self.T = self.N/float(fs)
        self.tau_up = tau_up
        self.tau_down= tau_down
        self.T_up = T_up
        self.T_down = T_down
        self.fs = fs
        self.eta_up = 1.2
        self.eta_down = 40e-3
        self.eta_min = 2

        self.W = windows.boxcar(math.ceil(N_fft/2))

        if init_level:
            X = fft(init_frame, N_fft)
            X = X[math.ceil(N_fft/2):]
            L = np.sum((self.W*abs(X))**2)/len(X)
            self.L_min = (self.T/self.tau_down)*L
        else:
            self.L_min = 10

        self.lambda_init = 1
        self.V = False
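
A minimal usage sketch (assumed, not from the source): the class is referred to as VAD in its docstring, so the class name and the frame/sampling parameters below are illustrative assumptions.

vad = VAD(N=512, fs=16000, N_fft=256)
print(vad.T, vad.L_min)   # frame duration in seconds and the default noise floor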
Example #9
def multiplSegments():
    y = np.loadtxt('data.512.csv', delimiter=",", unpack=True)
    datos = y
    print(datos.size)
    N = 512
    fs = N * 1.0 / (3 - 0)
    nblock = N // 2
    win = boxcar(nblock)
    f, Pxxf = welch(datos,
                    fs,
                    window=win,
                    nfft=nblock,
                    return_onesided=True,
                    detrend=False)
    print(Pxxf.max())
    print(Pxxf.size)
    plt.scatter(f, Pxxf)
    plt.grid()
    plt.show()
Example #10
        def testSmooth(self):
            # Test the default window size (10% of input signal length)
            self.assertEqual(sum(smooth(self.signal, wtype='boxcar') != 0),
                             self.npts * 0.1)
            # Test the default window type ('boxzen')
            self.assertEqual(sum(smooth(self.signal)),
                             sum(smooth(self.signal, wtype='boxzen')))
            # Test the default standard deviation for 'gaussian' windows (10% of window size)
            self.assertEqual(sum(smooth(self.signal, 10, 'gaussian')),
                             sum(smooth(self.signal, 10, 'gaussian', 1.)))
            # Test a window passed as parameter
            self.assertEqual(sum(smooth(self.signal, 10, 'boxcar')),
                             sum(smooth(self.signal, wnds.boxcar(10))))
            # Test the window normalization step
            n = 10.
            self.assertEqual(sum(smooth(self.signal, 10, 'boxcar') == 1 / n),
                             n)

            # Test if the right exception is raised when the window size is <=0
            self.assertRaises(ValueError, smooth, self.signal, 0)
Example #11
 def test_basic(self):
     assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1])
     assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
     assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1])
Example #12
    def _perform(self):
        """Solve individual arc bar spectra for wavelength"""
        self.logger.info("Solving individual arc spectra")
        # plot control booleans
        master_inter = (self.config.instrument.plot_level >= 2)
        do_inter = (self.config.instrument.plot_level >= 3)
        # output control
        verbose = (self.config.instrument.verbose > 1)

        # Bar statistics
        bar_sig = []
        bar_nls = []
        # set thresh for finding lines
        hgt = 50.
        self.logger.info("line thresh = %.2f" % hgt)
        # get relevant part of atlas spectrum
        atwave = self.action.args.refwave[self.action.args.atminrow:self.
                                          action.args.atmaxrow]
        atspec = self.action.args.reflux[self.action.args.atminrow:self.action.
                                         args.atmaxrow]
        # convert list into ndarray
        at_wave = np.asarray(self.action.args.at_wave)
        at_flux = np.asarray(self.action.args.at_flux)
        # get x values starting at zero pixels
        self.action.args.xsvals = np.arange(
            0, len(self.context.arcs[self.config.instrument.REFBAR]))
        # loop over arcs and generate a wavelength solution for each
        next_bar_to_plot = 0
        poly_order = 4
        for ib, b in enumerate(self.context.arcs):
            # Starting with pascal shifted coeffs from fit_center()
            coeff = self.action.args.twkcoeff[ib]
            # get bar wavelengths
            bw = np.polyval(coeff, self.action.args.xsvals)
            # smooth spectrum according to slicer
            if 'Small' in self.action.args.ifuname:
                # no smoothing for Small slicer
                bspec = b
            else:
                if 'Large' in self.action.args.ifuname:
                    # max smoothing for Large slicer
                    win = boxcar(5)
                else:
                    # intermediate smoothing for Medium slicer
                    win = boxcar(3)
                # do the smoothing
                bspec = sp.signal.convolve(b, win, mode='same') / sum(win)
            # store values to fit
            at_wave_dat = []  # atlas line wavelengths
            at_flux_dat = []  # atlas line peak fluxes
            arc_pix_dat = []  # arc line pixel positions
            arc_int_dat = []  # arc line pixel intensities
            rej_wave = []  # rejected line wavelengths
            rej_flux = []  # rejected line fluxes
            gaus_sig = []
            nrej = 0
            # loop over lines
            for iw, aw in enumerate(self.action.args.at_wave):
                # get window for this line
                try:
                    # get arc line initial pixel position
                    line_x = [i for i, v in enumerate(bw) if v >= aw][0]
                    # get window for arc line
                    minow, maxow, count = get_line_window(
                        bspec,
                        line_x,
                        thresh=hgt,
                        logger=(self.logger if verbose else None))
                    # do we have enough points to fit?
                    if count < 5 or not minow or not maxow:
                        rej_wave.append(aw)
                        rej_flux.append(self.action.args.at_flux[iw])
                        nrej += 1
                        if verbose:
                            self.logger.info(
                                "Arc window rejected for line %.3f" % aw)
                        continue
                    # check if window no longer contains initial value
                    if line_x < minow or line_x > maxow:
                        rej_wave.append(aw)
                        rej_flux.append(self.action.args.at_flux[iw])
                        nrej += 1
                        if verbose:
                            self.logger.info(
                                "Arc window wandered off for line %.3f" % aw)
                        continue
                    # get data to fit
                    yvec = bspec[minow:maxow + 1]
                    xvec = self.action.args.xsvals[minow:maxow + 1]
                    wvec = bw[minow:maxow + 1]
                    f0 = max(yvec)
                    par_start = [f0, np.nanmean(xvec), 1.0]
                    par_bounds = ([f0 * 0.9, np.min(xvec),
                                   0.5], [f0 * 1.1,
                                          np.max(xvec), 2.5])
                    # Gaussian fit
                    try:
                        fit, _ = curve_fit(gaus, xvec, yvec, p0=par_start)
                        #  bounds=par_bounds, method='trf')
                        sp_pk_x = fit[1]
                        gaus_sig.append(fit[2])
                    except (RuntimeError, ValueError):
                        rej_wave.append(aw)
                        rej_flux.append(self.action.args.at_flux[iw])
                        nrej += 1
                        if verbose:
                            self.logger.info(
                                "Arc Gaussian fit rejected for line %.3f" % aw)
                        # sp_pk_x = line_x
                        continue

                    # get interpolation of arc line
                    int_line = interpolate.interp1d(xvec,
                                                    yvec,
                                                    kind='cubic',
                                                    bounds_error=False,
                                                    fill_value='extrapolate')
                    # use very dense sampling
                    xplot = np.linspace(min(xvec), max(xvec), num=1000)
                    # re-sample line with dense sampling
                    plt_line = int_line(xplot)
                    # get peak position
                    max_index = plt_line.argmax()
                    peak = xplot[max_index]
                    # calculate centroid
                    cent = np.sum(xvec * yvec) / np.sum(yvec)
                    # how different is the centroid from the peak?
                    if abs(cent - peak) > 0.7:
                        # keep track of rejected line
                        rej_wave.append(aw)
                        rej_flux.append(self.action.args.at_flux[iw])
                        nrej += 1
                        if verbose:
                            self.logger.info("Arc peak - cent offset = %.2f "
                                             "rejected for line %.3f" %
                                             (abs(cent - peak), aw))
                        continue
                    if plt_line[max_index] < 100:
                        # keep track of rejected line
                        rej_wave.append(aw)
                        rej_flux.append(self.action.args.at_flux[iw])
                        nrej += 1
                        if verbose:
                            self.logger.info("Arc peak too low = %.2f "
                                             "rejected for line %.3f" %
                                             (plt_line[max_index], aw))
                        continue
                    # store surviving line data
                    arc_pix_dat.append(peak)
                    arc_int_dat.append(plt_line[max_index])
                    at_wave_dat.append(aw)
                    at_flux_dat.append(self.action.args.at_flux[iw])
                    # plot, if requested
                    if do_inter and ib == next_bar_to_plot:
                        ptitle = " Bar# %d - line %3d/%3d: xc = %.1f, " \
                                 "Wave = %9.2f" % \
                                 (ib, (iw + 1), len(self.action.args.at_wave),
                                  peak, aw)
                        atx0 = [
                            i for i, v in enumerate(atwave) if v >= min(wvec)
                        ][0]
                        atx1 = [
                            i for i, v in enumerate(atwave) if v >= max(wvec)
                        ][0]
                        atnorm = np.nanmax(yvec) / np.nanmax(atspec[atx0:atx1])
                        p = figure(
                            title=self.action.args.plotlabel +
                            "ATLAS/ARC LINE FITS" + ptitle,
                            x_axis_label="Wavelength (A)",
                            y_axis_label="Relative Flux",
                            plot_width=self.config.instrument.plot_width,
                            plot_height=self.config.instrument.plot_height)
                        ylim = [0, np.nanmax(yvec)]
                        p.line(atwave[atx0:atx1],
                               atspec[atx0:atx1] * atnorm,
                               color='blue',
                               legend_label='Atlas')
                        p.circle(atwave[atx0:atx1],
                                 atspec[atx0:atx1] * atnorm,
                                 color='green',
                                 legend_label='Atlas')
                        p.line([aw, aw],
                               ylim,
                               color='red',
                               legend_label='AtCntr')
                        p.x_range = Range1d(start=min(wvec), end=max(wvec))
                        p.extra_x_ranges = {
                            "pix": Range1d(start=min(xvec), end=max(xvec))
                        }
                        p.add_layout(
                            LinearAxis(x_range_name="pix",
                                       axis_label="CCD Y pix"), 'above')
                        p.line(xplot,
                               plt_line,
                               color='black',
                               legend_label='Arc',
                               x_range_name="pix")
                        p.circle(xvec,
                                 yvec,
                                 legend_label='Arc',
                                 color='red',
                                 x_range_name="pix")
                        ylim = [0, np.nanmax(plt_line)]
                        p.line([cent, cent],
                               ylim,
                               color='green',
                               legend_label='Cntr',
                               line_dash='dashed',
                               x_range_name="pix")
                        p.line([sp_pk_x, sp_pk_x],
                               ylim,
                               color='magenta',
                               legend_label='Gpeak',
                               line_dash='dashdot',
                               x_range_name="pix")
                        p.line([peak, peak],
                               ylim,
                               color='black',
                               legend_label='Peak',
                               line_dash='dashdot',
                               x_range_name="pix")
                        p.y_range.start = 0
                        bokeh_plot(p, self.context.bokeh_session)

                        q = input(ptitle + " - Next? <cr>, q to quit: ")
                        if 'Q' in q.upper():
                            do_inter = False
                except IndexError:
                    if verbose:
                        self.logger.info(
                            "Atlas line not in observation: %.2f" % aw)
                    rej_wave.append(aw)
                    rej_flux.append(self.action.args.at_flux[iw])
                    nrej += 1
                    continue
                except ValueError:
                    if verbose:
                        self.logger.info(
                            "Interpolation error for line at %.2f" % aw)
                    rej_wave.append(aw)
                    rej_flux.append(self.action.args.at_flux[iw])
                    nrej += 1
            self.logger.info("")
            self.logger.info("Fitting wavelength solution starting with %d "
                             "lines after rejecting %d lines" %
                             (len(arc_pix_dat), nrej))
            # Fit wavelengths
            # Get poly order
            if self.action.args.dichroic_fraction <= 0.6:
                poly_order = 2
            elif 0.6 < self.action.args.dichroic_fraction < 0.75:
                poly_order = 3
            else:
                poly_order = 4
            self.logger.info("Fitting with polynomial order %d" % poly_order)
            # Initial fit
            wfit = np.polyfit(arc_pix_dat, at_wave_dat, poly_order)
            pwfit = np.poly1d(wfit)
            arc_wave_fit = pwfit(arc_pix_dat)
            # fit residuals
            resid = arc_wave_fit - at_wave_dat
            resid_c, low, upp = sigmaclip(resid, low=3., high=3.)
            wsig = resid_c.std()
            # maximum outlier
            max_resid = np.max(abs(resid))
            self.logger.info("wsig: %.3f, max_resid: %.3f" % (wsig, max_resid))
            # keep track of rejected lines
            rej_rsd = []  # rejected line residuals
            rej_rsd_wave = []  # rejected line wavelengths
            rej_rsd_flux = []  # rejected line fluxes
            # iteratively remove outliers
            it = 0
            while max_resid > 2.5 * wsig and it < 25:
                arc_dat = []  # arc line pixel values
                arc_fdat = []  # arc line flux data
                at_dat = []  # atlas line wavelength values
                at_fdat = []  # atlas line flux data
                # trim largest outlier
                for il, rsd in enumerate(resid):
                    if abs(rsd) < max_resid:
                        # append data for line that passed cut
                        arc_dat.append(arc_pix_dat[il])
                        arc_fdat.append(arc_int_dat[il])
                        at_dat.append(at_wave_dat[il])
                        at_fdat.append(at_flux_dat[il])
                    else:
                        if verbose:
                            self.logger.info("It%d REJ: %d, %.2f, %.3f, %.3f" %
                                             (it, il, arc_pix_dat[il],
                                              at_wave_dat[il], rsd))
                        # keep track of rejected lines
                        rej_rsd_wave.append(at_wave_dat[il])
                        rej_rsd_flux.append(at_flux_dat[il])
                        rej_rsd.append(rsd)
                # copy cleaned data back into input arrays
                arc_pix_dat = arc_dat.copy()
                arc_int_dat = arc_fdat.copy()
                at_wave_dat = at_dat.copy()
                at_flux_dat = at_fdat.copy()
                # refit cleaned data
                wfit = np.polyfit(arc_pix_dat, at_wave_dat, poly_order)
                # new wavelength function
                pwfit = np.poly1d(wfit)
                # new wavelengths for arc lines
                arc_wave_fit = pwfit(arc_pix_dat)
                # calculate residuals of arc lines
                resid = arc_wave_fit - at_wave_dat
                # get statistics
                resid_c, low, upp = sigmaclip(resid, low=3., high=3.)
                wsig = resid_c.std()
                # maximum outlier
                max_resid = np.max(abs(resid))
                # wsig = np.nanstd(resid)
                it += 1
            # END while max_resid > 2.5 * wsig and it < 25:
            # log arc bar results
            self.logger.info("")
            self.logger.info("BAR %03d, Slice = %02d, RMS = %.3f, N = %d" %
                             (ib, int(ib / 5), wsig, len(arc_pix_dat)))
            self.logger.info("Nits: %d, wsig: %.3f, max_resid: %.3f" %
                             (it, wsig, max_resid))
            self.logger.info("NRejRsd: %d, NRejFit: %d" %
                             (len(rej_rsd_wave), len(rej_wave)))
            self.logger.info("Line width median sigma: %.2f px" %
                             np.nanmedian(gaus_sig))
            self.logger.info("Coefs: " +
                             ' '.join(['%.6g' % (c, )
                                       for c in reversed(wfit)]))
            # store final fit coefficients
            self.action.args.fincoeff.append(wfit)
            # store statistics
            bar_sig.append(wsig)
            bar_nls.append(len(arc_pix_dat))
            # do plotting?
            if master_inter and ib == next_bar_to_plot:
                # plot bar fit residuals
                ptitle = " for Bar %03d, Slice %02d, RMS = %.3f, N = %d" % \
                         (ib, int(ib / 5), wsig, len(arc_pix_dat))
                p = figure(title=self.action.args.plotlabel + "RESIDUALS" +
                           ptitle,
                           x_axis_label="Wavelength (A)",
                           y_axis_label="Fit - Inp (A)",
                           plot_width=self.config.instrument.plot_width,
                           plot_height=self.config.instrument.plot_height)
                p.diamond(at_wave_dat, resid, legend_label='Rsd', size=8)
                if rej_rsd_wave:
                    p.diamond(rej_rsd_wave,
                              rej_rsd,
                              color='orange',
                              legend_label='Rej',
                              size=8)
                xlim = [self.action.args.atminwave, self.action.args.atmaxwave]
                ylim = [
                    np.nanmin(list(resid) + list(rej_rsd)),
                    np.nanmax(list(resid) + list(rej_rsd))
                ]
                p.line(xlim, [0., 0.], color='black', line_dash='dotted')
                p.line(xlim, [wsig, wsig], color='gray', line_dash='dashdot')
                p.line(xlim, [-wsig, -wsig], color='gray', line_dash='dashdot')
                p.line([self.action.args.cwave, self.action.args.cwave],
                       ylim,
                       legend_label='CWAV',
                       color='magenta',
                       line_dash='dashdot')
                bokeh_plot(p, self.context.bokeh_session)
                input("Next? <cr>: ")

                # overplot atlas and bar using fit wavelengths
                p = figure(title=self.action.args.plotlabel + "ATLAS/ARC FIT" +
                           ptitle,
                           x_axis_label="Wavelength (A)",
                           y_axis_label="Flux",
                           plot_width=self.config.instrument.plot_width,
                           plot_height=self.config.instrument.plot_height)
                bwav = pwfit(self.action.args.xsvals)
                p.line(bwav, b, color='darkgrey', legend_label='Arc')
                p.diamond(arc_wave_fit, arc_int_dat, color='darkgrey', size=8)
                ylim = [np.nanmin(b), np.nanmax(b)]
                atnorm = np.nanmax(b) / np.nanmax(atspec)
                p.line(atwave,
                       atspec * atnorm,
                       color='blue',
                       legend_label='Atlas')
                p.line([self.action.args.cwave, self.action.args.cwave],
                       ylim,
                       color='magenta',
                       line_dash='dashdot',
                       legend_label='CWAV')
                p.diamond(at_wave,
                          at_flux * atnorm,
                          legend_label='Kept',
                          color='green',
                          size=8)
                if rej_rsd_wave:
                    p.diamond(rej_rsd_wave,
                              [rj * atnorm for rj in rej_rsd_flux],
                              color='orange',
                              legend_label='RejRsd',
                              size=6)
                p.diamond(rej_wave, [rj * atnorm for rj in rej_flux],
                          color='red',
                          legend_label='RejFit',
                          size=6)
                bokeh_plot(p, self.context.bokeh_session)
                q = input("Next? <int> or <cr>, q - quit: ")
                if 'Q' in q.upper():
                    master_inter = False
                else:
                    try:
                        next_bar_to_plot = int(q)
                    except ValueError:
                        next_bar_to_plot = ib + 1

        # Plot final results

        # plot output name stub
        pfname = "arc_%05d_%s_%s_%s_tf%02d" % (
            self.action.args.ccddata.header['FRAMENO'], self.action.args.illum,
            self.action.args.grating, self.action.args.ifuname,
            int(100 * self.config.instrument.TAPERFRAC))

        # Plot coefs
        if self.config.instrument.plot_level >= 1:
            ylabs = ['Ang/px^4', 'Ang/px^3', 'Ang/px^2', 'Ang/px', 'Ang']
            ylabs = ylabs[-(poly_order + 1):]
            for ic in reversed(range(len(self.action.args.fincoeff[0]))):
                cn = poly_order - ic
                ptitle = self.action.args.plotlabel + "COEF %d VALUES" % cn
                p = figure(title=ptitle,
                           x_axis_label="Bar #",
                           y_axis_label="Coef %d (%s)" % (cn, ylabs[ic]),
                           plot_width=self.config.instrument.plot_width,
                           plot_height=self.config.instrument.plot_height)
                coef = []
                for c in self.action.args.fincoeff:
                    coef.append(c[ic])
                p.diamond(list(range(120)), coef, size=8)
                xlim = [-1, 120]
                ylim = get_plot_lims(coef)
                p.xgrid.grid_line_color = None
                oplot_slices(p, ylim)
                set_plot_lims(p, xlim=xlim, ylim=ylim)
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)
                # save coefficients plot
                save_plot(p, filename=pfname + '_coef%d.png' % cn)

        # Plot number of lines fit
        self.action.args.av_bar_nls = float(np.nanmean(bar_nls))
        self.action.args.st_bar_nls = float(np.nanstd(bar_nls))
        ptitle = self.action.args.plotlabel + \
            "FIT STATS <Nlns> = %.1f +- %.1f" % (self.action.args.av_bar_nls,
                                                 self.action.args.st_bar_nls)
        p = figure(title=ptitle,
                   x_axis_label="Bar #",
                   y_axis_label="N Lines",
                   plot_width=self.config.instrument.plot_width,
                   plot_height=self.config.instrument.plot_height)
        p.diamond(list(range(120)), bar_nls, size=8)
        xlim = [-1, 120]
        ylim = get_plot_lims(bar_nls)
        self.logger.info(
            "<N Lines> = %.1f +- %.1f" %
            (self.action.args.av_bar_nls, self.action.args.st_bar_nls))
        p.line(xlim,
               [self.action.args.av_bar_nls, self.action.args.av_bar_nls],
               color='red')
        p.line(xlim,
               [(self.action.args.av_bar_nls - self.action.args.st_bar_nls),
                (self.action.args.av_bar_nls - self.action.args.st_bar_nls)],
               color='green',
               line_dash='dashed')
        p.line(xlim,
               [(self.action.args.av_bar_nls + self.action.args.st_bar_nls),
                (self.action.args.av_bar_nls + self.action.args.st_bar_nls)],
               color='green',
               line_dash='dashed')
        p.xgrid.grid_line_color = None
        oplot_slices(p, ylim)
        set_plot_lims(p, xlim=xlim, ylim=ylim)
        if self.config.instrument.plot_level >= 1:
            bokeh_plot(p, self.context.bokeh_session)
            if self.config.instrument.plot_level >= 2:
                input("Next? <cr>: ")
            else:
                time.sleep(self.config.instrument.plot_pause)
        # save N lines plot
        save_plot(p, filename=pfname + '_nlines.png')

        # Plot fit sigmas
        self.action.args.av_bar_sig = float(np.nanmean(bar_sig))
        self.action.args.st_bar_sig = float(np.nanstd(bar_sig))
        self.logger.info(
            "<STD>     = %.3f +- %.3f (A)" %
            (self.action.args.av_bar_sig, self.action.args.st_bar_sig))

        ptitle = self.action.args.plotlabel + \
            "FIT STATS <RMS> = %.3f +- %.3f" % (self.action.args.av_bar_sig,
                                                self.action.args.st_bar_sig)
        p = figure(title=ptitle,
                   x_axis_label="Bar #",
                   y_axis_label="RMS (A)",
                   plot_width=self.config.instrument.plot_width,
                   plot_height=self.config.instrument.plot_height)
        p.diamond(list(range(120)), bar_sig, size=8)
        xlim = [-1, 120]
        ylim = get_plot_lims(bar_sig)
        p.line(xlim,
               [self.action.args.av_bar_sig, self.action.args.av_bar_sig],
               color='red')
        p.line(xlim,
               [(self.action.args.av_bar_sig - self.action.args.st_bar_sig),
                (self.action.args.av_bar_sig - self.action.args.st_bar_sig)],
               color='green',
               line_dash='dashed')
        p.line(xlim,
               [(self.action.args.av_bar_sig + self.action.args.st_bar_sig),
                (self.action.args.av_bar_sig + self.action.args.st_bar_sig)],
               color='green',
               line_dash='dashed')
        p.xgrid.grid_line_color = None
        oplot_slices(p, ylim)
        set_plot_lims(p, xlim=xlim, ylim=ylim)
        if self.config.instrument.plot_level >= 1:
            bokeh_plot(p, self.context.bokeh_session)
            if self.config.instrument.plot_level >= 2:
                input("Next? <cr>: ")
            else:
                time.sleep(self.config.instrument.plot_pause)

        # save residual plot
        save_plot(p, filename=pfname + '_resid.png')

        log_string = SolveArcs.__module__
        self.action.args.ccddata.header['HISTORY'] = log_string
        self.logger.info(log_string)

        return self.action.args
Example #13
    def pre_processing(self):
        """
        Complete various pre-processing steps for encoded protein sequences before
        doing any of the DSP-related functions or transformations. Zero-pad
        the sequences, remove any +/- infinity or NAN values, get the approximate
        protein spectra and window function parameter names.

        Parameters
        ----------
        :self (PyDSP object): 
            instance of PyDSP class.
            
        Returns
        -------
        None

        """
        #zero-pad encoded sequences so they are all the same length
        self.protein_seqs = zero_padding(self.protein_seqs)

        #get shape parameters of proteins seqs
        self.num_seqs = self.protein_seqs.shape[0]
        self.signal_len = self.protein_seqs.shape[1]

        #replace any positive or negative infinity or NAN values with 0
        self.protein_seqs[self.protein_seqs == -np.inf] = 0
        self.protein_seqs[self.protein_seqs == np.inf] = 0
        self.protein_seqs[np.isnan(self.protein_seqs)] = 0

        #replace any NAN's with 0's
        #self.protein_seqs.fillna(0, inplace=True)
        self.protein_seqs = np.nan_to_num(self.protein_seqs)

        #initialise zeros array to store all protein spectra
        self.fft_power = np.zeros((self.num_seqs, self.signal_len))
        self.fft_real = np.zeros((self.num_seqs, self.signal_len))
        self.fft_imag = np.zeros((self.num_seqs, self.signal_len))
        self.fft_abs = np.zeros((self.num_seqs, self.signal_len))

        #list of accepted spectra, window functions and filters
        all_spectra = ['power', 'absolute', 'real', 'imaginary']
        all_windows = [
            'hamming', 'blackman', 'blackmanharris', 'gaussian', 'bartlett',
            'kaiser', 'barthann', 'bohman', 'chebwin', 'cosine', 'exponential',
            'flattop', 'hann', 'boxcar', 'hanning', 'nuttall', 'parzen',
            'triang', 'tukey'
        ]
        all_filters = [
            'savgol', 'medfilt', 'symiirorder1', 'lfilter', 'hilbert'
        ]

        #set required input parameters, raise error if spectrum is none
        if self.spectrum is None:
            raise ValueError(
                'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                .format(self.spectrum, all_spectra))
        else:
            #get closest correct spectra from user input, if no close match then raise error
            spectra_matches = (get_close_matches(self.spectrum,
                                                 all_spectra,
                                                 cutoff=0.4))

            if spectra_matches == []:
                raise ValueError(
                    'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                    .format(self.spectrum, all_spectra))
            else:
                self.spectra = spectra_matches[0]  #closest match in array

        if self.window_type is None:
            self.window = 1  #window = 1 is the same as applying no window
        else:
            #get closest correct window function from user input
            window_matches = (get_close_matches(self.window,
                                                all_windows,
                                                cutoff=0.4))

            #check if sym=True or sym=False
            #get window function specified by window input parameter, if no match then window = 1
            if window_matches != []:
                if window_matches[0] == 'hamming':
                    self.window = hamming(self.signal_len, sym=True)
                    self.window_type = "hamming"
                elif window_matches[0] == "blackman":
                    self.window = blackman(self.signal_len, sym=True)
                    self.window = "blackman"
                elif window_matches[0] == "blackmanharris":
                    self.window = blackmanharris(self.signal_len,
                                                 sym=True)  #**
                    self.window_type = "blackmanharris"
                elif window_matches[0] == "bartlett":
                    self.window = bartlett(self.signal_len, sym=True)
                    self.window_type = "bartlett"
                elif window_matches[0] == "gaussian":
                    self.window = gaussian(self.signal_len, std=7, sym=True)
                    self.window_type = "gaussian"
                elif window_matches[0] == "kaiser":
                    self.window = kaiser(self.signal_len, beta=14, sym=True)
                    self.window_type = "kaiser"
                elif window_matches[0] == "hanning":
                    self.window = hanning(self.signal_len, sym=True)
                    self.window_type = "hanning"
                elif window_matches[0] == "barthann":
                    self.window = barthann(self.signal_len, sym=True)
                    self.window_type = "barthann"
                elif window_matches[0] == "bohman":
                    self.window = bohman(self.signal_len, sym=True)
                    self.window_type = "bohman"
                elif window_matches[0] == "chebwin":
                    self.window = chebwin(self.signal_len, sym=True)
                    self.window_type = "chebwin"
                elif window_matches[0] == "cosine":
                    self.window = cosine(self.signal_len, sym=True)
                    self.window_type = "cosine"
                elif window_matches[0] == "exponential":
                    self.window = exponential(self.signal_len, sym=True)
                    self.window_type = "exponential"
                elif window_matches[0] == "flattop":
                    self.window = flattop(self.signal_len, sym=True)
                    self.window_type = "flattop"
                elif window_matches[0] == "boxcar":
                    self.window = boxcar(self.signal_len, sym=True)
                    self.window_type = "boxcar"
                elif window_matches[0] == "nuttall":
                    self.window = nuttall(self.signal_len, sym=True)
                    self.window_type = "nuttall"
                elif window_matches[0] == "parzen":
                    self.window = parzen(self.signal_len, sym=True)
                    self.window_type = "parzen"
                elif window_matches[0] == "triang":
                    self.window = triang(self.signal_len, sym=True)
                    self.window_type = "triang"
                elif window_matches[0] == "tukey":
                    self.window = tukey(self.signal_len, sym=True)
                    self.window_type = "tukey"

            else:
                self.window = 1  #window = 1 is the same as applying no window

        #calculate convolution from protein sequences
        if self.convolution is not None:
            if self.window is not None:
                self.convoled_seqs = signal.convolve(
                    self.protein_seqs, self.window, mode='same') / sum(
                        self.window)

        if self.filter is not None:
            #get closest correct filter from user input
            filter_matches = (get_close_matches(self.filter,
                                                all_filters,
                                                cutoff=0.4))

            #set filter attribute according to approximate user input
            if filter_matches != []:
                if filter_matches[0] == 'savgol':
                    self.filter = savgol_filter(self.signal_len,
                                                self.signal_len)
                elif filter_matches[0] == 'medfilt':
                    self.filter = medfilt(self.signal_len)
                elif filter_matches[0] == 'symiirorder1':
                    self.filter = symiirorder1(self.signal_len, c0=1, z1=1)
                elif filter_matches[0] == 'lfilter':
                    self.filter = lfilter(self.signal_len)
                elif filter_matches[0] == 'hilbert':
                    self.filter = hilbert(self.signal_len)
            else:
                self.filter = ""  #no filter
Example #14
class PyQtGraphPlotter(QtWidgets.QGroupBox):
    @enum.unique
    class WindowTypes(enum.Enum):
        Rectangular = 0
        Hann = 1
        Flattop = 2
        Tukey_5Percent = 3

    windowFunctionMap = {
        WindowTypes.Rectangular:
        lambda M: windows.boxcar(M, sym=False),
        WindowTypes.Hann:
        lambda M: windows.hann(M, sym=False),
        WindowTypes.Flattop:
        lambda M: windows.flattop(M, sym=False),
        WindowTypes.Tukey_5Percent:
        lambda M: windows.tukey(M, sym=False, alpha=0.05),
    }

    dataIsPower = False
    prevDataSet = None
    curDataSet = None

    axes_units = [ureg.dimensionless]
    data_unit = ureg.dimensionless

    def __init__(self, parent=None):
        _style_pg()

        super().__init__(parent)

        pal = self.palette()
        highlightPen = pg.mkPen(
            pal.color(QtGui.QPalette.Highlight).darker(120))
        darkerHighlightPen = pg.mkPen(highlightPen.color().darker(120))
        alphaColor = darkerHighlightPen.color()
        alphaColor.setAlphaF(0.25)
        darkerHighlightPen.setColor(alphaColor)

        self.toolbar = QtWidgets.QToolBar(self)
        self.toolbar.addWidget(QtWidgets.QLabel("Fourier transform window:"))
        self.windowComboBox = QtWidgets.QComboBox(self.toolbar)
        for e in self.WindowTypes:
            self.windowComboBox.addItem(e.name, e)
        self.toolbar.addWidget(self.windowComboBox)
        self.windowComboBox.currentIndexChanged.connect(self._updateFTWindow)

        self.pglwidget = pg.GraphicsLayoutWidget(self)
        self.pglwidget.setBackground(None)

        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(self.toolbar)
        vbox.addWidget(self.pglwidget)

        self.plot = self.pglwidget.addPlot(row=0, col=0)
        self.ft_plot = self.pglwidget.addPlot(row=1, col=0)

        self.plot.setLabels(title="Data")
        self.ft_plot.setLabels(title="Magnitude spectrum")

        self.plot.showGrid(x=True, y=True)
        self.ft_plot.showGrid(x=True, y=True)

        self._make_plot_background(self.plot)
        self._make_plot_background(self.ft_plot)

        self._lines = []
        self._lines.append(self.plot.plot())
        self._lines.append(self.plot.plot())
        self._lines[0].setPen(darkerHighlightPen)
        self._lines[1].setPen(highlightPen)

        self._ft_lines = []
        self._ft_lines.append(self.ft_plot.plot())
        self._ft_lines.append(self.ft_plot.plot())
        self._ft_lines[0].setPen(darkerHighlightPen)
        self._ft_lines[1].setPen(highlightPen)

        self._lastPlotTime = time.perf_counter()

    def _make_plot_background(self, plot, brush=None):
        if brush is None:
            brush = pg.mkBrush(self.palette().color(QtGui.QPalette.Base))

        vb_bg = QtWidgets.QGraphicsRectItem(plot)
        vb_bg.setRect(plot.vb.rect())
        vb_bg.setBrush(brush)
        vb_bg.setFlag(QtWidgets.QGraphicsItem.ItemStacksBehindParent)
        vb_bg.setZValue(-1e9)
        plot.vb.sigResized.connect(lambda x: vb_bg.setRect(x.geometry()))

    def setLabels(self, axesLabels, dataLabel):
        self.axesLabels = axesLabels
        self.dataLabel = dataLabel

        self.updateLabels()

    def updateLabels(self):
        self.plot.setLabels(bottom='{} [{:C~}]'.format(self.axesLabels[0],
                                                       self.axes_units[0]),
                            left='{} [{:C~}]'.format(self.dataLabel,
                                                     self.data_unit))

        ftUnits = self.data_unit
        if not self.dataIsPower:
            ftUnits = ftUnits**2

        self.ft_plot.setLabels(bottom='1 / {} [{:C~}]'.format(
            self.axesLabels[0], (1 / self.axes_units[0]).units),
                               left='Power [dB-({:C~})]'.format(ftUnits))

    def get_ft_data(self, data):
        delta = np.mean(np.diff(data.axes[0]))
        winFn = self.windowFunctionMap[self.windowComboBox.currentData()]
        refUnit = 1 * self.data_unit
        Y = np.fft.rfft(data.data / refUnit * winFn(len(data.data)), axis=0)
        freqs = np.fft.rfftfreq(len(data.axes[0]), delta)
        dBdata = 10 * np.log10(np.abs(Y))
        if not self.dataIsPower:
            dBdata *= 2
        return (freqs, dBdata)

    def _updateFTWindow(self):
        if self.prevDataSet:
            F, dBdata = self.get_ft_data(self.prevDataSet)
            self._ft_lines[0].setData(x=F, y=dBdata)

        if self.curDataSet:
            F, dBdata = self.get_ft_data(self.curDataSet)
            self._ft_lines[1].setData(x=F, y=dBdata)

    def drawDataSet(self, newDataSet, *args):
        plotTime = time.perf_counter()

        # artificially limit the replot rate to 20 Hz
        if (plotTime - self._lastPlotTime < 0.05):
            return

        self._lastPlotTime = plotTime

        self.prevDataSet = self.curDataSet
        self.curDataSet = newDataSet

        if (self.curDataSet.data.units != self.data_unit
                or self.curDataSet.axes[0].units != self.axes_units[0]):
            self.data_unit = self.curDataSet.data.units
            self.axes_units[0] = self.curDataSet.axes[0].units
            self.updateLabels()

        if self.prevDataSet:
            self._lines[0].setData(x=self._lines[1].xData,
                                   y=self._lines[1].yData)
            self._ft_lines[0].setData(x=self._ft_lines[1].xData,
                                      y=self._ft_lines[1].yData)
        if self.curDataSet:
            self._lines[1].setData(x=self.curDataSet.axes[0],
                                   y=self.curDataSet.data)
            F, dBdata = self.get_ft_data(self.curDataSet)
            self._ft_lines[1].setData(x=F, y=dBdata)
Example #15
    def _perform(self):
        """
        Returns an Argument() with the parameters that depends on this operation
        """
        self.logger.info("Creating master illumination correction")

        suffix = self.action.args.new_type.lower()
        insuff = self.action.args.stack_type.lower()

        stack_list = list(self.stack_list['filename'])

        if len(stack_list) <= 0:
            self.logger.warning("No flats found!")
            return self.action.args

        # get root for maps
        tab = self.context.proctab.search_proctab(
            frame=self.action.args.ccddata, target_type='ARCLAMP',
            target_group=self.action.args.groupid)
        if len(tab) <= 0:
            self.logger.error("Geometry not solved!")
            return self.action.args

        mroot = strip_fname(tab['filename'][-1])

        # Wavelength map image
        wmf = mroot + '_wavemap.fits'
        self.logger.info("Reading image: %s" % wmf)
        wavemap = kcwi_fits_reader(
            os.path.join(self.config.instrument.cwd, 'redux',
                         wmf))[0]

        # Slice map image
        slf = mroot + '_slicemap.fits'
        self.logger.info("Reading image: %s" % slf)
        slicemap = kcwi_fits_reader(os.path.join(
            self.config.instrument.cwd, 'redux',
                         slf))[0]

        # Position map image
        pof = mroot + '_posmap.fits'
        self.logger.info("Reading image: %s" % pof)
        posmap = kcwi_fits_reader(os.path.join(
            self.config.instrument.cwd, 'redux',
                         pof))[0]

        # Read in stacked flat image
        stname = strip_fname(stack_list[0]) + '_' + insuff + '.fits'

        self.logger.info("Reading image: %s" % stname)
        stacked = kcwi_fits_reader(os.path.join(
            self.config.instrument.cwd, 'redux',
                         stname))[0]

        # get type of flat
        internal = ('SFLAT' in stacked.header['IMTYPE'])
        twiflat = ('STWIF' in stacked.header['IMTYPE'])
        domeflat = ('SDOME' in stacked.header['IMTYPE'])

        if internal:
            self.logger.info("Internal Flat")
        elif twiflat:
            self.logger.info("Twilight Flat")
        elif domeflat:
            self.logger.info("Dome Flat")
        else:
            self.logger.error("Flat of Unknown Type!")
            return self.action.args

        # knots per pixel
        knotspp = self.config.instrument.KNOTSPP

        # get image size
        ny = stacked.header['NAXIS2']

        # get binning
        xbin = self.action.args.xbinsize

        # Parameters for fitting

        # vignetted slice position range
        fitl = int(4/xbin)
        fitr = int(24/xbin)

        # un-vignetted slice position range
        flatl = int(34/xbin)
        flatr = int(72/xbin)

        # flat fitting slice position range
        ffleft = int(10/xbin)
        ffright = int(70/xbin)
        nrefx = int(ffright - ffleft)

        buffer = 6.0/float(xbin)

        # reference slice
        refslice = 9
        allidx = np.arange(int(140/xbin))
        newflat = stacked.data.copy()

        # dichroic fraction
        try:
            dichroic_fraction = wavemap.header['DICHFRAC']
        except KeyError:
            dichroic_fraction = 1.

        # get reference slice data
        q = [i for i, v in enumerate(slicemap.data.flat) if v == refslice]
        # get wavelength limits
        waves = wavemap.data.compress((wavemap.data > 0.).flat)
        waves = [waves.min(), waves.max()]
        self.logger.info("Wavelength limits: %.1f - %1.f" % (waves[0],
                                                             waves[1]))

        # correct vignetting if we are using internal flats
        if internal:
            self.logger.info("Internal flats require vignetting correction")
            # get good region for fitting
            if self.action.args.camera == 0:    # Blue
                wmin = waves[0]
                wmax = min([waves[1], 5620.])
            elif self.action.args.camera == 1:  # Red
                wmin = max([waves[0], 5580.])
                wmax = waves[1]
            else:
                self.logger.warning("Camera keyword not defined")
                wmin = waves[0]
                wmax = waves[1]
            dw = (wmax - wmin) / 30.0
            wavemin = (wmin+wmax) / 2.0 - dw
            wavemax = (wmin+wmax) / 2.0 + dw
            self.logger.info("Using %.1f - %.1f A of slice %d" % (wavemin,
                                                                  wavemax,
                                                                  refslice))
            xflat = []
            yflat = []
            wflat = []
            qq = []
            for i in q:
                if wavemin < wavemap.data.flat[i] < wavemax:
                    xflat.append(posmap.data.flat[i])
                    yflat.append(stacked.data.flat[i])
                    wflat.append(wavemap.data.flat[i])
                    qq.append(i)
            # get un-vignetted portion
            qflat = [i for i, v in enumerate(xflat) if flatl <= v <= flatr]
            xflat = [xflat[i] for i in qflat]
            yflat = [yflat[i] for i in qflat]
            wflat = [wflat[i] for i in qflat]
            # sort on wavelength
            sw = np.argsort(wflat)
            ywflat = [yflat[i] for i in sw]
            wwflat = [wflat[i] for i in sw]
            ww0 = np.min(wwflat)
            # fit wavelength slope
            wavelinfit = np.polyfit(wwflat-ww0, ywflat, 2)
            wslfit = np.polyval(wavelinfit, wflat-ww0)
            # plot slope fit
            if self.config.instrument.plot_level >= 1:
                p = figure(title=self.action.args.plotlabel + ' WAVE SLOPE FIT',
                           x_axis_label='wave px',
                           y_axis_label='counts',
                           plot_width=self.config.instrument.plot_width,
                           plot_height=self.config.instrument.plot_height)
                p.circle(wwflat, ywflat, legend_label="Data")
                p.line(wflat, wslfit, line_color='red', line_width=3,
                       legend_label="Fit")
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)
            # take out slope
            yflat = yflat / wslfit
            # now sort on slice position
            ss = np.argsort(xflat)
            xflat = [xflat[i] for i in ss]
            yflat = [yflat[i] for i in ss]
            # fit un-vignetted slope
            resflat = np.polyfit(xflat, yflat, 1)

            # select the points we will fit for the vignetting
            # get reference region
            xfit = [posmap.data.flat[i] for i in qq]
            yfit = [stacked.data.flat[i] for i in qq]
            wflat = [wavemap.data.flat[i] for i in qq]
            # take out wavelength slope
            yfit = yfit / np.polyval(wavelinfit, wflat-ww0)

            # select the vignetted region
            qfit = [i for i, v in enumerate(xfit) if fitl <= v <= fitr]
            xfit = [xfit[i] for i in qfit]
            yfit = [yfit[i] for i in qfit]
            # sort on slice position
            s = np.argsort(xfit)
            xfit = [xfit[i] for i in s]
            yfit = [yfit[i] for i in s]
            # fit vignetted slope
            resfit = np.polyfit(xfit, yfit, 1)
            # corrected data
            ycdata = stacked.data.flat[qq] / \
                np.polyval(wavelinfit, wavemap.data.flat[qq]-ww0)
            ycmin = 0.5     # np.min(ycdata)
            ycmax = 1.25    # np.max(ycdata)
            # compute the intersection
            xinter = -(resflat[1] - resfit[1]) / (resflat[0] - resfit[0])
            # plot slice profile and fits
            if self.config.instrument.plot_level >= 1:
                p = figure(title=self.action.args.plotlabel + ' Vignetting',
                           x_axis_label='Slice Pos (px)',
                           y_axis_label='Ratio',
                           plot_width=self.config.instrument.plot_width,
                           plot_height=self.config.instrument.plot_height)
                p.circle(posmap.data.flat[qq], ycdata, legend_label='Data')
                p.line(allidx, resfit[1] + resfit[0]*allidx,
                       line_color='purple', legend_label='Vign.')
                p.line(allidx, resflat[1] + resflat[0]*allidx, line_color='red',
                       legend_label='UnVign.')
                p.line([fitl, fitl], [ycmin, ycmax], line_color='blue')
                p.line([fitr, fitr], [ycmin, ycmax], line_color='blue')
                p.line([flatl, flatl], [ycmin, ycmax], line_color='green')
                p.line([flatr, flatr], [ycmin, ycmax], line_color='green')
                p.line([xinter-buffer, xinter-buffer], [ycmin, ycmax],
                       line_color='black')
                p.line([xinter + buffer, xinter + buffer], [ycmin, ycmax],
                       line_color='black')
                p.line([xinter, xinter], [ycmin, ycmax], line_color='red')
                p.y_range = Range1d(ycmin, ycmax)
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)

            # figure out where the correction applies
            qcor = [i for i, v in enumerate(posmap.data.flat)
                    if 0 <= v <= (xinter-buffer)]
            # apply the correction!
            self.logger.info("Applying vignetting correction...")
            for i in qcor:
                newflat.flat[i] = (resflat[1]+resflat[0]*posmap.data.flat[i]) \
                                / (resfit[1]+resfit[0]*posmap.data.flat[i]) * \
                                stacked.data.flat[i]
            # now deal with the intermediate (buffer) region
            self.logger.info("Done, now handling buffer region")
            # get buffer points to fit in reference region
            qbff = [i for i in qq if (xinter-buffer) <=
                    posmap.data.flat[i] <= (xinter+buffer)]
            # get slice pos and data for buffer fitting
            xbuff = [posmap.data.flat[i] for i in qbff]
            ybuff = [stacked.data.flat[i] / np.polyval(wavelinfit,
                                                       wavemap.data.flat[i]-ww0)
                     for i in qbff]
            # sort on slice position
            ssp = np.argsort(xbuff)
            xbuff = [xbuff[i] for i in ssp]
            ybuff = [ybuff[i] for i in ssp]
            # fit buffer with low-order poly
            buffit = np.polyfit(xbuff, ybuff, 3)
            # plot buffer fit
            if self.config.instrument.plot_level >= 1:
                p = figure(title=self.action.args.plotlabel + ' Buffer Region',
                           x_axis_label='Slice Pos (px)',
                           y_axis_label='Ratio',
                           plot_width=self.config.instrument.plot_width,
                           plot_height=self.config.instrument.plot_height)
                p.circle(xbuff, ybuff)
                p.line(xbuff, np.polyval(buffit, xbuff), line_color='red')
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)
            # get all buffer points in image
            qbuf = [i for i, v in enumerate(posmap.data.flat)
                    if (xinter-buffer) <= v <= (xinter+buffer)]
            # apply buffer correction to all buffer points in newflat
            for i in qbuf:
                newflat.flat[i] = \
                    (resflat[1] + resflat[0] * posmap.data.flat[i]) / \
                    np.polyval(buffit, posmap.data.flat[i]) * newflat.flat[i]
            self.logger.info("Vignetting correction complete.")

        self.logger.info("Fitting master illumination")
        # now fit master flat
        # get reference slice points
        qref = [i for i in q if ffleft <= posmap.data.flat[i] <= ffright]
        xfr = wavemap.data.flat[qref]
        yfr = newflat.flat[qref]
        # sort on wavelength
        s = np.argsort(xfr)
        xfr = xfr[s]
        yfr = yfr[s]

        wavegood0 = wavemap.header['WAVGOOD0']
        wavegood1 = wavemap.header['WAVGOOD1']

        # correction for BM where we see a ledge
        if 'BM' in self.action.args.grating:
            ledge_wave = bm_ledge_position(self.action.args.cwave)

            self.logger.info("BM ledge calculated wavelength "
                             "for ref slice = %.2f (A)" % ledge_wave)
            if wavegood0 <= ledge_wave <= wavegood1:
                self.logger.info("BM grating requires correction")
                qledge = [i for i, v in enumerate(xfr)
                          if ledge_wave-25 <= v <= ledge_wave+25]
                xledge = [xfr[i] for i in qledge]
                yledge = [yfr[i] for i in qledge]
                s = np.argsort(xledge)
                xledge = [xledge[i] for i in s]
                yledge = [yledge[i] for i in s]
                win = boxcar(250)
                smyledge = sp.signal.convolve(yledge,
                                              win, mode='same') / sum(win)
                ylmax = np.max(yledge)
                ylmin = np.min(yledge)
                fpoints = np.arange(0, 100) / 100. * 50 + (ledge_wave-25)
                ledgefit, ledgemsk = Bspline.iterfit(np.asarray(xledge),
                                                     smyledge, fullbkpt=fpoints,
                                                     upper=1, lower=1)
                ylfit, _ = ledgefit.value(np.asarray(fpoints))
                deriv = -(np.roll(ylfit, 1) - np.roll(ylfit, -1)) / 2.0
                # trim edges
                trm = int(len(deriv)/5)
                deriv = deriv[trm:-trm]
                xvals = fpoints[trm:-trm]
                peaks, _ = find_peaks(deriv, height=100)
                if len(peaks) != 1:
                    self.logger.warning("Expected exactly one peak, "
                                        "found %d!" % len(peaks))
                    p = figure(title=self.action.args.plotlabel +
                               ' Ledge', x_axis_label='Wavelength (A)',
                               y_axis_label='Value',
                               plot_width=self.config.instrument.plot_width,
                               plot_height=self.config.instrument.plot_height)
                    p.circle(xledge, smyledge, fill_color='green')
                    p.line(fpoints, ylfit)
                    bokeh_plot(p, self.context.bokeh_session)
                    input("Next? <cr>: ")
                    p = figure(title=self.action.args.plotlabel +
                               ' Deriv', x_axis_label='px',
                               y_axis_label='Value',
                               plot_width=self.config.instrument.plot_width,
                               plot_height=self.config.instrument.plot_height)
                    xx = list(range(len(deriv)))
                    ylim = get_plot_lims(deriv)
                    p.circle(xx, deriv)
                    for pk in peaks:
                        p.line([pk, pk], ylim)
                    bokeh_plot(p, self.context.bokeh_session)
                    print("Please indicate the integer pixel value of the peak")
                    ipk = int(input("Peak? <int>: "))
                else:
                    ipk = peaks[0]
                apk = xvals[ipk]
                if self.config.instrument.plot_level >= 3:
                    p = figure(
                        title=self.action.args.plotlabel + ' Peak of ledge',
                        x_axis_label='Wave (A)',
                        y_axis_label='Value',
                        plot_width=self.config.instrument.plot_width,
                        plot_height=self.config.instrument.plot_height)
                    p.circle(xvals, deriv, legend_label='Data')
                    p.line([apk, apk], [-50, 200], line_color='red',
                           legend_label='Pk')
                    bokeh_plot(p, self.context.bokeh_session)
                    if self.config.instrument.plot_level >= 2:
                        input("Next? <cr>: ")
                    else:
                        time.sleep(self.config.instrument.plot_pause)
                xlow = apk - 3 - 5
                xhi = apk - 3
                zlow = apk + 3
                zhi = apk + 3 + 5
                qlow = [i for i, v in enumerate(fpoints) if xlow <= v <= xhi]
                xlf = np.asarray([fpoints[i] for i in qlow])
                ylf = np.asarray([ylfit[i] for i in qlow])
                lowfit = np.polyfit(xlf, ylf, 1)
                qhi = [i for i, v in enumerate(fpoints) if zlow <= v <= zhi]
                xlf = np.asarray([fpoints[i] for i in qhi])
                ylf = np.asarray([ylfit[i] for i in qhi])
                hifit = np.polyfit(xlf, ylf, 1)
                ratio = (hifit[1] + hifit[0] * apk) / \
                        (lowfit[1] + lowfit[0] * apk)
                self.logger.info("BM ledge ratio: %.3f" % ratio)
                # correct flat data
                qcorr = [i for i, v in enumerate(xfr) if v >= apk]
                for i in qcorr:
                    yfr[i] /= ratio
                # plot BM ledge
                if self.config.instrument.plot_level >= 1:
                    p = figure(
                        title=self.action.args.plotlabel + ' BM Ledge Region',
                        x_axis_label='Wave (A)',
                        y_axis_label='Value',
                        plot_width=self.config.instrument.plot_width,
                        plot_height=self.config.instrument.plot_height)
                    # Input data
                    p.circle(xledge, yledge, fill_color='blue',
                             legend_label='Data')
                    # correct input data
                    qcorrect = [i for i, v in enumerate(xledge) if v >= apk]
                    xplt = []
                    yplt = []
                    for i in qcorrect:
                        xplt.append(xledge[i])
                        yplt.append(yledge[i] / ratio)
                    p.circle(xplt, yplt, fill_color='orange',
                             legend_label='Corrected')
                    p.line(fpoints, ylfit, line_color='red', legend_label='Fit')
                    p.line([xlow, xlow], [ylmin, ylmax], line_color='blue')
                    p.line([xhi, xhi], [ylmin, ylmax], line_color='blue')
                    p.line([zlow, zlow], [ylmin, ylmax], line_color='black')
                    p.line([zhi, zhi], [ylmin, ylmax], line_color='black')
                    p.line(fpoints, lowfit[1] + lowfit[0] * fpoints,
                           line_color='purple')
                    p.line(fpoints, hifit[1] + hifit[0] * fpoints,
                           line_color='green')
                    p.line([apk, apk], [ylmin, ylmax], line_color='green',
                           legend_label='Pk')
                    p.y_range = Range1d(ylmin, ylmax)
                    p.legend.location = 'top_left'
                    bokeh_plot(p, self.context.bokeh_session)
                    if self.config.instrument.plot_level >= 2:
                        input("Next? <cr>: ")
                    else:
                        time.sleep(self.config.instrument.plot_pause)
        # END: handling BM grating ledge

        # if we are fitting a twilight flat, treat it like a sky image with a
        # larger number of knots
        if twiflat:
            knots = int(ny * knotspp)
        else:
            knots = 100
        self.logger.info("Using %d knots for bspline fit" % knots)

        # generate a fit from ref slice points
        bkpt = np.min(xfr) + np.arange(knots+1) * \
            (np.max(xfr) - np.min(xfr)) / knots
        sftr, _ = Bspline.iterfit(xfr[nrefx:-nrefx], yfr[nrefx:-nrefx],
                                  fullbkpt=bkpt)
        yfitr, _ = sftr.value(xfr)

        # generate a blue slice spectrum bspline fit
        blueslice = 12
        blueleft = 60 / xbin
        blueright = 80 / xbin
        qb = [i for i, v in enumerate(slicemap.data.flat) if v == blueslice]
        qblue = [i for i in qb if blueleft <= posmap.data.flat[i] <= blueright]
        xfb = wavemap.data.flat[qblue]
        yfb = newflat.flat[qblue]
        s = np.argsort(xfb)
        xfb = xfb[s]
        yfb = yfb[s]
        bkpt = np.min(xfb) + np.arange(knots+1) * \
            (np.max(xfb) - np.min(xfb)) / knots
        sftb, _ = Bspline.iterfit(xfb[nrefx:-nrefx], yfb[nrefx:-nrefx],
                                  fullbkpt=bkpt)
        yfitb, _ = sftb.value(xfb)

        # generate a red slice spectrum bspline fit
        redslice = 23
        redleft = 60 / xbin
        redright = 80 / xbin
        qr = [i for i, v in enumerate(slicemap.data.flat) if v == redslice]
        qred = [i for i in qr if redleft <= posmap.data.flat[i] <= redright]
        xfd = wavemap.data.flat[qred]
        yfd = newflat.flat[qred]
        s = np.argsort(xfd)
        xfd = xfd[s]
        yfd = yfd[s]
        bkpt = np.min(xfd) + np.arange(knots + 1) * \
            (np.max(xfd) - np.min(xfd)) / knots
        sftd, _ = Bspline.iterfit(xfd[nrefx:-nrefx], yfd[nrefx:-nrefx],
                                  fullbkpt=bkpt)
        yfitd, _ = sftd.value(xfd)

        # waves
        minwave = np.min(xfb)
        maxwave = np.max(xfd)
        # are we a twilight flat?
        if twiflat:
            nwaves = int(ny * knotspp)
        else:
            nwaves = 1000
        waves = minwave + (maxwave - minwave) * np.arange(nwaves+1) / nwaves
        if self.config.instrument.plot_level >= 1:
            # output filename stub
            rbfnam = "redblue_%05d_%s_%s_%s" % \
                      (self.action.args.ccddata.header['FRAMENO'],
                       self.action.args.illum, self.action.args.grating,
                       self.action.args.ifuname)
            if xbin == 1:
                stride = int(len(xfr) / 8000.)
                if stride <= 0:
                    stride = 1
            else:
                stride = 1
            xrplt = xfr[::stride]
            yrplt = yfitr[::stride]
            yrplt_d = yfr[::stride]
            xbplt = xfb[::stride]
            ybplt = yfitb[::stride]
            ybplt_d = yfb[::stride]
            xdplt = xfd[::stride]
            ydplt = yfitd[::stride]
            ydplt_d = yfd[::stride]
            p = figure(
                title=self.action.args.plotlabel + ' Blue/Red fits',
                x_axis_label='Wave (A)',
                y_axis_label='Flux (e-)',
                plot_width=self.config.instrument.plot_width,
                plot_height=self.config.instrument.plot_height)
            p.line(xrplt, yrplt, line_color='black', legend_label='Ref')
            p.circle(xrplt, yrplt_d, size=1, line_alpha=0., fill_color='black')
            p.line(xbplt, ybplt, line_color='blue', legend_label='Blue')
            p.circle(xbplt, ybplt_d, size=1, line_alpha=0., fill_color='blue')
            p.line(xdplt, ydplt, line_color='red', legend_label='Red')
            p.circle(xdplt, ydplt_d, size=1, line_alpha=0., fill_color='red')
            bokeh_plot(p, self.context.bokeh_session)
            if self.config.instrument.plot_level >= 2:
                input("Next? <cr>: ")
            else:
                time.sleep(self.config.instrument.plot_pause)
            save_plot(p, filename=rbfnam+'.png')

        wavebuffer = 0.1
        minrwave = np.min(xfr)
        maxrwave = np.max(xfr)
        wavebuffer2 = 0.05
        wlb0 = minrwave+(maxrwave-minrwave)*wavebuffer2
        wlb1 = minrwave+(maxrwave-minrwave)*wavebuffer
        wlr0 = minrwave+(maxrwave-minrwave)*(1.-wavebuffer)
        wlr1 = minrwave+(maxrwave-minrwave)*(1.-wavebuffer2)
        qbluefit = [i for i, v in enumerate(waves) if wlb0 < v < wlb1]
        qredfit = [i for i, v in enumerate(waves) if wlr0 < v < wlr1]

        nqb = len(qbluefit)
        nqr = len(qredfit)
        self.logger.info("Wavelength regions: blue = %.1f - %.1f, "
                         "red = %.1f - %.1f" % (wlb0, wlb1, wlr0, wlr1))
        self.logger.info("Fit points: blue = %d, red = %d" % (nqb, nqr))

        if nqb > 0:
            bluefit, _ = sftb.value(waves[qbluefit])
            refbluefit, _ = sftr.value(waves[qbluefit])
            blue_zero_cross = np.nanmin(bluefit) <= 0. or np.nanmin(
                refbluefit) <= 0.
            bluelinfit = np.polyfit(waves[qbluefit], refbluefit/bluefit, 1)
            bluelinfity = bluelinfit[1] + bluelinfit[0] * waves[qbluefit]
        else:
            bluefit = None
            blue_zero_cross = False
            refbluefit = None
            bluelinfit = None
            bluelinfity = None
        if nqr > 0:
            redfit, _ = sftd.value(waves[qredfit])
            refredfit, _ = sftr.value(waves[qredfit])
            red_zero_cross = np.nanmin(redfit) <= 0. or np.nanmin(
                refredfit) <= 0.
            redlinfit = np.polyfit(waves[qredfit], refredfit/redfit, 1)
            redlinfity = redlinfit[1] + redlinfit[0] * waves[qredfit]
        else:
            redfit = None
            red_zero_cross = False
            refredfit = None
            redlinfit = None
            redlinfity = None
        if blue_zero_cross:
            self.logger.info("Blue extension zero crossing detected")
        if red_zero_cross:
            self.logger.info("Red extension zero crossing detected")
        if self.config.instrument.plot_level >= 1:
            if nqb > 1:
                # plot blue fits
                p = figure(
                    title=self.action.args.plotlabel + ' Blue fits',
                    x_axis_label='Wave (A)',
                    y_axis_label='Flux (e-)',
                    plot_width=self.config.instrument.plot_width,
                    plot_height=self.config.instrument.plot_height)
                p.line(waves[qbluefit], refbluefit, line_color='black',
                       legend_label='Ref')
                p.circle(waves[qbluefit], refbluefit, fill_color='black')
                p.line(waves[qbluefit], bluefit, line_color='blue',
                       legend_label='Blue')
                p.circle(waves[qbluefit], bluefit, fill_color='blue')
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)
                # plot blue ratios
                p = figure(
                    title=self.action.args.plotlabel + ' Blue ratios',
                    x_axis_label='Wave (A)',
                    y_axis_label='Ratio',
                    plot_width=self.config.instrument.plot_width,
                    plot_height=self.config.instrument.plot_height)
                p.line(waves[qbluefit], refbluefit/bluefit, line_color='black',
                       legend_label='Ref')
                p.circle(waves[qbluefit], refbluefit/bluefit,
                         fill_color='black')
                p.line(waves[qbluefit], bluelinfity,
                       line_color='blue', legend_label='Blue')
                p.circle(waves[qbluefit], bluelinfity,
                         fill_color='blue')
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)
            if nqr > 1:
                # plot red fits
                p = figure(
                    title=self.action.args.plotlabel + ' Red fits',
                    x_axis_label='Wave (A)',
                    y_axis_label='Flux (e-)',
                    plot_width=self.config.instrument.plot_width,
                    plot_height=self.config.instrument.plot_height)
                p.line(waves[qredfit], refredfit, line_color='black',
                       legend_label='Ref')
                p.circle(waves[qredfit], refredfit, fill_color='black')
                p.line(waves[qredfit], redfit, line_color='red',
                       legend_label='Red')
                p.circle(waves[qredfit], redfit, fill_color='red')
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)
                # plot red ratios
                p = figure(
                    title=self.action.args.plotlabel + ' Red ratios',
                    x_axis_label='Wave (A)',
                    y_axis_label='Ratio',
                    plot_width=self.config.instrument.plot_width,
                    plot_height=self.config.instrument.plot_height)
                p.line(waves[qredfit], refredfit/redfit, line_color='black',
                       legend_label='Ref')
                p.circle(waves[qredfit], refredfit/redfit, fill_color='black')
                p.line(waves[qredfit], redlinfity,
                       line_color='red', legend_label='Red')
                p.circle(waves[qredfit], redlinfity, fill_color='red')
                bokeh_plot(p, self.context.bokeh_session)
                if self.config.instrument.plot_level >= 2:
                    input("Next? <cr>: ")
                else:
                    time.sleep(self.config.instrument.plot_pause)

        # at this point we are going to try to merge the points
        self.logger.info("Correcting points outside %.1f - %.1f A"
                         % (minrwave, maxrwave))
        qselblue = [i for i, v in enumerate(xfb) if v <= minrwave]
        qselred = [i for i, v in enumerate(xfd) if v >= maxrwave]
        nqsb = len(qselblue)
        nqsr = len(qselred)
        blue_all_tie = yfitr[0]
        red_all_tie = yfitr[-1]
        self.logger.info("Blue/Red ref tie values: %.3f, %.3f"
                         % (blue_all_tie, red_all_tie))

        if nqsb > 0:
            self.logger.info("Blue ext tie value: %.3f" % yfb[qselblue[-1]])
            if blue_zero_cross:
                blue_offset = yfb[qselblue[-1]] - blue_all_tie
                bluefluxes = [yfb[i] - blue_offset for i in qselblue]
                self.logger.info("Blue zero crossing, only applying offset")
            else:
                blue_offset = yfb[qselblue[-1]] * \
                              (bluelinfit[1]+bluelinfit[0]*xfb[qselblue[-1]]) \
                              - blue_all_tie
                bluefluxes = [yfb[i] * (bluelinfit[1]+bluelinfit[0]*xfb[i])
                              - blue_offset for i in qselblue]
                self.logger.info("Blue linear ratio fit scaling applied")
            self.logger.info("Blue offset of %.2f applied" % blue_offset)
        else:
            bluefluxes = None
        if nqsr > 0:
            self.logger.info("Red ext tie value: %.3f" % yfd[qselred[0]])
            if red_zero_cross:
                red_offset = yfd[qselred[0]] - red_all_tie
                redfluxes = [yfd[i] - red_offset for i in qselred]
                self.logger.info("Red zero crossing, only applying offset")
            else:
                red_offset = yfd[qselred[0]] * \
                             (redlinfit[1]+redlinfit[0]*xfd[qselred[0]]) \
                             - red_all_tie
                redfluxes = [yfd[i] * (redlinfit[1]+redlinfit[0]*xfd[i])
                             - red_offset for i in qselred]
                self.logger.info("Red linear ratio fit scaling applied")
            self.logger.info("Red offset of %.2f applied" % red_offset)
        else:
            redfluxes = None
        allx = xfr
        allfx = xfr[nrefx:-nrefx]
        ally = yfr[nrefx:-nrefx]
        if nqsb > 0:
            bluex = xfb[qselblue]
            allx = np.append(bluex, allx)
            allfx = np.append(bluex[nrefx:], allfx)
            ally = np.append(bluefluxes[nrefx:], ally)
        if nqsr > 0:
            redx = xfd[qselred]
            allx = np.append(allx, redx)
            allfx = np.append(allfx, redx[:-nrefx])
            ally = np.append(ally, redfluxes[:-nrefx])
        s = np.argsort(allx)
        allx = allx[s]
        s = np.argsort(allfx)
        allfx = allfx[s]
        ally = ally[s]

        bkpt = np.min(allx) + np.arange(knots+1) * \
            (np.max(allx) - np.min(allx)) / knots
        sftall, _ = Bspline.iterfit(allfx, ally, fullbkpt=bkpt)
        yfitall, _ = sftall.value(allx)

        if self.config.instrument.plot_level >= 1:
            # output filename stub
            fltfnam = "flat_%05d_%s_%s_%s" % \
                      (self.action.args.ccddata.header['FRAMENO'],
                       self.action.args.illum, self.action.args.grating,
                       self.action.args.ifuname)
            if xbin == 1:
                stride = int(len(allx) / 8000.)
                if stride <= 0:
                    stride = 1
            else:
                stride = 1
            xplt = allfx[::stride]
            yplt = ally[::stride]
            fxplt = allx[::stride]
            fplt = yfitall[::stride]
            yran = [np.nanmin(ally), np.nanmax(ally)]
            p = figure(
                title=self.action.args.plotlabel + ' Master Illumination',
                x_axis_label='Wave (A)',
                y_axis_label='Flux (e-)',
                plot_width=self.config.instrument.plot_width,
                plot_height=self.config.instrument.plot_height)
            p.circle(xplt, yplt, size=1, line_alpha=0., fill_color='black',
                     legend_label='Data')
            p.line(fxplt, fplt, line_color='red', legend_label='Fit',
                   line_width=2)
            p.line([minrwave, minrwave], yran, line_color='orange',
                   legend_label='Cor lim')
            p.line([maxrwave, maxrwave], yran, line_color='orange')
            p.legend.location = "top_left"
            bokeh_plot(p, self.context.bokeh_session)
            if self.config.instrument.plot_level >= 2:
                input("Next? <cr>: ")
            else:
                time.sleep(self.config.instrument.plot_pause)
            save_plot(p, filename=fltfnam+".png")

        # OK, Now we have extended to the full range... so... we are going to
        # make a ratio flat!
        comflat = np.zeros(newflat.shape, dtype=float)
        qz = [i for i, v in enumerate(wavemap.data.flat) if v >= 0]

        comvals, _ = sftall.value(wavemap.data.flat[qz])

        comflat.flat[qz] = comvals
        ratio = np.zeros(newflat.shape, dtype=float)
        qzer = [i for i, v in enumerate(newflat.flat) if v != 0]
        ratio.flat[qzer] = comflat.flat[qzer] / newflat.flat[qzer]

        # trim negative points
        qq = [i for i, v in enumerate(ratio.flat) if v < 0]
        if len(qq) > 0:
            ratio.flat[qq] = 0.0

        # trim the high points near edges of slice
        qq = [i for i, v in enumerate(ratio.flat) if v >= 3. and
              (posmap.data.flat[i] <= 4/xbin or
               posmap.data.flat[i] >= 136/xbin)]
        if len(qq) > 0:
            ratio.flat[qq] = 0.0

        # don't correct low signal points
        qq = [i for i, v in enumerate(newflat.flat) if v < 30.]
        if len(qq) > 0:
            ratio.flat[qq] = 1.0

        # get master flat output name
        mfname = stack_list[0].split('.fits')[0] + '_' + suffix + '.fits'

        log_string = MakeMasterFlat.__module__
        stacked.header['IMTYPE'] = self.action.args.new_type
        stacked.header['HISTORY'] = log_string
        stacked.header['MASTFLAT'] = (True, 'master flat image?')
        stacked.header['WAVMAPF'] = wmf
        stacked.header['SLIMAPF'] = slf
        stacked.header['POSMAPF'] = pof

        # store flat in output frame
        stacked.data = ratio

        # output master flat
        kcwi_fits_writer(stacked, output_file=mfname,
                         output_dir=self.config.instrument.output_directory)
        self.context.proctab.update_proctab(frame=stacked, suffix=suffix,
                                            newtype=self.action.args.new_type,
                                            filename=self.action.args.name)
        self.context.proctab.write_proctab()

        self.logger.info(log_string)
        return self.action.args
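# --- Illustrative sketch (editor's addition, not part of the pipeline code above) ---
# The vignetting correction above fits straight lines to the vignetted and
# un-vignetted slice-position ranges, finds their intersection, and scales the
# vignetted side by the ratio of the two lines.  A minimal stand-alone version
# of that idea, assuming plain 1-D numpy arrays of slice position and counts
# (names and ranges here are hypothetical, chosen only for illustration):
import numpy as np

def vignetting_correction(pos, counts, vign_range, flat_range):
    """Return a multiplicative correction and the line-intersection position."""
    vsel = (pos >= vign_range[0]) & (pos <= vign_range[1])
    fsel = (pos >= flat_range[0]) & (pos <= flat_range[1])
    resfit = np.polyfit(pos[vsel], counts[vsel], 1)    # vignetted slope
    resflat = np.polyfit(pos[fsel], counts[fsel], 1)   # un-vignetted slope
    # intersection of the two lines: resfit(x) == resflat(x)
    xinter = -(resflat[1] - resfit[1]) / (resflat[0] - resfit[0])
    corr = np.ones_like(pos, dtype=float)
    qcor = pos <= xinter
    corr[qcor] = np.polyval(resflat, pos[qcor]) / np.polyval(resfit, pos[qcor])
    return corr, xinter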
Example #17
0
class MPLCanvas(QtWidgets.QGroupBox):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
    @enum.unique
    class WindowTypes(enum.Enum):
        Rectangular = 0
        Hann = 1
        Flattop = 2
        Tukey_5Percent = 3

    windowFunctionMap = {
        WindowTypes.Rectangular:
        lambda M: windows.boxcar(M, sym=False),
        WindowTypes.Hann:
        lambda M: windows.hann(M, sym=False),
        WindowTypes.Flattop:
        lambda M: windows.flattop(M, sym=False),
        WindowTypes.Tukey_5Percent:
        lambda M: windows.tukey(M, sym=False, alpha=0.05),
    }

    dataIsPower = False
    dataSet = None
    prevDataSet = None
    _prevAxesLabels = None
    _axesLabels = None
    _prevDataLabel = None
    _dataLabel = None

    _lastPlotTime = 0
    _isLiveData = False

    def __init__(self, parent=None):
        style_mpl()

        super().__init__(parent)

        dpi = QtWidgets.qApp.primaryScreen().logicalDotsPerInch()
        self.fig = Figure(dpi=dpi)
        self.fig.patch.set_alpha(0)

        self.axes = self.fig.add_subplot(2, 1, 1)
        self.ft_axes = self.fig.add_subplot(2, 1, 2)

        self.canvas = FigureCanvasQTAgg(self.fig)
        self.mpl_toolbar = NavigationToolbar2QT(self.canvas, self)

        self.mpl_toolbar.addSeparator()

        self.autoscaleAction = self.mpl_toolbar.addAction("Auto-scale")
        self.autoscaleAction.setCheckable(True)
        self.autoscaleAction.setChecked(True)
        self.autoscaleAction.triggered.connect(self._autoscale)

        self.mpl_toolbar.addWidget(
            QtWidgets.QLabel("Fourier transform "
                             "window: "))

        self.windowComboBox = QtWidgets.QComboBox(self.mpl_toolbar)
        for e in MPLCanvas.WindowTypes:
            self.windowComboBox.addItem(e.name, e)
        self.mpl_toolbar.addWidget(self.windowComboBox)
        self.windowComboBox.currentIndexChanged.connect(self._replot)

        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.setStretch(0, 1)
        vbox.setStretch(1, 1)

        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.canvas.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                  QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()

        self.fig.tight_layout()

        self._lines = self.axes.plot([], [], [], [], animated=True)
        self._lines[0].set_alpha(0.25)
        self._ftlines = self.ft_axes.plot([], [], [], [], animated=True)
        self._ftlines[0].set_alpha(0.25)
        self.axes.legend(['Previous', 'Current'])
        self.ft_axes.legend(['Previous', 'Current'])
        self.axes.set_title('Data')
        self.ft_axes.set_title('Fourier transformed data')
        self._redraw()

        # Use a short single-shot timer (100 ms, see setInterval below) to
        # initiate redrawing of the canvas. This ensures that the event loop
        # has run once more and prevents artifacts.
        self._redrawTimer = QtCore.QTimer(self)
        self._redrawTimer.setSingleShot(True)
        self._redrawTimer.setInterval(100)
        self._redrawTimer.timeout.connect(self._redraw)

        # will be disconnected in drawDataSet() when live data is detected.
        self._redraw_id = self.canvas.mpl_connect('draw_event',
                                                  self._redraw_artists)

    def _redraw_artists(self, *args):
        if not self._isLiveData:
            self.axes.draw_artist(self._lines[0])
            self.ft_axes.draw_artist(self._ftlines[0])

        self.axes.draw_artist(self._lines[1])
        self.ft_axes.draw_artist(self._ftlines[1])

    def _redraw(self):
        self.fig.tight_layout()
        self.canvas.draw()
        self.backgrounds = [
            self.fig.canvas.copy_from_bbox(ax.bbox)
            for ax in (self.axes, self.ft_axes)
        ]
        self._redraw_artists()

    def showEvent(self, e):
        super().showEvent(e)
        self._redrawTimer.start()

    def resizeEvent(self, e):
        super().resizeEvent(e)
        self._redrawTimer.start()

    def get_ft_data(self, data):
        delta = np.mean(np.diff(data.axes[0]))
        winFn = self.windowFunctionMap[self.windowComboBox.currentData()]
        refUnit = 1 * data.data.units
        Y = np.fft.rfft(np.array(data.data / refUnit) * winFn(len(data.data)),
                        axis=0)
        freqs = np.fft.rfftfreq(len(data.axes[0]), delta)
        dBdata = 10 * np.log10(np.abs(Y))
        if not self.dataIsPower:
            dBdata *= 2
        data_slice = np.array(freqs) < 2.1
        return (freqs[data_slice], dBdata[data_slice])

    def _dataSetToLines(self, data, line, ftline):
        if data is None:
            line.set_data([], [])
            ftline.set_data([], [])
            return

        #data.data -= np.mean(data.data)
        line.set_data(data.axes[0], data.data)
        freqs, dBdata = self.get_ft_data(data)
        ftline.set_data(freqs, dBdata)

    def _autoscale(self, *, redraw=True):
        prev_xlim = self.axes.get_xlim()
        prev_ylim = self.axes.get_ylim()
        prev_ft_xlim = self.ft_axes.get_xlim()
        prev_ft_ylim = self.ft_axes.get_ylim()

        self.axes.relim()
        self.axes.autoscale()
        self.ft_axes.relim()
        self.ft_axes.autoscale()

        need_redraw = (prev_xlim != self.axes.get_xlim()
                       or prev_ylim != self.axes.get_ylim()
                       or prev_ft_xlim != self.ft_axes.get_xlim()
                       or prev_ft_ylim != self.ft_axes.get_ylim())

        if need_redraw and redraw:
            self._redraw()

        return need_redraw

    def _replot(self,
                redraw_axes=False,
                redraw_axes_labels=False,
                redraw_data_label=False):
        if not self._isLiveData:
            self._dataSetToLines(self.prevDataSet, self._lines[0],
                                 self._ftlines[0])
        self._dataSetToLines(self.dataSet, self._lines[1], self._ftlines[1])

        if self._axesLabels and redraw_axes_labels:
            self.axes.set_xlabel('{} [{:C~}]'.format(
                self._axesLabels[0], self.dataSet.axes[0].units))
            self.ft_axes.set_xlabel('1 / {} [1 / {:C~}]'.format(
                self._axesLabels[0], self.dataSet.axes[0].units))

        if self._dataLabel and redraw_data_label:
            self.axes.set_ylabel('{} [{:C~}]'.format(self._dataLabel,
                                                     self.dataSet.data.units))

            ftUnits = self.dataSet.data.units
            if not self.dataIsPower:
                ftUnits = ftUnits**2

            self.ft_axes.set_ylabel('Power [dB-({:C~})]'.format(ftUnits))

        axis_limits_changed = False
        if (self.autoscaleAction.isChecked()):
            axis_limits_changed = self._autoscale(redraw=False)

        # check whether a full redraw is necessary or if simply redrawing
        # the data lines is enough
        if (redraw_axes or redraw_axes_labels or redraw_data_label
                or axis_limits_changed):
            self._redraw()
        else:
            for bg in self.backgrounds:
                self.canvas.restore_region(bg)
            self._redraw_artists()
            self.canvas.blit(self.axes.bbox)
            self.canvas.blit(self.ft_axes.bbox)

    def drawDataSet(self, newDataSet, axes_labels, data_label):
        plotTime = time.perf_counter()

        looksLikeLiveData = plotTime - self._lastPlotTime < 1

        if looksLikeLiveData != self._isLiveData:
            if looksLikeLiveData:
                self.canvas.mpl_disconnect(self._redraw_id)
            else:
                self._redraw_id = self.canvas.mpl_connect(
                    'draw_event', self._redraw_artists)

        self._isLiveData = looksLikeLiveData

        # artificially limit the replot rate to 5 Hz
        if (plotTime - self._lastPlotTime < 0.2):
            return

        self._lastPlotTime = plotTime

        self.prevDataSet = self.dataSet
        self.dataSet = newDataSet

        redraw_axes = (self.prevDataSet is None
                       or len(self.prevDataSet.axes) != len(self.dataSet.axes))
        if not redraw_axes:
            for x, y in zip(self.prevDataSet.axes, self.dataSet.axes):
                if x.units != y.units:
                    redraw_axes = True
                    break

        redraw_axes_labels = (
            self._axesLabels != axes_labels
            or self.prevDataSet and self.dataSet
            and self.prevDataSet.axes[0].units != self.dataSet.axes[0].units)
        redraw_data_label = (
            self._dataLabel != data_label or self.prevDataSet and self.dataSet
            and self.prevDataSet.data.units != self.dataSet.data.units)

        self._axesLabels = axes_labels
        self._dataLabel = data_label

        self._replot(redraw_axes, redraw_axes_labels, redraw_data_label)
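# --- Illustrative sketch (editor's addition, not part of the class above) ---
# get_ft_data() applies one of the windows from windowFunctionMap before the
# real FFT and converts the magnitude to dB, doubling for amplitude-like data.
# A minimal stand-alone version of that idea, assuming a uniformly sampled,
# unitless 1-D signal (the function name and defaults are hypothetical):
import numpy as np
from scipy.signal import windows

def windowed_spectrum_db(y, dt, window=windows.hann, data_is_power=False):
    """Return (freqs, dB) of y after applying a periodic window."""
    win = window(len(y), sym=False)          # periodic window, as in the map
    Y = np.fft.rfft(np.asarray(y) * win)
    freqs = np.fft.rfftfreq(len(y), dt)
    db = 10 * np.log10(np.abs(Y))
    if not data_is_power:
        db *= 2                              # amplitude -> power in dB
    return freqs, db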
Example #18
0
    def _perform(self):
        """Get atlas line positions for wavelength fitting"""
        self.logger.info("Finding isolated atlas lines")
        # get atlas wavelength range
        # get pixel values (no longer centered in the middle)
        specsz = len(self.context.arcs[self.config.instrument.REFBAR])
        xvals = np.arange(0, specsz)
        # min, max rows, trimming the ends
        minrow = 50
        maxrow = specsz - 50
        # wavelength range
        mnwvs = []
        mxwvs = []
        refbar_disp = 1.
        # Get wavelengths for each bar
        for b in range(self.config.instrument.NBARS):
            waves = np.polyval(self.action.args.twkcoeff[b], xvals)
            mnwvs.append(np.min(waves))
            mxwvs.append(np.max(waves))
            if b == self.config.instrument.REFBAR:
                refbar_disp = self.action.args.twkcoeff[b][-2]
        self.logger.info("Ref bar (%d) dispersion = %.3f Ang/px" %
                         (self.config.instrument.REFBAR, refbar_disp))
        # Get extrema (trim ends a bit)
        minwav = min(mnwvs) + 10.
        maxwav = max(mxwvs) - 10.
        wave_range = maxwav - minwav
        # Do we have a dichroic?
        if self.action.args.dich:
            if self.action.args.camera == 0:  # Blue
                maxwav = min([maxwav, 5620.])
            elif self.action.args.camera == 1:  # Red
                minwav = max([minwav, 5580.])
            else:
                self.logger.error("Camera keyword not defined!")
        dichroic_fraction = (maxwav - minwav) / wave_range
        # Get corresponding atlas range
        minrw = [
            i for i, v in enumerate(self.action.args.refwave) if v >= minwav
        ][0]
        maxrw = [
            i for i, v in enumerate(self.action.args.refwave) if v <= maxwav
        ][-1]
        self.logger.info("Min, Max wave (A): %.2f, %.2f" % (minwav, maxwav))
        if self.action.args.dich:
            self.logger.info("Dichroic fraction: %.3f" % dichroic_fraction)
        # store atlas ranges
        self.action.args.atminrow = minrw
        self.action.args.atmaxrow = maxrw
        self.action.args.atminwave = minwav
        self.action.args.atmaxwave = maxwav
        self.action.args.dichroic_fraction = dichroic_fraction
        # get atlas sub spectrum
        atspec = self.action.args.reflux[minrw:maxrw]
        atwave = self.action.args.refwave[minrw:maxrw]
        # get reference bar arc spectrum, pixel values, and prelim wavelengths
        subxvals = xvals[minrow:maxrow]
        subyvals = self.context.arcs[
            self.config.instrument.REFBAR][minrow:maxrow].copy()
        subwvals = np.polyval(
            self.action.args.twkcoeff[self.config.instrument.REFBAR], subxvals)
        # smooth subyvals
        win = boxcar(3)
        subyvals = sp.signal.convolve(subyvals, win, mode='same') / sum(win)
        # find good peaks in arc spectrum
        smooth_width = 4  # in pixels
        # peak width
        peak_width = int(self.action.args.atsig / abs(refbar_disp))
        if peak_width < 4:
            peak_width = 4
        # slope threshold
        slope_thresh = 0.7 * smooth_width / 2. / 100.
        # slope_thresh = 0.7 * smooth_width / 1000.   # more severe for arc
        # slope_thresh = 0.016 / peak_width
        # get amplitude threshold
        ampl_thresh = 0.
        self.logger.info("Using a peak_width of %d px, a slope_thresh of %.5f "
                         "a smooth_width of %d and an ampl_thresh of %.3f" %
                         (peak_width, slope_thresh, smooth_width, ampl_thresh))
        arc_cent, avwsg, arc_hgt = findpeaks(subwvals, subyvals, smooth_width,
                                             slope_thresh, ampl_thresh,
                                             peak_width)
        # convert Gaussian sigma to FWHM: 2 * sqrt(2 * ln 2) ~ 2.354
        avwfwhm = avwsg * 2.354
        self.logger.info("Found %d lines with <sig> = %.3f (A),"
                         " <FWHM> = %.3f (A)" %
                         (len(arc_cent), avwsg, avwfwhm))
        # fitting window based on grating type
        if 'H' in self.action.args.grating or 'M' in self.action.args.grating:
            fwid = avwfwhm
        else:
            fwid = avwsg
        # clean near neighbors
        spec_cent = arc_cent
        spec_hgt = arc_hgt
        #
        # generate an atlas line list
        refws = []  # atlas line wavelength
        refas = []  # atlas line amplitude
        rej_fit_w = []  # fit rejected atlas line wavelength
        rej_fit_y = []  # fit rejected atlas line amplitude
        rej_par_w = []  # par rejected atlas line wavelength
        rej_par_a = []  # par rejected atlas line amplitude
        nrej = 0
        # look at each arc spectrum line
        for i, pk in enumerate(spec_cent):
            if pk <= minwav or pk >= maxwav:
                continue
            # get atlas pixel position corresponding to arc line
            try:
                line_x = [ii for ii, v in enumerate(atwave) if v >= pk][0]
                # get window around atlas line to fit
                minow, maxow, count = get_line_window(atspec, line_x)
            except IndexError:
                count = 0
                minow = None
                maxow = None
                self.logger.warning("line at edge: %d, %.2f, %.f2f" %
                                    (i, pk, max(atwave)))
            # is resulting window large enough for fitting?
            if count < 5 or not minow or not maxow:
                # keep track of fit rejected lines
                rej_fit_w.append(pk)
                rej_fit_y.append(spec_hgt[i])
                nrej += 1
                self.logger.info("Atlas window rejected for line %.3f" % pk)
                continue
            # get data to fit
            yvec = atspec[minow:maxow + 1]
            xvec = atwave[minow:maxow + 1]
            # attempt Gaussian fit
            try:
                fit, _ = curve_fit(gaus, xvec, yvec, p0=[spec_hgt[i], pk, 1.])
            except RuntimeError:
                # keep track of Gaussian fit rejected lines
                rej_fit_w.append(pk)
                rej_fit_y.append(spec_hgt[i])
                nrej += 1
                self.logger.info("Atlas Gaussian fit rejected for line %.3f" %
                                 pk)
                continue
            # get interpolation function of atlas line
            int_line = interpolate.interp1d(xvec,
                                            yvec,
                                            kind='cubic',
                                            bounds_error=False,
                                            fill_value='extrapolate')
            # use very dense pixel sampling
            x_dense = np.linspace(min(xvec), max(xvec), num=1000)
            # resample line with dense sampling
            y_dense = int_line(x_dense)
            # get peak amplitude and wavelength
            pki = y_dense.argmax()
            pkw = x_dense[pki]
            # calculate some diagnostic parameters for the line
            # how many atlas pixels have we moved?
            xoff = abs(pkw - fit[1]) / self.action.args.refdisp
            # what is the wavelength offset in Angstroms?
            woff = abs(pkw - pk)
            # what fraction of the canonical fit width is the line?
            wrat = abs(fit[2]) / fwid  # can be neg or pos
            # current criteria for these diagnostic parameters
            if woff > 5. or xoff > 1.5 or wrat > 1.1:
                # keep track of par rejected atlas lines
                rej_par_w.append(pkw)
                rej_par_a.append(y_dense[pki])
                nrej += 1
                self.logger.info(
                    "Atlas line parameters rejected for line %.3f" % pk)
                self.logger.info("woff = %.3f, xoff = %.2f, wrat = %.3f" %
                                 (woff, xoff, wrat))
                continue
            refws.append(pkw)
            refas.append(y_dense[pki])
        # eliminate faintest lines if we have a large number
        self.logger.info("number of remaining lines: %d" % len(refas))
        if len(refas) > 400:
            # sort on flux
            sf = np.argsort(refas)
            refws = np.asarray(refws)[sf]
            refas = np.asarray(refas)[sf]
            # remove faintest two-thirds
            hlim = int(len(refas) * 0.67)
            refws = refws[hlim:]
            refas = refas[hlim:]
            # sort back onto wavelength
            sw = np.argsort(refws)
            refws = refws[sw].tolist()
            refas = refas[sw].tolist()
        # check if line list was given on command line
        if self.config.instrument.LINELIST:
            with open(self.config.instrument.LINELIST) as llfn:
                atlines = llfn.readlines()
            refws = []
            refas = []
            for line in atlines:
                if '#' in line:
                    continue
                refws.append(float(line.split()[0]))
                refas.append(float(line.split()[1]))
            self.logger.info("Read %d lines from %s" %
                             (len(refws), self.config.instrument.LINELIST))
        else:
            self.logger.info("Using %d generated lines" % len(refws))
        # store wavelengths, fluxes
        self.action.args.at_wave = refws
        self.action.args.at_flux = refas
        # output filename stub
        atfnam = "arc_%05d_%s_%s_%s_atlines" % \
            (self.action.args.ccddata.header['FRAMENO'],
             self.action.args.illum, self.action.args.grating,
             self.action.args.ifuname)
        # output directory
        output_dir = os.path.join(self.config.instrument.cwd,
                                  self.config.instrument.output_directory)
        # write out final atlas line list
        atlines = np.array([refws, refas])
        atlines = atlines.T
        with open(os.path.join(output_dir, atfnam + '.txt'), 'w') as atlfn:
            np.savetxt(atlfn, atlines, fmt=['%12.3f', '%12.3f'])
        # plot final list of Atlas lines and show rejections
        norm_fac = np.nanmax(atspec)
        if self.config.instrument.plot_level >= 1:
            p = figure(title=self.action.args.plotlabel +
                       "ATLAS LINES Ngood = %d, Nrej = %d" %
                       (len(refws), nrej),
                       x_axis_label="Wavelength (A)",
                       y_axis_label="Normalized Flux",
                       plot_width=self.config.instrument.plot_width,
                       plot_height=self.config.instrument.plot_height)
            p.line(subwvals,
                   subyvals / np.nanmax(subyvals),
                   legend_label='RefArc',
                   color='lightgray')
            p.line(atwave,
                   atspec / norm_fac,
                   legend_label='Atlas',
                   color='blue')
            # Rejected: nearby neighbor
            # p.diamond(rej_neigh_w, rej_neigh_y / norm_fac,
            #          legend_label='NeighRej', color='cyan', size=8)
            # Rejected: fit failure
            p.diamond(rej_fit_w,
                      rej_fit_y / norm_fac,
                      legend_label='FitRej',
                      color='red',
                      size=8)
            # Rejected: line parameter outside range
            p.diamond(rej_par_w,
                      rej_par_a / norm_fac,
                      legend_label='ParRej',
                      color='orange',
                      size=8)
            p.diamond(refws,
                      refas / norm_fac,
                      legend_label='Kept',
                      color='green',
                      size=10)
            p.line([minwav, minwav], [-0.1, 1.1],
                   legend_label='WavLim',
                   color='brown')
            p.line([maxwav, maxwav], [-0.1, 1.1], color='brown')
            p.x_range = Range1d(min([min(subwvals), minwav - 10.]),
                                max(subwvals))
            p.y_range = Range1d(-0.04, 1.04)
            bokeh_plot(p, self.context.bokeh_session)
            if self.config.instrument.plot_level >= 2:
                input("Next? <cr>: ")
            else:
                time.sleep(self.config.instrument.plot_pause)
            save_plot(p, filename=atfnam + ".png")
        self.logger.info("Final atlas list has %d lines" % len(refws))

        log_string = GetAtlasLines.__module__
        self.action.args.ccddata.header['HISTORY'] = log_string
        self.logger.info(log_string)

        return self.action.args
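# --- Illustrative sketch (editor's addition, not part of the primitive above) ---
# The atlas-line step above smooths the reference arc with a short, normalized
# boxcar before searching for peaks.  A minimal stand-alone version of that
# smoothing + peak search, using scipy.signal.find_peaks in place of the
# pipeline's findpeaks() helper (thresholds here are arbitrary):
import numpy as np
from scipy.signal import convolve, find_peaks
from scipy.signal.windows import boxcar

def smooth_and_find_peaks(flux, width=3, min_height=0.0):
    """Boxcar-smooth a 1-D spectrum and return the smoothed array and peak indices."""
    win = boxcar(width)
    smoothed = convolve(np.asarray(flux), win, mode='same') / win.sum()
    peaks, _ = find_peaks(smoothed, height=min_height)
    return smoothed, peaks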
Example #19
0
 def test_basic(self):
     assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1])
     assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
     assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1])
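# --- Illustrative note (editor's addition, not part of the test above) ---
# boxcar() returns an array of ones, so the sym flag has no effect, as the
# test shows.  Normalized, it is simply a moving-average kernel; a small
# sketch of that use (variable names are arbitrary):
import numpy as np
from scipy.signal.windows import boxcar

x = np.arange(10, dtype=float)
win = boxcar(3) / 3.0
smoothed = np.convolve(x, win, mode='same')   # 3-point running mean (edges zero-padded)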
Example #20
0
def myexecute(cand_index, cand_DMs, cand_sigma, cand_dedisp_times, downfact,
              metadata, int_times, mask_zap_chans, mask_zap_chans_per_int,
              freqs_GHz, tot_time_samples, t_samp, chan_bw, npol, nchans,
              n_bytes, hdr_size, hotpotato, f, rank):
    print('RANK %d: Working on candidate at index %d' % (rank, cand_index))
    DM = cand_DMs[cand_index]  # DM (pc/cc) of single pulse candidate
    cand_time = cand_dedisp_times[cand_index]  # Candidate time (s)
    t_ex = calc_tDM(
        freqs_GHz[0], DM, freqs_GHz[-1]
    )  # Extra time of data to be loaded around the candidate time
    if DM < 15.0:
        t_ex = np.max([0.2, t_ex])
    # Index of start time of data chunk to be loaded.
    if hotpotato['t_before'] is not None:
        tstart = np.max(
            [0., np.floor(
                (cand_time - hotpotato['t_before']) / t_samp)]).astype(int)
    else:
        tstart = np.max([0, np.floor((cand_time - t_ex) / t_samp)]).astype(int)
    # Index of stop time of data chunk to be loaded.
    if hotpotato['t_after'] is not None:
        tstop = np.min([
            tot_time_samples,
            np.floor((cand_time + hotpotato['t_after']) / t_samp)
        ]).astype(int)
    else:
        tstop = np.min(
            [tot_time_samples,
             np.floor((cand_time + 2 * t_ex) / t_samp)]).astype(int)
    # 1D array of times (s)
    times = np.arange(tstart, tstop) * t_samp
    # Data chunk to load
    data = load_fil_data(f, tstart, tstop, npol, nchans, n_bytes, hdr_size,
                         hotpotato['pol'], f.tell())

    # Flip frequency axis of DS if channel bandwidth is negative.
    if (chan_bw < 0):
        print('RANK %d: Flipping frequency axis of DS' % (rank))
        data = np.flip(data, axis=0)
    # Clip bandpass edges.
    data = data[hotpotato['ind_band_low']:hotpotato['ind_band_high']]

    # Compute bandpass if needed.
    if hotpotato['bandpass_method'] == 'compute':
        hotpotato['median_bp'] = calc_median_bandpass(data)
    # Correct data for bandpass shape.
    print('RANK %d: Correcting data for bandpass shape' % (rank))
    if 0 in hotpotato['median_bp']:
        indices_zero_bp = np.where(hotpotato['median_bp'] == 0)[0]
        replace_value = np.median(
            hotpotato['median_bp'][np.where(hotpotato['median_bp'] != 0)[0]])
        hotpotato['median_bp'][indices_zero_bp] = replace_value
        data[indices_zero_bp] = replace_value
    data = correct_bandpass(data, hotpotato['median_bp'])

    # Apply rfifind mask on data.
    if hotpotato['apply_rfimask']:
        idx1 = np.where(int_times <= times[0])[0][-1]
        idx2 = np.where(int_times < times[-1])[0][-1] + 1
        cand_nint = idx2 - idx1
        cand_int_times = int_times[idx1:idx2]
        cand_mask_zap_chans_per_int = mask_zap_chans_per_int[idx1:idx2]
        # Boolean rfifind mask
        boolean_rfimask = np.zeros(data.shape, dtype=bool)
        for i in range(cand_nint):
            if i == cand_nint - 1:
                tstop_int = tstop
            else:
                tstop_int = np.min(np.where(times >= cand_int_times[i + 1])[0])
            tstart_int = np.min(np.where(times >= cand_int_times[i])[0])
            boolean_rfimask[cand_mask_zap_chans_per_int[i],
                            tstart_int:tstop_int] = True
        print('RANK %d: Applying RFI mask on data' % (rank))
        data = np.ma.MaskedArray(data, mask=boolean_rfimask)
        # Replace masked entries with the mean value.
        print('RANK %d: Replacing masked entries with mean values' % (rank))
        data = np.ma.filled(data, fill_value=np.nanmean(data))
        # Set up list of channels to mask in downsampled data.
        mask_zap_check = list(
            np.sort(mask_zap_chans) // hotpotato['kernel_size_freq_chans'])
        mask_chans = np.array([
            chan for chan in np.unique(mask_zap_check) if mask_zap_check.count(
                chan) == hotpotato['kernel_size_freq_chans']
        ])
    else:
        mask_chans = None

    # Remove zerodm signal.
    if hotpotato['remove_zerodm']:
        data = remove_additive_time_noise(data)[0]
        print('RANK %d: Zerodm removal completed.' % (rank))

    # Smooth and/or downsample the data.
    kernel_size_time_samples = hotpotato['downsamp_time'][np.where(
        np.array(hotpotato['low_dm_cats']) <= DM)[0][-1]]
    data, freqs_GHz_smoothed, times = smooth_master(
        data, hotpotato['smoothing_method'], hotpotato['convolution_method'],
        hotpotato['kernel_size_freq_chans'], kernel_size_time_samples,
        freqs_GHz, times)
    if hotpotato['smoothing_method'] != 'Blockavg2D':
        data, freqs_GHz_smoothed, times = smooth_master(
            data, 'Blockavg2D', hotpotato['convolution_method'],
            hotpotato['kernel_size_freq_chans'], kernel_size_time_samples,
            freqs_GHz_smoothed, times)

    # Remove residual spectral trend.
    data = data - np.median(data, axis=1)[:, None]
    print('RANK %d: Residual spectral trend subtracted.' % (rank))

    # Remove any residual temporal trend.
    if hotpotato['remove_zerodm']:
        data = data - np.median(data, axis=0)[None, :]
        if mask_chans is not None:
            data[mask_chans] = 0.0
        print('RANK %d: Zerodm removal completed.' % (rank))

    # Clip off masked channels at edges of the frequency band.
    if mask_chans is not None:
        # Lowest channel not to be masked.
        low_ch_index = 0
        while low_ch_index + 1 in mask_chans:
            low_ch_index += 1
        # Highest channel not to be masked.
        high_ch_index = len(freqs_GHz) - 1
        while high_ch_index in mask_chans:
            high_ch_index -= 1
        freqs_GHz = freqs_GHz[low_ch_index:high_ch_index + 1]
        data = data[low_ch_index:high_ch_index + 1]
        # Modify channel mask to reflect properties of updated data range.
        mask_chans = np.delete(mask_chans, np.where(mask_chans < low_ch_index))
        mask_chans = np.delete(mask_chans,
                               np.where(mask_chans > high_ch_index))
        mask_chans = np.array(mask_chans - low_ch_index, dtype=int)

    # Dedisperse the data at DM of candidate detection.
    dedisp_ds, dedisp_times, dedisp_timeseries = dedisperse_ds(
        data, freqs_GHz_smoothed, DM, freqs_GHz_smoothed[-1],
        freqs_GHz_smoothed[0], times[1] - times[0], times[0])

    # Smooth dedispersed dynamic spectrum and dedispersed time series using a boxcar matched-filter of size "downfact" samples.
    if hotpotato['do_smooth_dedisp']:
        # A uniform (boxcar) filter with a width equal to downfact,
        # normalized to unit integral.
        filter = boxcar(int(downfact))
        filter = filter / np.sum(filter)
        print('RANK %d: Convolving dedispersed dynamic spectrum along time '
              'with a boxcar matched filter of width %d bins'
              % (rank, downfact))
        # Smoothed dedispersed dynamic spectrum
        dedisp_ds = convolve1d(dedisp_ds, filter, axis=-1)
        dedisp_timeseries = np.sum(dedisp_ds, axis=0)

    # Candidate verification plot
    spcand_verification_plot(cand_index,
                             cand_dedisp_times,
                             cand_DMs,
                             cand_sigma,
                             metadata,
                             data,
                             times,
                             freqs_GHz_smoothed,
                             dedisp_ds,
                             dedisp_timeseries,
                             dedisp_times,
                             SAVE_DIR=hotpotato['OUTPUT_DIR'],
                             output_formats=hotpotato['output_formats'],
                             show_plot=hotpotato['show_plot'],
                             low_DM_cand=hotpotato['low_DM_cand'],
                             high_DM_cand=hotpotato['high_DM_cand'],
                             mask_chans=mask_chans,
                             vmin=np.mean(data) - 2 * np.std(data),
                             vmax=np.mean(data) + 5 * np.std(data),
                             cmap=hotpotato['cmap'],
                             do_smooth_dedisp=hotpotato['do_smooth_dedisp'],
                             filter_width=int(downfact))

    # Write smoothed dynamic spectrum to disk as .npz file.
    if hotpotato['write_npz']:
        npz_filename = hotpotato['OUTPUT_DIR'] + '/' + hotpotato[
            'basename'] + '_t%.2f_DM%.1f' % (cand_time, DM)
        write_npz_data(data, freqs_GHz_smoothed, times, mask_chans,
                       npz_filename)
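The do_smooth_dedisp step above boils down to a unit-sum boxcar convolution along the time axis. A minimal, self-contained sketch of that operation (array and function names here are illustrative, not part of the pipeline):

import numpy as np
from scipy.ndimage import convolve1d
from scipy.signal.windows import boxcar

def boxcar_smooth_time(dyn_spec, width):
    # Unit-sum boxcar kernel: convolution acts as a running mean over `width` samples.
    kern = boxcar(int(width))
    kern = kern / kern.sum()
    smoothed = convolve1d(dyn_spec, kern, axis=-1)  # smooth each channel along time
    return smoothed, smoothed.sum(axis=0)           # spectrum and its collapsed time series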
Example #21
0
def ceps_envelope(signal_inp, fft_size, window, fs, f0, num_coeff, choice,
                  choice_inp):
    """
	Returns the Spectral Envelope based on the Windowed Cepstral 'Liftering' method

	Lifters the cepstrum and computes it's FFT to find the spectral envelope.

	Parameters
	----------
	signal_inp : np.array
	    numpy array containing the audio signal
     	look at choice_inp below
	fft_size : integer(even)
		FFT Size
	window : string
		Window function
	fs : integer
		Sampling rate
	f0 : integer
		Fundamental Frequency
	num_coeff : integer
		Number of cepstral coefficients to consider(0 <= num_coeff <= fft_size)
	choice : 0 or 1
		if 0, will use paper defined number of cepstral coefficients
		if 1, will use user specified number of cepstral coefficients
	choice_inp : 0 or 1
		if 0, signal_inp should be the time domain signal
		if 1, signal_inp should be the frequency domain signal(fft of the time domain signal)

	Returns
	-------
	spectral_envelope : np.array
	    Returns the spectral envelope

	References
    ----------
    .. [1] Cross Synthesis Using Cepstral Smoothing or Linear Prediction for Spectral Envelopes, J.O. Smith
           https://ccrma.stanford.edu/~jos/SpecEnv/LPC_Envelope_Example_Speech.html

	"""

    if (choice_inp == 0):
        cepstral_coeffs = real_cepstrum(signal_inp, fft_size)
    else:
        # Small offset avoids taking the log of zero.
        log_sig_fft_mag = np.log10(np.abs(signal_inp) + 10**(-10))
        cepstral_coeffs = np.real(np.fft.ifft(log_sig_fft_mag, fft_size))
    # Number of cepstral coefficients to keep (as defined in the True Envelope paper)
    num_paper = int(fs / (2 * f0))
    if (choice == 0):
        R = num_paper
    else:
        R = num_coeff

    # Generate the lifter window of appropriate size (same as the number of
    # cepstral coefficients to keep). A rectangular (boxcar) lifter of length
    # 2R is used regardless of the `window` argument, so `win` is always defined.
    win = windows.boxcar(2 * R)

    win_fin = np.zeros(fft_size)
    win_fin[0:R] = win[R:]
    win_fin[fft_size - R:] = win[:R]
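    # win_fin keeps the first R and last R cepstral coefficients (the low
    # quefrencies, which wrap around under the FFT's circular indexing) and
    # zeroes out the rest before liftering.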

    # Lifter the cepstrum
    liftered_ceps = cepstral_coeffs * win_fin
    # liftered_ceps[0] = 0

    # Finding the envelope by taking the FFT of the liftered signal
    spec_env = np.real(np.fft.fft(liftered_ceps, fft_size))

    # zero meaning
    # spec_env = spec_env - np.mean(spec_env)

    return spec_env, win_fin, liftered_ceps
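A hypothetical call with a frequency-domain input (choice_inp=1), which avoids the external real_cepstrum helper; the frame, sampling rate, and f0 below are illustrative stand-ins:

import numpy as np
frame = np.random.default_rng(0).normal(size=2048)   # stand-in for one audio frame
frame_fft = np.fft.fft(frame, 2048)
spec_env, lifter, ceps = ceps_envelope(frame_fft, fft_size=2048, window='hann',
                                       fs=44100, f0=220, num_coeff=0,
                                       choice=0, choice_inp=1)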
def apply_boxcar(series, T, mean_num=50):
    # Unit-area boxcar kernel: the convolution acts as a length-T running mean.
    b = boxcar(T) / T
    # Pad the series (presumably with the mean of the first/last mean_num
    # samples) so the running mean behaves well at the edges.
    series = mean_padding(series, T, mean_num)
    temp = convolve(series, b, mode='same', method='direct')

    # Trim the padded samples from each end.
    return temp[T:-T]
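A hypothetical call, assuming mean_padding from the same module and a 1-D NumPy series; T is the window length in samples:

import numpy as np
sig = np.random.default_rng(1).normal(size=1000)
smoothed = apply_boxcar(sig, T=25)   # length-25 running mean of sig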