Example #1
    def calc_background_rms(self, data, axis=None, masked=False):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis, masked=False)
        else:
            # convert to ndarray with masked values as np.nan
            if isinstance(data, np.ma.MaskedArray):
                data = data.filled(np.nan)

        # ignore RuntimeWarning where axis is all NaN
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)

            # this is needed to work around a bug in astropy 4.3.1 where
            # biweight_scale can drop the unit in some cases
            if isinstance(data, u.Quantity):
                result = data.__array_wrap__(
                    biweight_scale(data,
                                   c=self.c,
                                   M=self.M,
                                   axis=axis,
                                   ignore_nan=True))
            else:
                result = biweight_scale(data,
                                        c=self.c,
                                        M=self.M,
                                        axis=axis,
                                        ignore_nan=True)

        if masked and isinstance(result, np.ndarray):
            result = np.ma.masked_where(np.isnan(result), result)

        return result
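
A minimal sketch, with made-up numbers, of what the __array_wrap__ workaround above does: in the affected astropy versions biweight_scale can return a plain ndarray for a Quantity input, and wrapping the result with the input's __array_wrap__ re-attaches the unit.

import numpy as np
import astropy.units as u
from astropy.stats import biweight_scale

data = np.array([[1.0, 2.0, 3.0, 50.0],
                 [2.0, 2.5, 3.5, 4.0]]) * u.Jy
raw = biweight_scale(data.value, axis=1, ignore_nan=True)  # plain ndarray, unit lost
rms = data.__array_wrap__(raw)                             # re-attach the Jy unit
print(rms.unit)  # Jy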
Example #2
def plot_hist_errors(pasta, y_Z, y_age, y_Zmin=-3, y_Zmax=3, y_agemin=-3, y_agemax=3,
                     label_Z='label_Z', label_age='label_age', file='test'):
    location = [biweight_location(y_Z), biweight_location(y_age)]
    scale = [biweight_scale(y_Z), biweight_scale(y_age)]

    fig = plt.figure(constrained_layout=False)
    gs = fig.add_gridspec(nrows=1, ncols=2)  # , width_ratios=[2, 1], height_ratios=[1, 2])
    a = fig.add_subplot(gs[0, 0])
    n, bins_Z, patches = plt.hist(y_Z, orientation='vertical', color='khaki')
    plt.vlines(location[0] + scale[0], 0, np.max(n), label='scale=' + str(scale[0]))
    plt.vlines(location[0] - scale[0], 0, np.max(n))
    plt.vlines(location[0], 0, np.max(n), color='blue', label='location=' + str(location[0]))
    plt.xlabel(label_Z)
    plt.xlim(y_Zmin, y_Zmax)
    plt.legend()
    a = fig.add_subplot(gs[0, 1])
    n, bins_age, patches = plt.hist(y_age, orientation='vertical', color='khaki')
    plt.vlines(location[1] + scale[1], 0, np.max(n), label='scale=' + str(scale[1]))
    plt.vlines(location[1] - scale[1], 0, np.max(n))
    plt.vlines(location[1], 0, np.max(n), color='blue', label='location=' + str(location[1]))
    plt.legend()
    plt.xlabel(label_age)
    plt.xlim(y_agemin, y_agemax)
    # plt.legend()
    plt.tight_layout()
    plt.savefig('results/' + pasta + '/' + file + '.png', dpi=300, bbox_inches='tight')
    plt.close()
Example #3
def calculate_reference_statistics(times, mags):
    """Calculate reference statistics for a light curve

    We do this using biweight estimators for both the location and scale.  One challenge
    with this is that if there are many repeated observations, which is the case for
    some of the ZTF deep fields, they can overwhelm all of the other observations. To
    mitigate this, we only use the first observation from each night when calculating
    the statistics.

    Parameters
    ----------
    times : ndarray
        Time of each observation.
    mags : ndarray
        Magnitude of each observation.

    Returns
    -------
    reference_magnitude : float
        Reference magnitude
    reference_scale : float
        Reference scale
    """
    # Consider at most one observation from each night.
    _, indices = np.unique(times.astype(int), return_index=True)
    use_mags = mags[indices]

    # Use robust estimators of the reference magnitude and its scale.
    reference_magnitude = biweight_location(use_mags)
    reference_scale = biweight_scale(use_mags)

    return reference_magnitude, reference_scale
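
A hypothetical usage of the function above on a synthetic, constant-brightness light curve; the numbers are assumed for illustration only.

import numpy as np

rng = np.random.default_rng(0)
times = np.sort(rng.uniform(0.0, 30.0, 200))     # ~30 nights of observations
mags = 18.5 + 0.05 * rng.standard_normal(200)    # constant star plus noise

ref_mag, ref_scale = calculate_reference_statistics(times, mags)
print(ref_mag, ref_scale)                        # close to 18.5 and 0.05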
Example #4
def fast_find_noise(flux, **sigma_clip_kwargs):
    """ sigma clip then biweight scale """
    flux = replace_nans(flux, msg="fast_find_noise")
    clipped = sigma_clip(flux, **sigma_clip_kwargs)
    print("fast_find_noise: kept {}/{} pixels after sigma clipping".format(
        np.sum(~clipped.mask), len(clipped)))
    noise = biweight_scale(clipped[~clipped.mask])
    return noise
def save_stars_in_shot(shot_idx):
    shot = shotlist[shot_idx]  # erin_stars["shotid"][idx]
    stars_shot = erin_stars[erin_stars["shotid"] == shot]
    table = load_shot(shot)
    zero_mask = table["spec_fullsky_sub"] == 0  # build the mask before overwriting with NaN
    table["spec_fullsky_sub"][zero_mask] = np.nan
    table["calfibe"][zero_mask] = np.nan
    weights = np.ones(table["spec_fullsky_sub"].shape)
    weights[~np.isfinite(table["spec_fullsky_sub"])] = np.nan
    #print("opened shot ", shot)
    for star_idx in range(len(stars_shot)):
        star_ID = stars_shot["ID"][star_idx]
        ff = glob.glob(
            "/data/05865/maja_n/Jupyter/use_stars/star_{}_{}.tab".format(
                shot, star_ID))
        #if len(ff) > 0:
        #    continue
        try:
            coords = SkyCoord(stars_shot["ra"][star_idx] * u.deg,
                              stars_shot["dec"][star_idx] * u.deg)
            ras, decs = table["ra"], table["dec"]
            n_here = ((ras - coords.ra.value) *
                      np.cos(coords.dec.value * np.pi / 180))**2 + (
                          decs - coords.dec.value)**2 < (13. / 3600)**2
            if len(n_here[n_here]) < 5:
                continue
            else:
                #mids = np.nanmean(table["calfib"][n_here][:,wlhere], axis=1)
                #stds = np.nanstd(table["calfib"][n_here][:,wlhere], axis=1)/np.sqrt(len(wlhere))
                mids = biweight_location_weights_karl(
                    table["spec_fullsky_sub"][n_here][:, wlhere],
                    weights=weights[n_here][:, wlhere],
                    axis=1)

                stds = biweight_scale(
                    table["spec_fullsky_sub"][n_here][:, wlhere],
                    axis=1,
                    ignore_nan=True) / np.sqrt((len(wlhere) - 1))
                tab = Table({
                    "ra":
                    ras[n_here],
                    "dec":
                    decs[n_here],
                    "flux":
                    mids,
                    "std":
                    stds,
                    "star_ra": [coords.ra.value for j in range(len(mids))],
                    "star_dec": [coords.dec.value for j in range(len(mids))]
                })
                ascii.write(
                    tab,
                    "/data/05865/maja_n/Jupyter/ff2.1_stars/star_{}_{}.tab".
                    format(shot, star_ID),
                    comment=True)
                #print("Wrote to new_startabs/star_{}_{}.tab".format(shot, star_ID))
        except:
            pass
    return 1
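
A small sketch, with invented coordinates, of the fiber-selection criterion used in save_stars_in_shot above: fibers within 13 arcsec of the star, with the RA offset scaled by cos(dec).

import numpy as np

star_ra, star_dec = 150.025, 2.210               # degrees (assumed)
ras = np.array([150.0265, 150.0235, 150.1000])
decs = np.array([2.2110, 2.2090, 2.3000])
n_here = (((ras - star_ra) * np.cos(np.deg2rad(star_dec)))**2
          + (decs - star_dec)**2) < (13.0 / 3600.0)**2
print(n_here)                                    # [ True  True False]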
Example #6
def find_offsets(star, rpat, ZrangeA, ZrangeBC, starerrs=None):
    """ Find dlogepsA, dlogepsBC """
    output = []

    def get_solar_value(Z):
        if Z in rpat: return rpat[Z]
        else: return np.nan

    for Zrange in [ZrangeA, ZrangeBC]:
        _star = star[np.array([
            np.logical_and(Z >= Zrange[0], Z <= Zrange[1]) for Z in star.index
        ])]
        alldlogeps = np.array(
            [logeps - rpat[Z] for Z, logeps in _star.items()])
        if np.all(np.isnan(alldlogeps)):
            output.append(np.nan)
            output.append(np.nan)
        else:
            mind, maxd = round(np.nanmin(alldlogeps), 2) - 0.1, round(
                np.nanmax(alldlogeps), 2) + 0.1
            searchd = np.arange(mind, maxd + 0.1,
                                0.01)  # brute force minimization
            if starerrs is None:
                absdev = np.nansum(np.abs(alldlogeps[:, np.newaxis] -
                                          searchd[np.newaxis, :]),
                                   axis=0)
            else:
                allerrs = np.array(
                    [starerrs[Z] for Z, logeps in _star.items()])
                absdev = np.nansum(
                    np.abs(alldlogeps[:, np.newaxis] - searchd[np.newaxis, :])
                    / allerrs[:, np.newaxis],
                    axis=0)
            dlogeps = searchd[np.argmin(absdev)]
            output.append(dlogeps)
            e_dlogeps = alldlogeps - dlogeps
            e_dlogeps = biweight_scale(e_dlogeps[np.isfinite(e_dlogeps)])
            output.append(e_dlogeps)
    dlogepsA, e_dlogepsA, dlogepsBC, e_dlogepsBC = output
    return dlogepsA, dlogepsBC, e_dlogepsA, e_dlogepsBC, np.sqrt(
        e_dlogepsA**2 + e_dlogepsBC**2)
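
A toy sketch with synthetic numbers of the brute-force L1 offset search used in find_offsets above, with biweight_scale of the residuals serving as the error estimate.

import numpy as np
from astropy.stats import biweight_scale

dlogeps = np.array([0.42, 0.38, 0.45, np.nan, 0.40])   # star minus scaled r-process pattern
searchd = np.arange(np.nanmin(dlogeps) - 0.1, np.nanmax(dlogeps) + 0.1, 0.01)
absdev = np.nansum(np.abs(dlogeps[:, np.newaxis] - searchd[np.newaxis, :]), axis=0)
dlogeps_best = searchd[np.argmin(absdev)]              # L1 (median-like) offset estimate
resid = dlogeps - dlogeps_best
e_dlogeps = biweight_scale(resid[np.isfinite(resid)])  # robust scatter as the error
print(dlogeps_best, e_dlogeps)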
Example #7
    def calc_background_rms(self, data, axis=None, masked=False):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis, masked=False)
        else:
            # convert to ndarray with masked values as np.nan
            if isinstance(data, np.ma.MaskedArray):
                data = data.filled(np.nan)

        # ignore RuntimeWarning where axis is all NaN
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            result = biweight_scale(data,
                                    c=self.c,
                                    M=self.M,
                                    axis=axis,
                                    ignore_nan=True)

        if masked and isinstance(result, np.ndarray):
            result = np.ma.masked_where(np.isnan(result), result)

        return result
Example #8
def robustassess(n, d, k=2.24):  #raw frequency, doc length and threshold
    v = np.divide(n, d)

    rawcount = np.sum(n)

    mu = biweight_location(v)
    s = biweight_scale(v)
    mu2s = mu + k * s
    vcliph2s = np.minimum(v, mu2s)  # Winsorising: cap relative frequencies at mu + k*s

    docsclipped = np.sum(np.greater(v, vcliph2s))  # number of docs that were clipped

    adjustedcount = int(np.sum(np.rint(vcliph2s * d)))  # rounding to integer

    proportionv = n / rawcount  # proportion of word frequency per document
    proportiond = d / N  # document size proportion
    kld = np.sum(
        np.multiply(proportionv, np.log2(np.divide(proportionv, proportiond))))

    return [rawcount, adjustedcount, docsclipped, len(n), '%.3f' % kld]
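
A hypothetical call to robustassess with invented frequencies; note the function also reads a module-level N (total corpus size), which is assumed here.

import numpy as np
from astropy.stats import biweight_location, biweight_scale

N = 1_000_000                                          # assumed total corpus size
n = np.array([3., 5., 12., 1., 250., 2.])              # raw frequency per document
d = np.array([900., 1200., 1100., 800., 1000., 950.])  # document lengths

print(robustassess(n, d))   # [rawcount, adjustedcount, docsclipped, ndocs, kld]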
Example #9
    def get_M(self, sigma_clip=5.0):
        '''
        Evaluate the M parameter defined in Cody et al. (2014), ApJ.
        Note that this method uses self.data_plot rather than self.data.

        sigma_clip     [float] : clip data above and below sigma_clip times the
                                 1-sigma (biweight) scale before computing M
                                 (sigma_clip=5.0 in Cody+14).
        '''

        sigma = biweight_scale(self.data_plot)
        data = self.data_plot[(self.data_plot >= (-1.0 * sigma_clip * sigma))
                              & (self.data_plot <= sigma_clip * sigma)]

        sigma_d = np.sqrt(np.mean(data**2))
        d_med = np.median(data)
        num_data = len(data)
        d_10per_upp = np.sort(data)[(num_data -
                                     int(round(num_data * 0.1))):num_data]
        d_10per_low = np.sort(data)[0:int(round(num_data * 0.1))]
        mean_d_10per = (np.mean(d_10per_upp) + np.mean(d_10per_low)) / 2.0
        self.M = (mean_d_10per - d_med) / sigma_d
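
A standalone sketch of the same M computation, assuming data_plot holds a mean- or median-subtracted light curve; the function name here is hypothetical.

import numpy as np
from astropy.stats import biweight_scale

def cody_M(data_plot, sigma_clip=5.0):
    sigma = biweight_scale(data_plot)
    data = data_plot[np.abs(data_plot) <= sigma_clip * sigma]
    sigma_d = np.sqrt(np.mean(data**2))
    d_med = np.median(data)
    n10 = max(1, int(round(len(data) * 0.1)))          # top/bottom 10% of points
    data_sorted = np.sort(data)
    mean_d_10per = (np.mean(data_sorted[-n10:]) + np.mean(data_sorted[:n10])) / 2.0
    return (mean_d_10per - d_med) / sigma_d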
Example #10
def get_ffss(shot, asymcut=True):
    table = load_shot(shot)

    xrt = get_xrt_time(shot)

    ff = glob.glob(
        "/work/05865/maja_n/stampede2/ffskysub/{}/exp0?/multi_???_???_???_??.fits"
        .format(shot))
    multis, fibers, exps = [], [], []
    bads = get_badamps(shot)
    for counter, fin in enumerate(ff):
        multi = fin.split("/")[-1][:-5]
        exps.append(fin.split("/")[-2])
        multis.append(multi)
        if multi[10:13] + multi[18:20] in bads:
            fibers.append(np.zeros((112, 1036)))
        else:
            fibers.append(fits.getdata(fin))

    exps = np.array(exps)
    multis = np.array(multis)
    fibers = np.array(fibers)
    fibers[fibers == 0] = np.nan

    # getting the number of continuum fibers to exclude
    #meanmean = np.nanmedian(np.concatenate(fibers), axis=1)
    #sorted_mean = np.argsort(meanmean)
    #finites = meanmean[np.isfinite(meanmean)].size

    wlhere_l = (def_wave > 3800.) & (def_wave <= 4500.)
    wlhere_u = (def_wave > 4500.) & (def_wave <= 5200.)

    meanmean_l = np.nanmedian(np.concatenate(fibers)[:, wlhere_l], axis=1)
    meanmean_u = np.nanmedian(np.concatenate(fibers)[:, wlhere_u], axis=1)

    #flexes = ascii.read('flexnum-biloc.dat')
    flexes_l = ascii.read(
        '/work/05865/maja_n/wrangler/im2d/flexnum-biloc-lower.dat')
    flexes_u = ascii.read(
        '/work/05865/maja_n/wrangler/im2d/flexnum-biloc-upper.dat')
    if shot in flexes_l['shot']:
        print('found shot in flexnum-biloc-lower.dat')
        #flex = flexes['flexnum'][flexes['shot']==shot]
        #if not type(flex)==int:
        #	flex = flex[0]
        maxim_l = flexes_l["maxim"][flexes_l["shot"] == shot]
        if not type(maxim_l) == int:
            maxim_l = maxim_l[0]
    if shot in flexes_u['shot']:
        print('found shot in flexnum-biloc-upper.dat')
        #flex = flexes['flexnum'][flexes['shot']==shot]
        #if not type(flex)==int:
        #	flex = flex[0]
        maxim_u = flexes_u["maxim"][flexes_u["shot"] == shot]
        if not type(maxim_u) == int:
            maxim_u = maxim_u[0]
    else:
        print('Error: did not find flexnum.')
        if False:  #### CHANGE THIS!!!
            return 0, 0, 0, 0, 0, 0, 0, 0, 0
            print('computing flex.')
            # include flexible parameter, such that median(medians) = 0
            fibers_con = np.concatenate(fibers)
            for flex in range(int(finites * 0.85), int(finites), 25):
                medmed = np.nanmedian(
                    np.nanmedian(fibers_con[sorted_mean[:flex]], axis=0))
                if medmed >= 0:
                    tmp = open('flexnum.dat', 'a')
                    tmp.write(str(shot) + '   ' + str(flex) + '\n')
                    tmp.close()
                    #print flex, medmed
                    print('percentage: ', float(flex) / finites)
                    break

    #maxim = meanmean[sorted_mean[flex]]
    #ftmp = np.concatenate(fibers)[abs(meanmean)<=maxim]
    #ftmp2 = np.concatenate(fibers)[(meanmean>-6.)&(meanmean<maxim)]
    #medmed = biweight_location(ftmp[np.isfinite(ftmp)])
    #medmed2 = biweight_location(ftmp2[np.isfinite(ftmp2)])
    #print("medmed: ", medmed)
    #print("medmed2: ", medmed2)
    #print("\n")

    #tmp = open("maxim.dat","a")
    #tmp.write(str(shot)+"	"+str(maxim)+"	"+str(medmed)+"\n")
    #tmp.close()

    #print("\n{}\n".format(maxim))

    if not asymcut:
        print("cutting symmetrically from -5 to 5.")
        maxim_l, maxim_u = 5., 5.
        min_l, min_u = -1 * maxim_l, -1 * maxim_u
    else:
        min_l, min_u = -6., -6.

    flag1 = (meanmean_l <= maxim_l) & (meanmean_l > min_l)
    flag2 = (meanmean_u <= maxim_u) & (meanmean_u > min_u)
    flag = flag1 * flag2

    ftmp = np.concatenate(fibers)[flag]
    print("\nbiloc remaining: {}\n".format(
        biweight_location(ftmp[np.isfinite(ftmp)])))

    flag = np.array(np.split(flag, len(ff)))

    amps = np.array([x[18:20] for x in multis])
    ifus = np.array([x[10:13] for x in multis])

    multiname, ffss, ras, decs = [], [], [], []
    throughputs, skysubs, errors, fibnums = [], [], [], []
    fluxcal = []

    for i in range(len(multis)):
        amp = amps[i]
        ifu = ifus[i]
        idx = (table["multiframe"] == multis[i]) & (table["expnum"] == int(
            exps[i][-1]))
        if len(idx[idx]) == 0:
            continue

        ifux, ifuy = table['ifux'][idx], table['ifuy'][idx]
        noedge = (abs(ifux) <= 22)  # change this !!! to 23
        flag[i] *= noedge
        noedge = (abs(ifuy) <= 22)  # change this !!! to 23
        flag[i] *= noedge

        fibers[i][~flag[i]] = 0.

        #print(f'fibers {i} == 0 : {fibers[i][fibers[i]==0].size/fibers[i].size}')
        f2f = np.array([
            np.interp(def_wave, x, y) for x, y in zip(
                table["wavelength"][idx], table["fiber_to_fiber"][idx])
        ])
        throughput = table["Throughput"][idx]

        A2A = table["Amp2Amp"][idx]
        a2a = np.ones(A2A.shape)
        a2a[A2A <= 0] = 0

        ra, dec = table["ra"][idx], table["dec"][idx]

        skysub = []
        for x, y in zip(table["wavelength"][idx],
                        table["sky_subtracted"][idx]):
            try:
                skysub.append(
                    np.interp(def_wave,
                              x[y != 0],
                              y[y != 0],
                              left=0.0,
                              right=0.0))
            except ValueError:
                skysub.append(np.zeros(def_wave.shape))
        skysub = np.array(skysub)
        skysub[~flag[i]] = 0.

        gain = table["calfib"][idx] / skysub
        gain[~np.isfinite(gain)] = 0.

        biscale = biweight_scale(gain[np.isfinite(gain)])
        biloc = biweight_location(gain[np.isfinite(gain)])
        these = abs(gain - biloc) > 6 * biscale
        gain[these] = 0.
        fluxcal.append(fibers[i] * gain / (f2f * xrt[(ifu, amp)](def_wave)) *
                       a2a)

        skysub[~flag[i]] = 0.0
        skysubs.append(skysub / (f2f * xrt[(ifu, amp)](def_wave)) * a2a)

        error = np.array([
            np.interp(def_wave, x, y, left=0.0, right=0.0)
            for x, y in zip(table["wavelength"][idx], table["error1Dfib"][idx])
        ])
        errors.append(error)

        fibnums.append(table["fibidx"][idx])

        ffss.append(fibers[i] / (f2f * xrt[(ifu, amp)](def_wave)) * a2a)
        ras.append(ra)
        decs.append(dec)
        throughputs.append(throughput)

    amps = [[x for i in range(112)] for x in amps]
    ffss = np.array(ffss)
    return np.array(ffss), np.array(ras), np.array(decs), np.array(
        throughputs), np.array(skysubs), np.array(errors), np.array(
            fibnums), np.array(amps), np.array(multis), np.array(fluxcal)
Example #11
def get_ffss_noflag(shot):
    table = load_shot(shot)

    xrt = get_xrt_time(shot)

    ff = glob.glob(
        "/work/05865/maja_n/stampede2/ffskysub/{}/exp0?/multi_???_???_???_??.fits"
        .format(shot))
    if len(ff) == 0:
        print("nothing found in {}".format(
            "/work/05865/maja_n/stampede2/ffskysub/{}/exp0?/multi_???_???_???_??.fits"
            .format(shot)))
    multis, fibers, exps = [], [], []
    bads = get_badamps(shot)
    for counter, fin in enumerate(ff):
        multi = fin.split("/")[-1][:-5]
        exps.append(fin.split("/")[-2])
        multis.append(multi)
        if multi[10:13] + multi[18:20] in bads:
            fibers.append(np.zeros((112, 1036)))
        else:
            fibers.append(fits.getdata(fin))

    exps = np.array(exps)
    multis = np.array(multis)
    fibers = np.array(fibers)
    fibers[fibers == 0] = np.nan

    # getting the number of continuum fibers to exclude
    meanmean = np.nanmedian(np.concatenate(fibers), axis=1)
    sorted_mean = np.argsort(meanmean)
    finites = meanmean[np.isfinite(meanmean)].size

    flexes = ascii.read('/work/05865/maja_n/wrangler/im2d/flexnum.dat')
    if shot in flexes['shot']:
        print('found shot in flexnum.dat')
        flex = flexes['flexnum'][flexes['shot'] == shot]
        if not type(flex) == int:
            flex = flex[0]
    else:
        print('Error: did not find flexnum.')
        return 0, 0, 0, 0, 0, 0, 0, 0, 0
        print('computing flex.')
        # include flexible parameter, such that median(medians) = 0
        fibers_con = np.concatenate(fibers)
        for flex in range(int(finites * 0.85), int(finites), 25):
            medmed = np.nanmedian(
                np.nanmedian(fibers_con[sorted_mean[:flex]], axis=0))
            if medmed >= 0:
                tmp = open('flexnum.dat', 'a')
                tmp.write(str(shot) + '   ' + str(flex) + '\n')
                tmp.close()
                #print flex, medmed
                print('percentage: ', float(flex) / finites)
                break

    maxim = meanmean[sorted_mean[flex]]
    #medmed = np.nanmedian(np.nanmedian(np.concatenate(fibers)[sorted_mean[:flex]], axis=0))
    #tmp = open("maxim.dat","a")
    #tmp.write(str(shot)+"	"+str(maxim)+"	"+str(medmed)+"\n")
    #tmp.close()

    #print("\n{}\n".format(maxim))
    flag = meanmean <= 10000.  #maxim

    flag = np.array(np.split(flag, len(ff)))

    amps = np.array([x[18:20] for x in multis])
    ifus = np.array([x[10:13] for x in multis])

    multiname, ffss, ras, decs = [], [], [], []
    throughputs, skysubs, errors, fibnums = [], [], [], []
    fluxcal = []

    for i in range(len(multis)):
        amp = amps[i]
        ifu = ifus[i]
        idx = (table["multiframe"] == multis[i]) & (table["expnum"] == int(
            exps[i][-1]))
        if len(idx[idx]) == 0:
            continue

        ifux, ifuy = table['ifux'][idx], table['ifuy'][idx]
        noedge = (abs(ifux) <= 23)  # change this !!!
        flag[i] *= noedge
        noedge = (abs(ifuy) <= 23)  # change this !!!
        flag[i] *= noedge

        fibers[i][~flag[i]] = 0.

        #print(f'fibers {i} == 0 : {fibers[i][fibers[i]==0].size/fibers[i].size}')
        f2f = np.array([
            np.interp(def_wave, x, y) for x, y in zip(
                table["wavelength"][idx], table["fiber_to_fiber"][idx])
        ])
        throughput = table["Throughput"][idx]

        A2A = table["Amp2Amp"][idx]
        a2a = np.ones(A2A.shape)
        a2a[A2A <= 0] = 0

        ra, dec = table["ra"][idx], table["dec"][idx]

        skysub = []
        for x, y in zip(table["wavelength"][idx],
                        table["sky_subtracted"][idx]):
            try:
                skysub.append(
                    np.interp(def_wave,
                              x[y != 0],
                              y[y != 0],
                              left=0.0,
                              right=0.0))
            except ValueError:
                skysub.append(np.zeros(def_wave.shape))
        skysub = np.array(skysub)
        skysub[~flag[i]] = 0.0
        skysubs.append(skysub / (f2f * xrt[(ifu, amp)](def_wave)) * a2a)

        gain = table["calfib"][idx] / skysub
        gain[~np.isfinite(gain)] = 0.

        biscale = biweight_scale(gain[np.isfinite(gain)])
        biloc = biweight_location(gain[np.isfinite(gain)])
        these = abs(gain - biloc) > 6 * biscale
        gain[these] = 0.
        fluxcal.append(fibers[i] * gain / (f2f * xrt[(ifu, amp)](def_wave)) *
                       a2a)

        error = np.array([
            np.interp(def_wave, x, y, left=0.0, right=0.0)
            for x, y in zip(table["wavelength"][idx], table["error1Dfib"][idx])
        ])
        errors.append(error)

        fibnums.append(table["fibidx"][idx])

        ffss.append(fibers[i] / (f2f * xrt[(ifu, amp)](def_wave)) * a2a)
        ras.append(ra)
        decs.append(dec)
        throughputs.append(throughput)

    amps = [[x for i in range(112)] for x in amps]
    ffss = np.array(ffss)
    return np.array(ffss), np.array(ras), np.array(decs), np.array(
        throughputs), np.array(skysubs), np.array(errors), np.array(
            fibnums), np.array(amps), np.array(multis), np.array(fluxcal)
    deccos = np.cos(middec * np.pi / 180.)
    return (((ras - midra) * deccos)**2 + (decs - middec)**2) * (3600)**2


shot_tab = load_shot(shotid)
ffskysub = shot_tab["spec_fullsky_sub"]
ffskysub[ffskysub == 0] = np.nan

errors = shot_tab["calfibe"]
errors[~np.isfinite(ffskysub)] = np.nan

weights = errors**(-2)
print("weights: ", np.nanmean(weights), np.nanmin(weights), np.nanmax(weights))
weights_mean = biweight_location(weights, ignore_nan=True)
weights_std = biweight_scale(weights, ignore_nan=True)
print(weights_mean, weights_mean + 4 * weights_std)
weights[weights > (weights_mean + 4 * weights_std)] = np.nan
errors[~np.isfinite(ffskysub)] = np.nan
ffskysub[~np.isfinite(errors)] = np.nan

def_wave = np.arange(3470, 5542, 2)

karl_stars = ascii.read(basedir + "lists/KarlsCalStars.tab")
stars = karl_stars[karl_stars["shotid"] == shotid]

psf_shape = ascii.read(basedir + "intensity-mapping/PSF/PSF.tab")
#psf_shape = psf_shape[np.isfinite(psf_shape["psf_iter"])]
psf_shape["psf_iter"][~np.isfinite(psf_shape["psf_iter"])] = 0
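
A minimal sketch, with synthetic numbers, of the inverse-variance weight clipping done above: weights far above the robust mean (i.e. suspiciously small errors) are set to NaN so they cannot dominate a later stack.

import numpy as np
from astropy.stats import biweight_location, biweight_scale

errors = np.abs(np.random.default_rng(3).normal(1.0, 0.1, 1000))
errors[:5] = 1e-4                        # a few unrealistically small errors
weights = errors**(-2)
w_mean = biweight_location(weights, ignore_nan=True)
w_std = biweight_scale(weights, ignore_nan=True)
weights[weights > (w_mean + 4 * w_std)] = np.nan
print(np.sum(~np.isfinite(weights)))     # the bogus weights (plus any other outliers) are now NaN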

Example #13
def imfunc(idx):
    st = time.time()
    # get ffss, weights, etc
    shot = shotlist[idx]

    try:
        xrt = get_xrt_time(shot)
        ffss, ras, decs, thru, skysub, error, fibidx, amp, multis, fluxcal = get_ffss(
            shot, asymcut=False)
    except Exception as e:
        print(e)
        print("In shot {}".format(shot))
        return 0
    if type(ffss) == int:
        return 0
    ffss = np.concatenate(ffss)
    ras, decs = np.concatenate(ras), np.concatenate(decs)
    fibidx = np.concatenate(fibidx)
    skysub = np.concatenate(skysub)
    error = np.concatenate(error)
    fluxcal = np.concatenate(fluxcal)
    amps = np.concatenate(amp)
    print("amps shape: ", amps.shape)
    ifuslots = np.concatenate([[x[10:13] for j in range(112)] for x in multis])
    print("ifuslots shape: ", ifuslots.shape)

    line = 3910
    here1 = np.where((def_wave > line - 7) & (def_wave < line + 7))[0]
    line = 4359
    here2 = np.where((def_wave > line - 7) & (def_wave < line + 7))[0]
    line = 5461
    here3 = np.where((def_wave > line - 7) & (def_wave < line + 7))[0]
    #ffss[:,here1] = 0.
    #ffss[:,here2] = 0.
    #ffss[:,here3] = 0.

    #ffss[:,:20] = 0.
    #ffss[:,-20:] = 0.

    ffss[ffss == 0] = np.nan
    fluxcal[~np.isfinite(ffss)] = np.nan
    skysub[~np.isfinite(ffss)] = np.nan
    print('Finite ffss: ', ffss[np.isfinite(ffss)].size / ffss.size)
    weights = error**(-2.)
    weights[~np.isfinite(weights)] = 0.
    weights[~np.isfinite(ffss)] = 0.
    weights[weights > 1.] = 0.  # new !!!

    #print('min {}, med {}, max {}, std {}, biscale {}'.format(np.nanmin(weights),np.nanmedian(weights), np.nanmax(weights), np.nanstd(weights), biweight_scale(weights[np.isfinite(weights)])))

    if args.continuumcut > 0:
        wlhere = (def_wave >= 4000) & (def_wave < 5000)
        continuum = np.nanmedian(ffss[:, wlhere], axis=1)
        print(
            f"\nBefore continuum cut of {args.continuumcut}: {ffss[np.isfinite(ffss)].size/ffss.size}"
        )
        ffss[abs(continuum) > args.continuumcut] *= np.nan
        print(
            f"After continuum cut of {args.continuumcut}: {ffss[np.isfinite(ffss)].size/ffss.size}\n"
        )

    imlist, weightlist = [[] for i in range(len(distances) - 1)
                          ], [[] for i in range(len(distances) - 1)]
    callist = [[] for i in range(len(distances) - 1)]
    amplist = [[] for i in range(len(distances) - 1)]
    # loop through LAEs in this shot

    shotid = int(shot[:-4] + shot[-3:])
    tmptab = laetab[laetab["shotid"] == shotid]
    for lae_idx in range(len(tmptab)):
        #print(f"lae number {lae_idx} in {shot}")
        # define ra_lae and dec_lae etc.
        ifu = str(tmptab["ifuslot"][lae_idx])
        if len(ifu) == 2:
            ifu = "0" + ifu
        if args.error:
            idxs = np.where(ifuslots == ifu)[0]
            print("len idxs: {}".format(len(idxs)))
            #np.where(np.array([x[10:13] for x in multis]) == ifu)[0]
            for iternum in range(20):
                diff0 = random.randint(0, len(idxs) - 1)
                diff1 = idxs[diff0]
                lae_fibnum, ralae, declae = fibidx[diff1], ras[diff1], decs[
                    diff1]
                wave = def_wave[random.randint(0, 1035)]
                zlae = wave / 1215.67 - 1

                radiff, decdiff = ras - ralae, decs - declae
                diff = np.sqrt(radiff**2 + decdiff**2)
                if args.kpc:
                    diff = kpc_proper_per_deg(zlae)[0] * diff

                wlhere = abs(def_wave - wave) <= 2.5
                #longhere = (abs(def_wave - wave) <= 100. )&( ~wlhere)
                #print("len(longhere[longhere])", len(longhere[longhere]))

                # loop through distances and append fluxes to imlist, and weights to weightlist
                for counter, dist in enumerate(distances[:-1]):
                    here = (diff > distances[counter]) & (
                        diff <= distances[counter + 1])
                    for flux_0, weight_0, cal_0, amp_0 in zip(
                            ffss[here], weights[here], fluxcal[here],
                            skysub[here]):
                        #tmplong = flux_0[longhere][np.isfinite(flux_0[longhere])]
                        imlist[counter].append(
                            flux_0[wlhere])  #- biweight_location(tmplong))
                        weightlist[counter].append(weight_0[wlhere])
                        #tmpcallong = cal_0[longhere][np.isfinite(cal_0[longhere])]
                        callist[counter].append(
                            cal_0[wlhere])  #- biweight_location(tmpcallong))
                        amplist[counter].append(
                            amp_0[wlhere])  #- biweight_location(tmpcallong))

        else:
            ifu, wave, ralae, declae = str(tmptab["ifuslot"][lae_idx]), tmptab[
                "wave"][lae_idx], tmptab["ra"][lae_idx], tmptab["dec"][lae_idx]
            amp = tmptab["amp"][lae_idx]
            zlae = wave / 1215.67 - 1

            radiff, decdiff = ras - ralae, decs - declae
            diff = np.sqrt(radiff**2 + decdiff**2)
            if args.kpc:
                diff = kpc_proper_per_deg(zlae)[0] * diff

            wlhere = abs(def_wave - wave) <= 2.5
            #longhere = (abs(def_wave - wave) <= 100. )&( ~wlhere)
            #print("len(longhere[longhere])", len(longhere[longhere]))

            # loop through distances and append fluxes to imlist, and weights to weightlist
            for counter, dist in enumerate(distances[:-1]):
                here = (diff > distances[counter]) & (diff <=
                                                      distances[counter + 1])
                for flux_0, weight_0, cal_0, amp_0 in zip(
                        ffss[here], weights[here], fluxcal[here],
                        skysub[here]):
                    #tmplong = flux_0[longhere][np.isfinite(flux_0[longhere])]
                    imlist[counter].append(
                        flux_0[wlhere])  #-biweight_location(tmplong))
                    weightlist[counter].append(weight_0[wlhere])
                    #tmpcallong = cal_0[longhere][np.isfinite(cal_0[longhere])]
                    callist[counter].append(
                        cal_0[wlhere])  #- biweight_location(tmpcallong))
                    amplist[counter].append(
                        amp_0[wlhere])  #- biweight_location(tmpcallong))

    try:
        imlist = [np.concatenate(x) for x in imlist]
        weightlist = [np.concatenate(x) for x in weightlist]
        callist = [np.concatenate(x) for x in callist]
        amplist = [np.concatenate(x) for x in amplist]
    except ValueError:
        imlist_2 = []
        weightlist_2 = []
        callist_2 = []
        amplist_2 = []
        for im, we, ca, am in zip(imlist, weightlist, callist, amplist):
            if len(im) > 0:
                imlist_2.append(np.concatenate(im))
            else:
                imlist_2.append([])
            if len(we) > 0:
                weightlist_2.append(np.concatenate(we))
            else:
                weightlist_2.append([])
            if len(ca) > 0:
                callist_2.append(np.concatenate(ca))
            else:
                callist_2.append([])
            if len(am) > 0:
                amplist_2.append(np.concatenate(am))
            else:
                amplist_2.append([])

        imlist, weightlist = imlist_2, weightlist_2
        callist = callist_2
        amplist = amplist_2

    try:
        radial_biw = []
        radial_maja = []
        radial_karl = []
        cal_biw = []
        cal_karl = []
        amp_karl = []
        sigma = []
        numbers = []
        median = []
        for x in range(len(imlist)):
            if len(imlist[x]) > 0:
                radial_biw.append(
                    biweight_location(imlist[x][np.isfinite(imlist[x])]))
                radial_karl.append(
                    biweight_location_weights_karl(imlist[x], weightlist[x]))
                radial_maja.append(
                    biweight_location_weights(imlist[x], weightlist[x]))
                cal_biw.append(
                    biweight_location(callist[x][np.isfinite(callist[x])]))
                cal_karl.append(
                    biweight_location_weights_karl(callist[x], weightlist[x]))
                sigma.append(biweight_scale(imlist[x][np.isfinite(imlist[x])]))
                numbers.append(len(imlist[x]))
                median.append(np.nanmedian(imlist[x]))
                amp_karl.append(
                    biweight_location_weights_karl(amplist[x], weightlist[x]))
            else:
                radial_biw.append(0)
                radial_karl.append(0)
                radial_maja.append(0)
                cal_biw.append(0)
                cal_karl.append(0)
                sigma.append(0)
                numbers.append(0)
                median.append(0)
                amp_karl.append(0)

        if args.kpc:
            dist_string = "delta_r[kpc]"
        else:
            dist_string = "deltatheta"
        #ascii.write({"karl":radial_karl,"maja":radial_maja,"flux_biw": radial_biw, "sigma": sigma, dist_string:distances[:-1], 'number fibers':numbers, 'median':median, "cal_biw":cal_biw, "cal_karl":cal_karl, "amp_karl":amp_karl}, "radials_sub/{}.dat".format(idx), overwrite=True)
    except Exception as e:
        print("Error: {}".format(e))
        pass

    en = time.time()
    print("Time needed in loop: " + str(en - st))
    print("\nfinished {}.\n".format(idx))
    return imlist, weightlist, callist, amplist
Example #14
for counter in range(len(imlists)):
    fluxes, weights = imlists[counter], weightlists[counter]
    cals = callists[counter]
    ampws = amplists[counter]
    if len(fluxes) == 0:
        # keep all output lists aligned when a distance bin is empty
        for lst in (radial_karl, radial_maja, radial_biw, median, cal_biw,
                    cal_karl, cal_sigma, amp_karl, amp_sigma, numbers, sigma):
            lst.append(0)
        continue

    radial_karl.append(biweight_location_weights_karl(fluxes, weights))
    radial_maja.append(biweight_location_weights(fluxes, weights))
    radial_biw.append(biweight_location(fluxes[np.isfinite(fluxes)]))
    median.append(np.nanmedian(fluxes))
    cal_biw.append(biweight_location(cals[np.isfinite(cals)]))
    cal_karl.append(biweight_location_weights_karl(cals, weights))
    cal_sigma.append(biweight_scale(cals[np.isfinite(cals)]))
    amp_karl.append(biweight_location_weights_karl(ampws, weights))
    amp_sigma.append(biweight_scale(ampws[np.isfinite(ampws)]))

    N = len(fluxes[np.isfinite(fluxes)])
    numbers.append(N)
    std = biweight_scale(fluxes[np.isfinite(fluxes)])
    sigma.append(std)

dist_string = "deltatheta"
rstring = "deg"
if args.kpc:
    dist_string = "delta_r[kpc]"
    rstring = "kpc"

metadict = {
Example #15
def tmp():
    # For each spectrum, find the offset that will give the best
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(nobjs // 2, 2, figsize=(16, 4 * nobjs / 2))
    corrarr = np.zeros((ntrace, nx))
    offsets = np.zeros(ntrace)
    for iobj in range(nobjs):
        ax = axes.flat[iobj]
        ax.set_title(str(iobj))
        # this is the slice
        iy1, iy2 = iobj * norder, (iobj + 1) * norder
        for iorder in range(norder):
            xarr = xarrlist[iorder]
            y = onedarcs[iy1 + iorder, xarr]
            y = y - np.nanmean(y)
            # cross correlate. Doesn't seem to care about saturated lines
            xmid = np.arange(len(xarr)) - len(xarr) // 2
            corr = signal.correlate(y, refarcs[iorder, xarr], "same")
            l, = ax.plot(xmid, corr)
            corrarr[iobj * norder + iorder, xarr] = corr
            imax = np.argmax(corr)
            out = gaussfit(xmid, corr, [np.max(corr), xmid[imax], 10])
            ax.set_ylim(ax.get_ylim())
            ax.plot([out[1], out[1]], ax.get_ylim(), color='k', lw=1)
            offsets[iobj * norder + iorder] = out[1]

    fig.savefig("correlation.png", dpi=300)
    plt.close(fig)

    # My ThArNe arcs are highly saturated in the red (even for the short exposures).
    # So I need to mask out the saturated Ne lines.
    # If a feature is similar strength across all orders for one object, it is from saturated overspilling
    # Use this to construct a mask manually for the 90s arcs.
    maskranges = [(22, 33), (74, 90), (232, 245), (259, 270), (364, 377),
                  (425, 437), (605, 620), (700, 712), (770, 785), (1048, 1065),
                  (1110, 1141), (1315, 1335), (1373, 1400), (1471, 1500),
                  (1611, 1620), (1629, 1654), (1823, 1855)]
    mask = np.zeros_like(onedarcs, dtype=bool)
    for mr in maskranges:
        for itrace, offset in enumerate(offsets):
            mr0 = int(mr[0] + offset)
            mr1 = int(mr[1] + offset)
            mask[itrace, mr0:mr1] = True
    maskarcs = onedarcs.copy()
    maskarcs[mask] = np.nan
    fig, ax = plt.subplots()
    plt.imshow(maskarcs, origin='lower', aspect='auto')
    plt.colorbar()
    fig.savefig("maskarcs.png")
    plt.close(fig)

    # find peak locations
    all_peak_locations = []
    fig, axes = plt.subplots(nobjs, norder, figsize=(8 * norder, 3 * nobjs))
    for itrace in range(ntrace):
        iorder = itrace % norder
        iobj = int(itrace / norder)
        xarr = xarrlist[iorder]
        yarr = maskarcs[itrace][xarr]

        mask = np.isnan(yarr)
        yarr[mask] = 0.
        yarr = yarr - ndimage.median_filter(yarr, 100)
        yarr[mask] = 0.

        dyarr = np.gradient(yarr)
        noise = biweight_scale(yarr)
        thresh = 10. * noise

        this_linelist = linelist[linelist["order"] == iorder + 1]
        w0s = this_linelist["wavetrue"]
        x0s = this_linelist["X"] + offsets[itrace]

        ii1 = yarr > thresh
        ii2 = dyarr >= 0
        ii3 = np.zeros_like(ii2)
        ii3[:-1] = dyarr[1:] < 0
        peaklocs = ii1 & ii2 & ii3
        peaklocs[mask] = False
        peakindices = np.where(peaklocs)[0]
        numpeaks = peaklocs.sum()
        print("{:3}: noise={:.3f}, {} peaks".format(itrace, noise, numpeaks))
        peak_locations = []
        window = 5  # pixel window for fitting peak location
        maxpixdiff = 17  # pixel window for matching to a line
        for ipeak, ix in enumerate(peakindices):
            _xx = xarr[ix - window:ix + window + 1]
            _yy = yarr[ix - window:ix + window + 1]
            xloc = xarr[ix]
            guess = [yarr[ix], xloc, 2, 0.]
            try:
                popt = gaussfit(_xx, _yy, guess)
            except (RuntimeError, TypeError):
                print("       Failed to fit trace {} line {}/{} at {}".format(
                    itrace, ipeak, numpeaks, xloc))
                continue
            if np.abs(xloc - popt[1]) > 3 or popt[0] < 0:
                print("       Bad fit for trace {} line {}/{} at {}".format(
                    itrace, ipeak, numpeaks, xloc))
                continue
            closest_line = np.argmin(np.abs(x0s - popt[1]))
            x0 = x0s[closest_line]
            w0 = w0s[closest_line]
            if np.abs(x0 - popt[1]) > maxpixdiff:
                print(
                    "       Bad line match for trace {} line {}/{} at {}: w0={:.1f} x0={:.1f} fit={:.1f}"
                    .format(itrace, ipeak, numpeaks, xloc, w0, x0, popt[1]))
                continue
            peak_locations.append((xloc, x0, w0, popt[1], popt[0], popt[2]))
        ax = axes[iobj, iorder]
        ax.plot(xarr, yarr, lw=.7)
        for loc in peak_locations:
            ax.axvline(loc[1], color='r', lw=.3)
            ax.axvline(loc[3], color='b', lw=.3)
        all_peak_locations.append(peak_locations)
        for x0 in x0s:
            ax.axvline(x0, 0, .1, color='k', lw=.3)
        for mr in maskranges:
            mr0 = int(mr[0] + offsets[itrace])
            mr1 = int(mr[1] + offsets[itrace])
            ax.axvspan(mr0, mr1, 0, 1, color='grey', alpha=.3)
        ax.set_xlim(xarr[0], xarr[-1])

    fig.savefig("arcmatch.pdf", bbox_inches="tight")
    print(list(map(len, all_peak_locations)))
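
A small sketch of why biweight_scale is used for the peak threshold above: with bright arc lines in the 1D spectrum, a plain standard deviation overestimates the noise, while the biweight scale tracks the continuum noise; numbers are made up.

import numpy as np
from astropy.stats import biweight_scale

rng = np.random.default_rng(2)
yarr = rng.normal(0.0, 5.0, 2000)
yarr[::100] += 500.0                          # bright (or saturated) arc lines
print(np.std(yarr), biweight_scale(yarr))     # ~50 vs ~5
thresh = 10. * biweight_scale(yarr)           # robust peak-finding threshold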
Example #16
def rescale_snr(specwave,
                flux=None,
                ivar=None,
                x1=None,
                x2=None,
                cont=None,
                cont_kernel=51,
                cont_Niter=3,
                make_fig=False,
                **sigma_clip_kwargs):
    """
    Take the biweight standard deviation of x_i/sigma_i of the sigma-clipped data
    and rescale ivar so that this standard deviation is 1.
    Returns (Spectrum1D, noise), or (fig, Spectrum1D, noise) if make_fig=True.
    """
    if flux is None and ivar is None:
        assert isinstance(specwave, Spectrum1D)
        spec = specwave
        wave, flux, ivar = spec.dispersion, spec.flux, spec.ivar
        meta = spec.metadata
    else:
        wave = specwave
        assert len(wave) == len(flux)
        assert len(wave) == len(ivar)
        meta = OrderedDict({})
    if cont is None:
        cont = fast_find_continuum(flux, cont_kernel, cont_Niter)
    else:
        assert len(cont) == len(flux)
    errs = ivar**-0.5
    errs[errs > 10 * flux] = np.nan

    iirescale = np.ones_like(wave, dtype=bool)
    if x1 is not None: iirescale = iirescale & (wave > x1)
    if x2 is not None: iirescale = iirescale & (wave < x2)

    # normalize over the full wavelength range (the diagnostic plots below use
    # the full arrays), but estimate the noise only inside the x1 < wave < x2 window
    norm = flux / cont
    normerrs = errs / cont

    z = (norm - 1.) / normerrs
    zfit = z[iirescale]
    clipped = sigma_clip(zfit[np.isfinite(zfit)], **sigma_clip_kwargs)
    noise = biweight_scale(clipped[~clipped.mask])
    print("Noise is {:.2f} compared to 1.0".format(noise))

    new_ivar = ivar / (noise**2.)

    outspec = Spectrum1D(wave, flux, new_ivar, meta)
    if make_fig:
        newz = z / noise
        newerrs = errs * noise
        newnormerrs = normerrs * noise

        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(2, 3, figsize=(12, 6))
        ax = axes[0, 0]
        ax.plot(wave, flux)
        ax.plot(wave, errs)
        ax.plot(wave, newerrs)
        ax.plot(wave, cont, color='k', ls=':')
        ax.set_xlabel('wavelength')
        ax.set_ylabel('counts')
        ax = axes[1, 0]
        ax.plot(wave, norm)
        ax.plot(wave, normerrs)
        ax.plot(wave, newnormerrs)
        ax.axhline(1, color='k', ls=':')
        ax.set_xlabel('wavelength')
        ax.set_ylabel('norm')
        ax.set_ylim(0, 1.2)
        ax = axes[1, 1]
        ax.plot(wave, z)
        ax.plot([np.nan], [np.nan])  # hack to get the right color
        ax.plot(wave, newz)
        ax.axhline(0, color='k', ls=':')
        ax.set_xlabel('wavelength')
        ax.set_ylabel('z')
        ax.set_ylim(-7, 7)

        ax = axes[0, 1]
        bins = np.linspace(-7, 7, 100)
        binsize = np.diff(bins)[1]
        ax.plot(bins,
                norm_distr.pdf(bins) * np.sum(np.isfinite(z)) * binsize,
                color='k')
        ax.hist(z[np.isfinite(z)], bins=bins)
        ax.hist(clipped[~clipped.mask], bins=bins, histtype='step')
        ax.hist(newz[np.isfinite(newz)], bins=bins, histtype='step')
        ax.set_xlabel('z')
        ax.set_xlim(-7, 7)

        ax = axes[0, 2]
        zfinite = z.copy()
        zfinite[~np.isfinite(zfinite)] = 0.
        autocorr = np.correlate(zfinite, zfinite, mode="same")
        ax.plot(np.arange(len(flux)), autocorr, '.-')
        ax.axvline(len(flux) // 2)
        ax.set_xlim(
            len(flux) // 2 - 10,
            len(flux) // 2 + 10,
        )
        ax.set_xlabel("pixel")
        ax.set_ylabel("autocorrelation(z)")

        z1, z2 = -10, 10
        zarr1 = np.zeros((len(z) - 1, 2))
        zarr1[:, 0] = z[:-1]
        zarr1[:, 1] = z[1:]
        zarr1 = zarr1[np.sum(np.isfinite(zarr1), axis=1) == 2]
        zarr2 = np.zeros((len(z) - 2, 2))
        zarr2[:, 0] = z[:-2]
        zarr2[:, 1] = z[2:]
        zarr2 = zarr2[np.sum(np.isfinite(zarr2), axis=1) == 2]
        #ax = axes[0,2]
        #ax.plot([z1,z2],[z1,z2],'k:')
        #ax.plot(z[:-1], z[1:], '.', alpha=.3)
        #ax.set_title("r={:+.2}".format(pearsonr(zarr1[:,0],zarr1[:,1])[0]))
        #ax.set_xlabel("z(pixel)"); ax.set_ylabel("z(pixel+1)")
        #ax.set_xlim(z1,z2); ax.set_ylim(z1,z2)

        ax = axes[1, 2]
        ax.plot([z1, z2], [z1, z2], 'k:')
        ax.plot(z[:-2], z[2:], '.', alpha=.3)
        ax.set_title("r={:+.2}".format(pearsonr(zarr2[:, 0], zarr2[:, 1])[0]))
        ax.set_xlabel("z(pixel)")
        ax.set_ylabel("z(pixel+2)")
        ax.set_xlim(z1, z2)
        ax.set_ylim(z1, z2)

        fig.tight_layout()

        return fig, outspec, noise

    return outspec, noise
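
A sketch, with synthetic numbers, of the rescaling identity used in rescale_snr above: if the scatter of z = (flux - cont) / err is some factor `noise` instead of 1, dividing ivar by noise**2 inflates the errors so that the rescaled z has unit scatter.

import numpy as np
from astropy.stats import biweight_scale

rng = np.random.default_rng(1)
err = np.full(5000, 0.1)
flux = 1.0 + 1.7 * err * rng.standard_normal(5000)   # errors underestimated by 1.7x
z = (flux - 1.0) / err
noise = biweight_scale(z)                            # ~1.7
new_err = (err**(-2) / noise**2)**(-0.5)             # equivalent to err * noise
print(noise, biweight_scale((flux - 1.0) / new_err)) # second value ~1.0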
Example #17
 def dispersion(val_array):
     if (self.disp_func == "biweight"):
         dis = stat.biweight_scale(val_array)
     else:
         dis = np.std(val_array)
     return dis
Example #18
def plot_errors(pasta, param, x, y, ymin=-3, ymax=3, label_x='label_x', label_y='label_y', file='test', ticks=0):
    results_strings = np.loadtxt('results/' + pasta + '/clusters_output.txt', usecols=[0, 1], dtype=str)
    results_floats = np.loadtxt('results/' + pasta + '/clusters_output.txt', usecols=[2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                                dtype=float)
    param_table_name = np.loadtxt('results/parameter-cat.txt', usecols=[0], dtype=str)
    param_table_ebv = np.loadtxt('results/parameter-cat.txt', usecols=[7], dtype=float)
    results = np.hstack((results_strings, results_floats))
    mmm_Z = np.zeros((len(results_floats)))
    mmm_age = np.zeros((len(results_floats)))
    ebv_results = np.zeros((len(results_floats)))
    for i in range(len(results_floats)):
        cluster_locator = 0  # locating the cluster in the literature table
        while (results_strings[i, 0] != param_table_name[cluster_locator]):
            cluster_locator = cluster_locator + 1
        ebv_results[i] = param_table_ebv[cluster_locator]
        mmm_Z[i] = np.mean((results_floats[i, 1], results_floats[i, 2]))
        mmm_age[i] = np.mean((results_floats[i, 6], results_floats[i, 7]))

    fig, ax = plt.subplots(ncols=1)
    ax.set_xlabel(label_x, fontsize=15)
    ax.set_ylabel(label_y, fontsize=15)
    points = plt.scatter(x, y, marker='o', c=ebv_results, cmap='inferno_r')
    # if(param!='ebv'):
    cb = plt.colorbar(points)
    cb.ax.set_ylabel('ref_E(B-V)', fontsize=15)
    location = biweight_location(y)
    scale = biweight_scale(y)

    if (param == 'Z'):
        if (ticks == 1):
            ax.hlines(location + scale, 0, len(results_floats[:, 1]), label='biweight scale=' + str(scale))
            ax.hlines(location - scale, 0, len(results_floats[:, 1]))
            ax.hlines(location, 0, len(results_floats[:, 1]), color='blue',
                      label='biweight location=' + str(location))
            ax.set_xticks(x)
            ax.set_xticklabels((results_strings[:, 0]), rotation='vertical', fontsize=8)
        else:
            ax.hlines(location + scale, np.min(results_floats[:, 0]), np.max(results_floats[:, 0]),
                      label='biweight scale=' + str(scale))
            ax.hlines(location - scale, np.min(results_floats[:, 0]), np.max(results_floats[:, 0]))
            ax.hlines(location, np.min(results_floats[:, 0]), np.max(results_floats[:, 0]), color='blue',
                      label='biweight location=' + str(location))

    if (param == 'age'):
        if (ticks == 1):
            ax.hlines(location + scale, 0, len(results_floats[:, 1]), label='biweight scale=' + str(scale))
            ax.hlines(location - scale, 0, len(results_floats[:, 1]))
            ax.hlines(location, 0, len(results_floats[:, 1]), color='blue', label='biweight location=' + str(location))
            ax.set_xticks(x)
            ax.set_xticklabels((results_strings[:, 0]), rotation='vertical', fontsize=8)
        else:
            ax.hlines(location + scale, np.min(results_floats[:, 5]), np.max(results_floats[:, 5]),
                      label='biweight scale=' + str(scale))
            ax.hlines(location - scale, np.min(results_floats[:, 5]), np.max(results_floats[:, 5]))
            ax.hlines(location, np.min(results_floats[:, 5]), np.max(results_floats[:, 5]), color='blue',
                      label='biweight location=' + str(location))

    plt.ylim(ymin, ymax)
    plt.legend()
    plt.tight_layout()
    plt.savefig('results/' + pasta + '/' + file + '.png', dpi=300, bbox_inches='tight')
    plt.close()
norm_values = np.concatenate(norm_values)
order = np.argsort(roverfwhm)
roverfwhm = roverfwhm[order]
norm_values = norm_values[order]

# get the PSF
medfilt = []
lower = []
upper = []
Ns = []
errs = []
xbins = np.arange(0, 5, 0.02)
for x, y in zip(xbins[:-1], xbins[1:]):
    here = (roverfwhm>=x)&(roverfwhm<y)
    loc = biweight_location(norm_values[here])
    scale = biweight_scale(norm_values[here])
    N = len(here[here])
    Ns.append(N)
    medfilt.append(loc)
    err = np.sqrt(scale / (N-1))
    lower.append(loc-err)
    upper.append(loc+err)
    errs.append(err)


SAVE = False

# plot the result
plt.figure()
plt.plot(roverfwhm, norm_values, ".k", alpha=0.02)
plt.axhline(0, color="gray", linestyle=":")
Example #20
    def calc_background_rms(self, data, axis=None):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis)

        return biweight_scale(data, c=self.c, M=self.M, axis=axis)
# normalize: r -> r/FWHM and flux -> flux/amp
roverfwhm, stars_normed = [], []
for i in np.where(keepthese)[0]:
    roverfwhm.append(np.sqrt(star_rsqs[i])/fwhms[i])
    stars_normed.append(star_fluxes[i]/amps[i])

# combine all
all_roverfwhm = np.concatenate(roverfwhm)
all_stars_normed = np.concatenate(stars_normed)
order = np.argsort(all_roverfwhm)
all_roverfwhm = all_roverfwhm[order]
all_stars_normed = all_stars_normed[order]

# get the running biweight and its error in small bins
rbins = np.arange(0,5,0.02)
runbiw = []
runbiw_err = []
for rmin, rmax in zip(rbins[:-1], rbins[1:]):
    here = (all_roverfwhm>=rmin)&(all_roverfwhm<rmax)
    runbiw.append(biweight_location(all_stars_normed[here]))
    runbiw_err.append(np.sqrt(biweight_scale(all_stars_normed[here])/(len(here[here])-1)))
runbiw, runbiw_err = np.array(runbiw), np.array(runbiw_err)

if SAVE:
    # save in a file (the same file)
    print("r/fwhm the same?: ", psf_shape["r/fwhm"] == rbins[:-1])
    # they are the same
    psf_shape["runbiw_int_ff_2.1"] = runbiw
    psf_shape["runbiw_int_ff_err_2.1"] = runbiw_err
    ascii.write(psf_shape, "PSF_runbiw.dat", overwrite=True)
Example #22
    def calc_background_rms(self, data, axis=None):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis)

        return biweight_scale(data, c=self.c, M=self.M, axis=axis)
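
The calc_background_rms methods in Examples #1, #7, #20 and #22 look like they come from photutils' BiweightScaleBackgroundRMS class; if so, a hedged usage sketch would be:

import numpy as np
from astropy.stats import SigmaClip
from photutils.background import BiweightScaleBackgroundRMS

data = np.random.default_rng(4).normal(0.0, 2.5, (100, 100))
bkgrms = BiweightScaleBackgroundRMS(sigma_clip=SigmaClip(sigma=3.0))
print(bkgrms.calc_background_rms(data))   # roughly 2.5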
Example #23
cbarG = fig.colorbar(pG, ax=ax2)
cbarG.ax.tick_params(labelsize=6)
cbarG.ax.set_title(r'$\Delta v_\mathrm{los}\,(\mathrm{km/s})$', fontsize=7)

#ax2.scatter(x[gaG]-X[indG], y[gaG]-Y[indG], s=r[gaG]/10., linewidths=0, alpha=0.5)

ax2.set_aspect('equal')

ax2.text(0.02, 0.93, 'G$_{1D}$', fontsize=8, fontname='stix', family='sans-serif', transform=ax2.transAxes)
ax2.text(0.9, 0.93, 'b.', fontsize=8, fontname='stix', family='sans-serif', transform=ax2.transAxes)

fig.savefig('/2/home/idroberts/mdpl2/plots/halo_image.png', dpi=800, bbox_inches='tight')

######

sigNG = biweight_scale(newvz_ng)
sigG = biweight_scale(newvz_g)

#kdeNG = st.gaussian_kde((newvz_ng - newvz_par_ng)/sigNG)
kdeNG = st.gaussian_kde((newvz_ng - np.average(newvz_ng))/sigNG)
kdeG = st.gaussian_kde((newvz_g - np.average(newvz_g))/sigG)

x = np.linspace(-3.8, 3.8, 500)

cdfNG = cumtrapz(kdeNG(x), x=x, initial=0)
cdfG = cumtrapz(kdeG(x), x=x, initial=0)

fig = plt.figure()

grid = gs.GridSpec(2,2)
grid.update(wspace=0.2)