Example #1
def make_plot(days_ago, dates, mag):
    print('Making plot...')
    time_span = np.max(dates) - np.min(dates)
    flatten_lc, trend_lc = flatten(
        days_ago,
        mag,
        method='lowess',
        window_length=time_span/3,
        return_trend=True,
        )
    plt.scatter(days_ago, mag, s=5, color='blue', alpha=0.5)

    plt.scatter(days_ago1, all_mags1, s=10, color='black', alpha=0.8, marker="x")
    plt.xlabel('Days before today')
    plt.ylabel('Visual magnitude')
    mid = biweight_location(mag)
    plt.ylim(mid-1, mid+1)
    plt.xlim(-1, 20)
    plt.plot(days_ago, trend_lc, color='red', linewidth=1)
    plt.gca().invert_yaxis()
    plt.gca().invert_xaxis()
    date_text = datetime.datetime.now().strftime("%d %b %Y")
    data_last24hrs = np.where(days_ago<1)
    mean_last24hrs = biweight_location(mag[data_last24hrs])
    lumi = str(format(mean_last24hrs, '.2f'))
    plt.text(19.5, mid+1-0.25, "AAVSO observations visual (by-eye) in blue", color='blue')
    plt.text(19.5, mid+1-0.15, "AAVSO observations from CCDs in black", color='black')
    plt.text(19.5, mid+1-0.05, "LOESS trend in red", color='red')
    plt.text(19.5, mid-1+0.1, '#Betelgeuse brightness ' + lumi + " mag on " + date_text + " by @betelbot")
    plt.savefig(plot_file, bbox_inches='tight', dpi=300)
    print('Done.')
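The robust 24-hour mean above comes from biweight_location. A minimal, self-contained sketch with synthetic data (not the bot's real AAVSO feed) showing why the robust estimator is preferred over a plain mean:

# Synthetic-data sketch: biweight_location vs. np.mean with one bad observation.
import numpy as np
from astropy.stats import biweight_location

mags = np.array([0.58, 0.61, 0.57, 0.60, 0.59, 3.0])  # 3.0 is an outlier
print(np.mean(mags))            # ~0.99, dragged up by the outlier
print(biweight_location(mags))  # ~0.59, the outlier is down-weighted to near zero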
Example #2
def plot_2D_offsets(df, stat_key, prefices=["Delta_x_", "Delta_y_"],
                    suffices=["_x", "_y"], prefix_bool=1):
    for i, key in enumerate(stat_key) :
        plt.axes().set_aspect('equal')
        if prefix_bool:
            fixed_keys = [prefices[0] + key, prefices[1] + key]
        else:
            fixed_keys = [key + suffices[0], key + suffices[1]]

        plt.plot(
            np.array(df[fixed_keys[0]]),
            np.array(df[fixed_keys[1]]), 'b.', alpha=0.05
            )
        biweight_loc = (
            biweight_location(df[fixed_keys[0]]),
            biweight_location(df[fixed_keys[1]]))

        # The red cross is the biweight location along each dimension
        plt.plot(biweight_loc[0], biweight_loc[1],
             'rx', mew=2.)
        plt.tick_params(labeltop='off', labelright='off')
        plt.axes().yaxis.set_ticks_position('left')
        plt.axes().xaxis.set_ticks_position('bottom')
        plt.xlim(-300, 300)
        plt.ylim(-300, 300)
        plt.title(key + ', biweight_loc = {0:.2f}, {1:.2f}'.format(
            *biweight_loc))

        plt.show()
        plt.clf()
Example #3
def plot_hist_errors(pasta, y_Z, y_age, y_Zmin=-3, y_Zmax=3, y_agemin=-3, y_agemax=3,
                     label_Z='label_Z', label_age='label_age', file='test'):
    location = [biweight_location(y_Z), biweight_location(y_age)]
    scale = [biweight_scale(y_Z), biweight_scale(y_age)]

    fig = plt.figure(constrained_layout=False)
    gs = fig.add_gridspec(nrows=1, ncols=2)  # , width_ratios=[2, 1], height_ratios=[1, 2])
    a = fig.add_subplot(gs[0, 0])
    n, bins_Z, patches = plt.hist(y_Z, orientation='vertical', color='khaki')
    plt.vlines(location[0] + scale[0], 0, np.max(n), label='scale=' + str(scale[0]))
    plt.vlines(location[0] - scale[0], 0, np.max(n))
    plt.vlines(location[0], 0, np.max(n), color='blue', label='location=' + str(location[0]))
    plt.xlabel(label_Z)
    plt.xlim(y_Zmin, y_Zmax)
    plt.legend()
    a = fig.add_subplot(gs[0, 1])
    n, bins_age, patches = plt.hist(y_age, orientation='vertical', color='khaki')
    plt.vlines(location[1] + scale[1], 0, np.max(n), label='scale=' + str(scale[1]))
    plt.vlines(location[1] - scale[1], 0, np.max(n))
    plt.vlines(location[1], 0, np.max(n), color='blue', label='location=' + str(location[1]))
    plt.legend()
    plt.xlabel(label_age)
    plt.xlim(y_agemin, y_agemax)
    # plt.legend()
    plt.tight_layout()
    plt.savefig('results/' + pasta + '/' + file + '.png', dpi=300, bbox_inches='tight')
    plt.close()
Example #4
File: LCSbiweight.py  Project: rfinn/LCS
def getbiweight(z):
    biweightscale=biweight_midvariance(z)
    biweightlocation=biweight_location(z)

    flag=abs(z-biweightlocation)/biweightscale < scale_cut
    #flag=np.ones(len(z),'bool')
    repeatflag=1
    nloop=0
    #print biweightlocation, biweightscale
    oldbiweightscale=biweightscale
    while repeatflag:
        newdata=z[flag]
        biweightscale=biweight_midvariance(newdata, M=biweightlocation)
        biweightlocation=biweight_location(newdata, M=biweightlocation)
        oldflag = flag
        #flag=abs(z-biweightlocation)/biweightscale < scale_cut
        nloop += 1
        #print nloop, biweightlocation, biweightscale, len(newdata), sum(flag)
        #if sum(flag == oldflag) == len(flag): 
        #    repeatflag=0
        #    print 'nloop = ', nloop
        if abs(biweightscale - oldbiweightscale) < 1.: 
            repeatflag=0
            #print 'nloop = ', nloop
        if nloop > 5:
            repeatflag = 0
        oldbiweightscale=biweightscale
        #print nloop, biweightlocation, biweightscale
    #flag=abs(z-biweightlocation)/biweightscale < 4.
    #biweightscale=biweight_midvariance(z[flag],M=biweightlocation)
    return biweightlocation, biweightscale
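The loop above re-centres the biweight statistics on the previous location via the M keyword (scale_cut is a module-level constant in the original script). A short sketch of that keyword in isolation, with synthetic velocities:

import numpy as np
from astropy.stats import biweight_location, biweight_midvariance

z = np.random.normal(0., 500., size=200)   # e.g. galaxy velocities in km/s
loc = biweight_location(z)
scale = biweight_midvariance(z, M=loc)     # recomputed around the robust location
loc = biweight_location(z, M=loc)          # one refinement step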
Example #5
File: hetobs.py  Project: grzeimann/HETobs
def find_matches(sources, xc, yc):
    ''' Matching sources using closest neighbor and clipping '''
    dx = (sources['xcentroid'][:, np.newaxis] - xc)
    dy = (sources['ycentroid'][:, np.newaxis] - yc)
    dd = np.sqrt(dx**2 + dy**2)
    ind = np.argmin(dd, axis=1)
    dxk = dx[np.arange(dx.shape[0]), ind]
    dyk = dy[np.arange(dy.shape[0]), ind]
    bvx = biweight_midvariance(dxk)
    bvy = biweight_midvariance(dyk)
    mvx = biweight_location(dxk)
    mvy = biweight_location(dyk)
    sel = np.where(
        (np.abs(dxk - mvx) < 3. * bvx) * (np.abs(dyk - mvy) < 3. * bvy))[0]
    return sel, ind[sel]
Example #6
def plot_linearity(scifile, ivmfile):
    """
    Assumes sky-subtracted images in counts/sec and corresponding IVM
    :param scifile: science image file (cts/sec)
    :param ivmfile: inverse variance map
    """
    scih = fits.open(scifile)
    ivmh = fits.open(ivmfile)
    exptime = scih[0].header['EXPTIME']
    scidata = scih[0].data
    vardata = 1/ivmh[0].data
    skyvar = biweight_location(vardata)
    snr_cps = scidata / np.sqrt(vardata - skyvar)
    # Only plot pixels that are at least 2 sigma above sky
    mask = scidata > 2*np.sqrt(skyvar)
    pp.scatter(scidata[mask].flat, snr_cps[mask].flat,
               label='Predicted (Data*sqrt(Weight))')
    # Idealized (poisson) SNR model is SNR(cts) = sqrt(cts)
    x_ideal = np.linspace(0, scidata[mask].max(), 200)
    y_ideal = np.sqrt(x_ideal*exptime)
    pp.plot(x_ideal, y_ideal,
            label='Ideal (sqrt(Counts))')
    pp.xlabel('Signal (counts/sec)')
    pp.ylabel('SNR (sky variance subtracted)')
    pp.axis([x_ideal.min(), x_ideal.max(), y_ideal.min(), y_ideal.max()])
    pp.legend(loc='lower right')
    pp.show()
    scih.close()
    ivmh.close()
    return
Example #7
File: test.py  Project: rag9704/PTS
    def calculate_statistics_no_clipping(self):
        """
        This function ...
        :return:
        """

        # Compress (remove masked values)
        flattened = np.ma.array(self.sources_with_noise.data,
                                mask=self.rotation_mask.data).compressed()

        median = np.median(flattened)
        biweight_loc = biweight_location(flattened)

        biweight_midvar = biweight_midvariance(flattened)
        median_absolute_deviation = mad_std(flattened)

        #print("median", median)
        #print("biweigth_loc", biweight_loc)
        #print("biweight_midvar", biweight_midvar)
        #print("median_absolute_deviation", median_absolute_deviation)

        self.statistics.no_clipping = Map()
        self.statistics.no_clipping.median = median
        self.statistics.no_clipping.biweight_loc = biweight_loc
        self.statistics.no_clipping.biweight_midvar = biweight_midvar
        self.statistics.no_clipping.median_absolute_deviation = median_absolute_deviation
Example #8
def compute_apcor(apcor_all, apcorlim):
    """
    Filter out edge and other bad regions by selecting
    the apcorlim greatest aperture correction values.
    Then compute the biweight value and return the
    resultant average aperture correction.

    Parameters
    ----------
    apcor_all : numpy:ndarray
        the aperture corrections

    apcorlim : int
        the number of largest
        values to consider
    """

    flattened = sorted(apcor_all.flatten())
    top_vals = np.flip(flattened, 0)[:apcorlim]

    # Check whether the values actually vary (identical values break the
    # biweight); use the absolute relative difference, since top_vals is
    # sorted in descending order and plain differences are never positive
    if np.any(np.abs((top_vals - top_vals[0]) / top_vals[0]) > 1e-10):
        return biweight_location(top_vals)
    else:
        _logger.warning("All aperture correction measurements the same!")
        return top_vals[0]
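A hypothetical usage sketch of compute_apcor with a synthetic aperture-correction map (_logger in the function above is a module-level logger):

import numpy as np

apcor_map = np.random.uniform(0.7, 0.9, size=(40, 40))  # synthetic values
apcor = compute_apcor(apcor_map, apcorlim=100)           # biweight of the 100 largest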
Example #9
def Multi_Band_GP(x_range, x, y, y_err, dim, n_samples=False, sampling=False):
    """Considers cross-correlations between multiple bands as dims; prone to holding the order of the bands too rigidly.
    Will optimize for the 'best' parameters when given no parameters.
    x = mjd, y and y_err = measurement, dim and dim_err = wavelength in nm.
    """
    length_scale = 20
    signal_to_noises = (np.abs(y) /
                        np.sqrt(np.power(y_err, 2) + (1e-2 * np.max(y))**2))
    scale = np.abs(y[signal_to_noises.argmax()])
    kernel = (
        (0.5 * scale)**2 *
        george.kernels.Matern32Kernel([length_scale**2, 6000**2], ndim=2))
    kernel.freeze_parameter('k2:metric:log_M_1_1')
    kernel.freeze_parameter('k1:log_constant')  #Fixed Scale
    x_data = np.vstack([x, dim]).T
    gp = george.GP(kernel, mean=biweight_location(y))
    guess_parameters = gp.get_parameter_vector()
    gp.compute(x_data, y_err)
    x_pred = np.linspace(x.min(), x.max(), n_samples)
    x_pred = np.vstack([x, dim]).T
    pred, pred_var = gp.predict(y, x_pred, return_var=True)

    # bounds = [(0, np.log(1000**2))]
    def neg_ln_like(p):
        gp.set_parameter_vector(p)
        return -gp.log_likelihood(y)

    def grad_neg_ln_like(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(y)

    result = minimize(
        neg_ln_like,
        gp.get_parameter_vector(),
        jac=grad_neg_ln_like,
        # bounds=bounds
    )
    if result.success:
        gp.set_parameter_vector(result.x)
    else:
        gp.set_parameter_vector(guess_parameters)
    gp.set_parameter_vector(result.x)
    # print(kernel.get_parameter_vector(True))
    #print("\nFinal ln-likelihood: {0:.2f}".format(gp.log_likelihood(y)))
    if n_samples != False:
        x_pred = np.vstack([
            np.array(
                list(np.linspace(x_range.min(), x_range.max(), n_samples)) *
                np.unique(dim).size),
            np.array(np.sort(list(np.unique(dim)) * n_samples))
        ]).T
        # x_pred = np.vstack([np.array(list(np.linspace(x_range.min(), x_range.max(), n_samples))*6),
        #                     np.array(np.sort([357, 476, 621, 754, 870, 1004]*n_samples))]).T
        pred, pred_var = gp.predict(y, x_pred, return_var=True)
        output = [x_pred[:, 0], pred, np.sqrt(pred_var), x_pred[:, 1], []]
        return output
    elif sampling != False:
        x_pred = np.vstack([np.array(sampling[0]), np.array(sampling[1])]).T
        pred, pred_var = gp.predict(y, x_pred, return_var=True)
        output = [x_pred[:, 0], pred, np.sqrt(pred_var), x_pred[:, 1], []]
        return output
Example #10
def calculate_reference_statistics(times, mags):
    """Calculate reference statistics for a light curve

    We do this using biweight estimators for both the location and scale.  One challenge
    with this is that if there are many repeated observations, which is the case for
    some of the ZTF deep fields, they can overwhelm all of the other observations. To
    mitigate this, we only use the first observation from each night when calculating
    the statistics.

    Parameters
    ----------
    times : ndarray
        Time of each observation.
    mags : ndarray
        Magnitude of each observation.

    Returns
    -------
    reference_magnitude : float
        Reference magnitude
    reference_scale : float
        Reference scale
    """
    # Consider at most one observation from each night.
    _, indices = np.unique(times.astype(int), return_index=True)
    use_mags = mags[indices]

    # Use a robust estimator for the reference magnitude and scale.
    reference_magnitude = biweight_location(use_mags)
    reference_scale = biweight_scale(use_mags)

    return reference_magnitude, reference_scale
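A short usage sketch with synthetic data, illustrating the per-night de-duplication described in the docstring (biweight_location and biweight_scale are module-level imports in the original):

import numpy as np

times = np.array([100.1, 100.5, 100.9, 101.2, 102.3])   # MJD-like times
mags = np.array([18.2, 18.3, 18.1, 18.25, 18.4])
# The three observations on night 100 collapse to one before the statistics.
ref_mag, ref_scale = calculate_reference_statistics(times, mags)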
Example #11
File: test.py  Project: SKIRT/PTS
    def calculate_statistics_no_clipping(self):

        """
        This function ...
        :return:
        """

        # Compress (remove masked values)
        flattened = np.ma.array(self.sources_with_noise.data, mask=self.rotation_mask.data).compressed()

        median = np.median(flattened)
        biweight_loc = biweight_location(flattened)

        biweight_midvar = biweight_midvariance(flattened)
        median_absolute_deviation = mad_std(flattened)

        #print("median", median)
        #print("biweigth_loc", biweight_loc)
        #print("biweight_midvar", biweight_midvar)
        #print("median_absolute_deviation", median_absolute_deviation)

        self.statistics.no_clipping = Map()
        self.statistics.no_clipping.median = median
        self.statistics.no_clipping.biweight_loc = biweight_loc
        self.statistics.no_clipping.biweight_midvar = biweight_midvar
        self.statistics.no_clipping.median_absolute_deviation = median_absolute_deviation
Example #12
def base_reduction(filename, get_header=False):
    # Load fits file
    a = fits.open(filename)
    image = np.array(a[0].data, dtype=float)

    # Overscan subtraction (integer division so the slice index stays an int)
    overscan_length = 32 * (image.shape[1] // 1064)
    O = biweight_location(image[:, -(overscan_length - 2):])
    image[:] = image - O

    # Trim image
    image = image[:, :-overscan_length]

    # Gain multiplication (catch negative cases)
    gain = a[0].header['GAIN']
    gain = np.where(gain > 0., gain, 0.85)
    rdnoise = a[0].header['RDNOISE']
    rdnoise = np.where(rdnoise > 0., rdnoise, 3.)
    amp = (a[0].header['CCDPOS'].replace(' ', '') +
           a[0].header['CCDHALF'].replace(' ', ''))
    try:
        ampname = a[0].header['AMPNAME']
    except KeyError:
        ampname = None
    header = a[0].header

    # Orient image
    a = orient_image(image, amp, ampname) * gain

    # Calculate error frame
    E = np.sqrt(rdnoise**2 + np.where(a > 0., a, 0.))
    if get_header:
        return a, E, header
    return a, E
Example #13
def build_XA(IFU, ww, skys, wstart, wend, amps):
    # Select reference source
    # here we will use as A the biweight location (~ mean) from the entire IFU
    XA = []
    for k in skys[(IFU, amps[0])]:
        amps_data = []
        for amp in amps:
            if k in skys[(IFU, amp)]:
                amps_data.append(skys[(IFU, amp)][k])
        stack = np.vstack(amps_data)
        bloc = biweight_location(stack, axis=0)
        XA.append(bloc)

    XA = np.array(XA)

    # hack to homogenize lengths; the rebinning makes sure that the wavelength
    # grid always starts at the same wavelength, but not necessarily ends at
    # the same one (there may be a few pixels more or less)
    #N = np.min([XA.shape[1], XB.shape[2], ww.shape[0]])
    N = np.min([XA.shape[1], ww.shape[0]])
    ww = ww[:N]
    XA = XA[:, :N]

    # can't have nans
    XA[np.isnan(XA)] = 0.

    ii = (ww >= wstart) * (ww <= wend)
    wwcut = ww[ii]
    XAcut = XA[:, ii]

    return wwcut, XAcut
Example #14
    def subtract_background(self):
        """Subtract the background levels from each band.

        The background levels are estimated using a biweight location
        estimator. This estimator will calculate a robust estimate of the
        background level for objects that have short-lived light curves, and it
        will return something like the median flux level for periodic or
        continuous light curves.

        Returns
        -------
        subtracted_observations : pandas.DataFrame
            A modified version of the observations DataFrame with the
            background level removed.
        """
        subtracted_observations = self.observations.copy()

        for band in self.bands:
            mask = self.observations["band"] == band
            band_data = self.observations[mask]

            # Use a biweight location to estimate the background
            ref_flux = biweight_location(band_data["flux"])

            subtracted_observations.loc[mask, "flux"] -= ref_flux

        return subtracted_observations
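A standalone sketch of the per-band operation above, with a synthetic pandas DataFrame and the class context omitted:

import pandas as pd
from astropy.stats import biweight_location

observations = pd.DataFrame({
    "band": ["g", "g", "r", "r", "r"],
    "flux": [10.0, 12.0, 5.0, 5.5, 80.0],   # 80.0 could be a short-lived flare
})
for band in observations["band"].unique():
    mask = observations["band"] == band
    observations.loc[mask, "flux"] -= biweight_location(observations.loc[mask, "flux"])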
Example #15
File: utils.py  Project: rendinam/stginga
def calc_stat(data, sigma=1.8, niter=10, algorithm='median'):
    """Calculate statistics for given data.

    Parameters
    ----------
    data : ndarray
        Data to be calculated from.

    sigma : float
        Sigma for sigma clipping.

    niter : int
        Number of iterations for sigma clipping.

    algorithm : {'mean', 'median', 'mode', 'stddev'}
        Algorithm for statistics calculation.

    Returns
    -------
    val : float
        Statistics value.

    Raises
    ------
    ValueError
        Invalid algorithm.

    """
    arr = np.ravel(data)

    if len(arr) < 1:
        return 0.0

    kwargs = {'sigma': sigma}

    if ASTROPY_LT_3_1:
        kwargs['iters'] = niter
    else:
        kwargs['maxiters'] = niter

    arr_masked = sigma_clip(arr, **kwargs)
    arr = arr_masked.data[~arr_masked.mask]

    if len(arr) < 1:
        return 0.0

    algorithm = algorithm.lower()
    if algorithm == 'mean':
        val = arr.mean()
    elif algorithm == 'median':
        val = np.median(arr)
    elif algorithm == 'mode':
        val = biweight_location(arr)
    elif algorithm == 'stddev':
        val = arr.std()
    else:
        raise ValueError('{0} is not a valid algorithm for sky background '
                         'calculations'.format(algorithm))

    return val
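A usage sketch with a synthetic image, calling the calc_stat defined above (the ASTROPY_LT_3_1 flag is a module-level constant in the original); the 'mode' branch returns the biweight location of the sigma-clipped pixels.

import numpy as np

image = np.random.normal(200., 5., size=(64, 64))
image[0, 0] = 1e6   # hot pixel, removed by the sigma clipping
sky = calc_stat(image, sigma=1.8, niter=10, algorithm='mode')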
Example #16
def calc_stat(data, sigma=1.8, niter=10, algorithm='median'):
    """Calculate statistics for given data.

    Parameters
    ----------
    data : ndarray
        Data to be calculated from.

    sigma : float
        Sigma for sigma clipping.

    niter : int
        Number of iterations for sigma clipping.

    algorithm : {'mean', 'median', 'mode', 'stddev'}
        Algorithm for statistics calculation.

    Returns
    -------
    val : float
        Statistics value.

    Raises
    ------
    ValueError
        Invalid algorithm.

    """
    arr = np.ravel(data)

    if len(arr) < 1:
        return 0.0

    if ((astropy_version.major==1 and astropy_version.minor==0) or
            (astropy_version.major < 1)):
        arr_masked = sigma_clip(arr, sig=sigma, iters=niter)
    else:
        arr_masked = sigma_clip(arr, sigma=sigma, iters=niter)

    arr = arr_masked.data[~arr_masked.mask]

    if len(arr) < 1:
        return 0.0

    algorithm = algorithm.lower()
    if algorithm == 'mean':
        val = arr.mean()
    elif algorithm == 'median':
        val = np.median(arr)
    elif algorithm == 'mode':
        val = biweight_location(arr)
    elif algorithm == 'stddev':
        val = arr.std()
    else:
        raise ValueError('{0} is not a valid algorithm for sky background '
                         'calculations'.format(algorithm))

    return val
Example #17
def setup_fluxlim(args, rargs):
    """
    This is equivalent to the rflim0 and rsetfl scripts.

    Determine the input values for the flux limit calculation,
    create the input file, create the slurm file using the jobsplitter
    and launch it using sbatch
    """

    nightshot = args.night + 'v' + args.shotid
    dithall = DithAllFile(args.dithall_dir + '/' + nightshot + '/dithall.use')

    if args.ifu_slots:
        ifus = args.ifu_slots
    else:
        ifus = np.unique(dithall.ifuslot)

    fname = 'flim%s' % nightshot

    with open(fname, 'w') as f:

        for ifu in ifus:
            ifu_dith = dithall.where(dithall.ifuslot == ifu)
            dist = np.sqrt(ifu_dith.x * ifu_dith.x + ifu_dith.y * ifu_dith.y)
            sortidx = np.argsort(dist)

            ra_mean = aps.biweight_location(ifu_dith.ra[sortidx][0:2])
            dec_mean = aps.biweight_location(ifu_dith.dec[sortidx][0:2])

            ixyname = ifu_dith.filename[sortidx][0]

            logstr = ''
            if '-l' not in rargs:
                logstr = '-l %s_%s_%s.log'  \
                    % (args.night, args.shotid,
                       '_'.join(ixyname.split('_')[0:4]))

            f.write('vdrp_calc_flim %s %s %.7f %.7f %s %s %s\n' %
                    (' '.join(rargs), logstr, ra_mean, dec_mean, args.night,
                     args.shotid, '_'.join(ixyname.split('_')[0:4])))

    # Now prepare the job splitter for it
    args.cmdfile = fname

    vj.main(args)
Example #18
File: hetobs.py  Project: grzeimann/HETobs
def reduce_acam(image):
    ''' Really simple reduction '''
    # Get overscan but skip first column because it looks off
    overscan = biweight_location(image[:, 1:4])
    # Trim all of the overscan and subtract the average value
    image = image[:, 4:] - overscan
    # Subtract the currently global variable "acam_bias" (master bias)
    image = image - acam_bias
    return image
Example #19
    def chisq_metric(self, chisq, chisq_std_zscore_cut=4.0, return_dict=True):
        """
        chi square metrics

        chisq : ndarray, shape=(Nants, Ntimes, Nfreqs)
            ndarray containing chisq for each antenna (single pol)

        chisq_std_zscore_cut : float, default=4.0
            sigma tolerance (or z-score tolerance) for std of chisq fluctuations

        return_dict : bool, default=True
            return per-antenna metrics as a dictionary, with antenna number as key
            rather than an ndarray
        """
        # Get chisq statistics
        chisq_avg = np.median(np.median(chisq[:, :, self.band], axis=0),
                              axis=1).astype(np.float64)
        chisq_tot_avg = astats.biweight_location(
            chisq[:, :, self.band]).astype(np.float64)
        chisq_ant_avg = np.array(
            list(map(astats.biweight_location,
                     chisq[:, :, self.band]))).astype(np.float64)
        chisq_ant_std = np.sqrt(
            np.array(
                list(map(astats.biweight_midvariance, chisq[:, :,
                                                            self.band]))))
        chisq_ant_std_loc = astats.biweight_location(chisq_ant_std).astype(
            np.float64)
        chisq_ant_std_scale = np.sqrt(
            astats.biweight_midvariance(chisq_ant_std))
        chisq_ant_std_zscore = (chisq_ant_std -
                                chisq_ant_std_loc) / chisq_ant_std_scale
        chisq_ant_std_zscore_max = np.max(np.abs(chisq_ant_std_zscore))
        chisq_good_sol = chisq_ant_std_zscore_max < chisq_std_zscore_cut

        # convert to dictionaries
        if return_dict is True:
            chisq_ant_std = odict(zip(self.ant_array, chisq_ant_std))
            chisq_ant_avg = odict(zip(self.ant_array, chisq_ant_avg))

        return (chisq_avg, chisq_tot_avg, chisq_ant_avg, chisq_ant_std,
                chisq_ant_std_loc, chisq_ant_std_scale, chisq_ant_std_zscore,
                chisq_ant_std_zscore_max, chisq_good_sol)
Example #20
def build_string(days_ago, mag):
    print('Building string...')
    data_last24hrs = np.where(days_ago < 1)
    data_last1_6_days = np.where((days_ago < 6) & (days_ago > 1))
    n_obs_last24hrs = np.size(mag[data_last24hrs])
    n_obs_last1_6_days = np.size(mag[data_last1_6_days])
    mean_last24hrs = biweight_location(mag[data_last24hrs])
    mean_last1_6_days = biweight_location(mag[data_last1_6_days])
    stdev = np.std(mag[data_last24hrs]) / np.sqrt(n_obs_last24hrs) \
        + np.std(mag[data_last1_6_days]) / np.sqrt(n_obs_last1_6_days)
    diff = mean_last24hrs - mean_last1_6_days
    sigma = diff / stdev

    if n_obs_last24hrs < 3 or n_obs_last1_6_days < 3:
        print('Not enough observations. Abort.')
        return None
    else:

        if diff > 0:
            changeword = 'dimmer'
        else:
            changeword = 'brighter'

        mag_text = "My visual mag from last night was " + \
            str(format(mean_last24hrs, '.2f')) + \
            ' (robust mean of ' + \
            str(n_obs_last24hrs) + \
            ' observations). '

        change_text = 'That is ' + \
            format(abs(diff), '.2f') + \
            ' mag ' + \
            changeword + \
            ' than the robust mean of the 5 previous nights (n=' + \
            str(n_obs_last1_6_days) + \
            ', ' + \
            format(abs(sigma), '.1f') + \
            'σ). #Betelgeuse'

        text = mag_text + change_text
        print(text)
        return text
Example #21
    def _stats_data(self, stats, mask, scipy, astropy, decimals_mode):
        data = self.data

        # The original data size, for computation of valid elements and how
        # many are masked/invalid.
        size_initial = data.size

        # Delete masked values; indexing with the inverted mask directly
        # flattens the result to a 1D array.
        data = data[~mask]
        size_masked = data.size

        # Delete invalid (NaN, Inf) values. This should ensure that the result
        # is always a 1D array
        data = data[np.isfinite(data)]
        size_valid = data.size
        stats['elements'] = [size_valid]

        stats['min'] = [np.amin(data)]
        stats['max'] = [np.amax(data)]
        stats['mean'] = [np.mean(data)]
        stats['median'] = [np.median(data)]
        # Use custom mode defined in this package because scipy.stats.mode is
        # very, very slow and by default tries to calculate the mode along
        # axis=0 and not for the whole array.
        # Take the first element since the second is the number of occurrences.
        stats['mode'] = [mode(data, decimals=decimals_mode)[0]]

        if astropy:
            stats['biweight_location'] = [biweight_location(data)]

        stats['std'] = [np.std(data)]

        if astropy:
            stats['mad'] = [mad_std(data)]
            stats['biweight_midvariance'] = [biweight_midvariance(data)]

        stats['var'] = [np.var(data)]

        if scipy:  # pragma: no cover
            if not OPT_DEPS['SCIPY']:
                log.info('SciPy is not installed.')
            else:
                # Passing axis=None should not be important since we already
                # boolean indexed the array and it's 1D. But it's important
                # to remember that their default is axis=0 and not axis=None!
                stats['skew'] = [skew(data, axis=None)]
                stats['kurtosis'] = [kurtosis(data, axis=None)]

        stats['masked'] = [size_initial - size_masked]
        stats['invalid'] = [size_masked - size_valid]

        return data
Example #22
File: CatPrep.py  Project: nkern/C4
def proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,vlim,rlim,C):

	# Project Galaxies
	ang_d,lum_d = C.zdistance(clus_z,H0)
	angles = C.findangle(gal_ra,gal_dec,clus_ra,clus_dec)
	rdata = angles * ang_d
	vdata = c * (gal_z - clus_z) / (1 + clus_z)

	# Take Average
	cut = np.where((np.abs(vdata)<vlim)&(rdata<1.5))[0]
	if len(cut) < 2: cut = np.where((np.abs(vdata)<vlim+500)&(rdata<rlim))[0]
	if len(cut) < 2:
		clus_ra = np.median(gal_ra[cut])
		clus_dec = np.median(gal_dec[cut])
		clus_z = np.median(gal_z[cut])
	else:
		clus_ra = astats.biweight_location(gal_ra[cut])
		clus_dec = astats.biweight_location(gal_dec[cut])
		clus_z = astats.biweight_location(gal_z[cut])

	return clus_ra, clus_dec, clus_z
Example #23
def proj_avg(clus_ra, clus_dec, clus_z, gal_ra, gal_dec, gal_z, vlim, rlim, C):

    # Project Galaxies
    ang_d, lum_d = C.zdistance(clus_z, H0)
    angles = C.findangle(gal_ra, gal_dec, clus_ra, clus_dec)
    rdata = angles * ang_d
    vdata = c * (gal_z - clus_z) / (1 + clus_z)

    # Take Average
    cut = np.where((np.abs(vdata) < vlim) & (rdata < 1.5))[0]
    if len(cut) < 2:
        cut = np.where((np.abs(vdata) < vlim + 500) & (rdata < rlim))[0]
    if len(cut) < 2:
        clus_ra = np.median(gal_ra[cut])
        clus_dec = np.median(gal_dec[cut])
        clus_z = np.median(gal_z[cut])
    else:
        clus_ra = astats.biweight_location(gal_ra[cut])
        clus_dec = astats.biweight_location(gal_dec[cut])
        clus_z = astats.biweight_location(gal_z[cut])

    return clus_ra, clus_dec, clus_z
Example #24
def make_plot(days_ago, dates, mag):
    print('Making plot...')
    time_span = np.max(dates) - np.min(dates)
    min_plot = 0
    max_plot = +1.5
    x_days = 300

    # Make bins
    bin_width = 1
    nights = np.arange(0, max(days_ago), bin_width)
    bin_mags = []
    errors = []
    for night in nights:
        selector = np.where((days_ago < night + bin_width)
                            & (days_ago > night))
        n_obs = np.size(mag[selector])
        flux = biweight_location(mag[selector])
        error = np.std(mag[selector]) / np.sqrt(n_obs)
        if error > 0.2:
            error = 0
        if error == 0:  # and flux < 0.2:
            flux = np.nan
        bin_mags.append(flux)
        errors.append(error)
        print(night, flux, error, n_obs, np.std(mag[selector]))

    # Convert magnitudes to fluxes
    bin_mags = np.array(bin_mags)
    flux = 1 / (10**(0.4 * (bin_mags - baseline_mag)))
    latest_flux = flux[0]
    if np.isnan(latest_flux):
        latest_flux = flux[1]

    plt.errorbar(nights + 0.5, flux, yerr=errors, fmt='.k')
    plt.xlabel('Days before today')
    plt.ylabel('Normalized flux (0.5 mag baseline)')
    plt.ylim(min_plot, max_plot)
    plt.xlim(x_days, 0)
    date_text = datetime.datetime.now().strftime("%d %b %Y")
    try:
        lumi = str(int((round(latest_flux * 100, 0))))
        text = "#Betelgeuse at " + lumi + r"% of its usual brightness @betelbot "
    except:
        text = "No new #Betelgeuse brightness tonight @betelbot"
        lumi = 0
    plt.text(x_days - 2, 0.19, "Update: " + date_text)
    plt.text(x_days - 2, 0.12, text)
    plt.text(x_days - 2, 0.05, "AAVSO visual (by-eye) daily bins")
    plt.savefig(plot_file, bbox_inches='tight', dpi=300)
    print('Plot made')
    return lumi
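The conversion above uses the standard relation flux = 10**(-0.4 * (m - m0)). A small worked check (baseline_mag is a module-level constant in the original script; 0.5 is assumed here purely for illustration):

baseline_mag = 0.5   # assumed value for this sketch
bin_mag = 1.25
flux = 10 ** (-0.4 * (bin_mag - baseline_mag))
print(round(flux, 2))   # 0.5: a star 0.75 mag fainter has about half the flux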
Example #25
def make_plot(days_ago, dates, mag):
    print('Making plot...')
    time_span = np.max(dates) - np.min(dates)
    flatten_lc, trend_lc = flatten(
        days_ago,
        mag,
        method='lowess',
        window_length=time_span / 5,
        return_trend=True,
    )
    #mpl.rcParams['font.sans-serif']=['Times New Roman']   # set the default font; SimHei is a CJK font
    mpl.rcParams['font.sans-serif'] = ['SimHei']
    mpl.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font
    #fontcn = {'family': 'Droid Sans Fallback'} # 1pt = 4/3px
    fontcn = {'family': 'SimHei'}  # 1pt = 4/3px
    fonten = {'family': 'Times New Roman'}
    plt.scatter(days_ago, mag, s=5, color='blue', alpha=0.5)
    plt.scatter(days_ago1,
                all_mags1,
                s=10,
                color='black',
                alpha=0.8,
                marker="x")
    plt.xlabel(u'从今天往回数的天数', fontdict=fontcn)
    #plt.xlabel('从今天往回数的天数')
    plt.ylabel(u'视星等', fontdict=fontcn)
    mid = 0.6
    plt.ylim(mid - 1, mid + 1)
    plt.xlim(-1, 20)
    plt.plot(days_ago, trend_lc, color='red', linewidth=1)
    plt.gca().invert_yaxis()
    plt.gca().invert_xaxis()
    date_text = datetime.datetime.now().strftime("%Y-%m-%d")
    data_last24hrs = np.where(days_ago < 1)
    mean_last24hrs = biweight_location(mag[data_last24hrs])
    lumi = str(format(mean_last24hrs, '.2f'))
    plt.text(19.5, mid + 1 - 0.25, u"裸眼 ", color='blue', fontdict=fontcn)
    plt.text(18, mid + 1 - 0.25, u"观测星等 蓝色", color='blue', fontdict=fontcn)
    plt.text(14, mid + 1 - 0.25, u"○", color='blue', fontdict=fonten)
    plt.text(19.5, mid + 1 - 0.15, u"CCD ", color='black', fontdict=fonten)
    plt.text(18, mid + 1 - 0.15, u"观测星等 黑色", color='black', fontdict=fontcn)
    plt.text(14, mid + 1 - 0.15, u"×", color='black', fontdict=fonten)
    plt.text(19.5, mid + 1 - 0.05, u"局部加权拟合 红色线", color='red', fontdict=fontcn)
    plt.text(7.5, mid - 1 + 0.1, u'目前参宿四的星等为 ', fontdict=fontcn)
    plt.text(2, mid - 1 + 0.1, lumi, fontdict=fonten)
    plt.text(7.5, mid - 1 + 0.2, u"由 天文通 译制于", fontdict=fontcn)
    plt.text(3, mid - 1 + 0.2, date_text, fontdict=fonten)
    plt.savefig(plot_file, bbox_inches='tight', dpi=300)
    print('Done.')
Example #26
 def eval_CMR(self, M1600, beta, keys=None):
     lnL = 0.
     blob = {}
     keys, M1600, beta = self._fix_dim(keys, M1600, beta)
     for k, mags, b in zip(keys, M1600, beta):
         obsCMR = self.obsData[k]['obsCMR']
         obsCMRerr = self.obsData[k]['obsCMRerr']
         modelCMR = np.zeros(len(obsCMR))
         for iB, (lower, upper) in enumerate(self.obsData[k]['binCMR']):
             sample = b[(mags >= lower) & (mags < upper)]
             if len(sample) < 2:
                 modelCMR[iB] = np.nan
             else:
                 modelCMR[iB] = biweight_location(sample)
         lnL += self._compute_lnL(modelCMR, obsCMR, obsCMRerr)
         blob[k] = modelCMR
     return self._retval(lnL, blob)
Example #27
File: core.py  Project: rosteen/photutils
    def calc_background(self, data, axis=None, masked=False):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis, masked=False)
        else:
            # convert to ndarray with masked values as np.nan
            if isinstance(data, np.ma.MaskedArray):
                data = data.filled(np.nan)

        # ignore RuntimeWarning where axis is all NaN
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            result = biweight_location(data,
                                       c=self.c,
                                       M=self.M,
                                       axis=axis,
                                       ignore_nan=True)

        if masked and isinstance(result, np.ndarray):
            result = np.ma.masked_where(np.isnan(result), result)

        return result
Example #28
def robustassess(n, d, k=2.24):  #raw frequency, doc length and threshold
    v = np.divide(n, d)

    rawcount = np.sum(n)

    mu = biweight_location(v)
    s = biweight_scale(v)
    mu2s = mu + k * s
    vcliph2s = np.minimum(v, mu2s)
    #Winsorising

    docsclipped = np.sum(np.greater(v, vcliph2s))  # number of docs whose rate was clipped

    adjustedcount = int(np.sum(np.rint(vcliph2s * d)))  # rounding to integer

    proportionv = n / rawcount  # proportion of word frequency per document
    proportiond = d / N  # document size proportion
    kld = np.sum(
        np.multiply(proportionv, np.log2(np.divide(proportionv, proportiond))))

    return [rawcount, adjustedcount, docsclipped, len(n), '%.3f' % kld]
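A sketch of the Winsorising step above with tiny synthetic inputs (N in the function is a module-level corpus size):

import numpy as np
from astropy.stats import biweight_location, biweight_scale

n = np.array([2., 1., 3., 40., 2.])              # raw frequency per document
d = np.array([1000., 800., 1200., 900., 1100.])  # document lengths
v = n / d
cap = biweight_location(v) + 2.24 * biweight_scale(v)   # location + k * scale
adjusted = int(np.sum(np.rint(np.minimum(v, cap) * d)))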
Example #29
def _pixel_stats(pixels, clip_sig=3, n_clip=10, min_pix=50):
    """
    Calculate mode and scale of pixel distribution, clipping outliers. Uses
    biweight as "robust" estimator of these quantities.

    :param pixels: Array to calculate statistics for
    :param clip_sig: Sigma value at which to clip outliers
    :param n_clip: Number of clipping iterations
    :param min_pix: Minimum number of retained pixels
    :return: 2-tuple of distribution mode, scale
    """
    clip_iter = 0
    mode, scale = 0, 1
    mask = np.ones(pixels.shape, dtype=bool)
    while True:
        mode = biweight_location(pixels[mask])
        scale = biweight_midvariance(pixels[mask])
        mask &= np.abs(pixels - mode) < clip_sig * scale
        clip_iter += 1
        if np.sum(mask) < min_pix or clip_iter >= n_clip:
            break
    return mode, scale
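A usage sketch with a synthetic sky-dominated pixel array, calling the helper above (biweight_location and biweight_midvariance are module-level imports in the original):

import numpy as np

rng = np.random.default_rng(0)
pixels = rng.normal(100., 3., size=10000)
pixels[:50] += 500.   # a few bright source pixels for the clipping to reject
mode, scale = _pixel_stats(pixels, clip_sig=3, n_clip=10)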
Example #30
File: core.py  Project: gbrammer/photutils
    def calc_background(self, data, axis=None):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis)

        return biweight_location(data, c=self.c, M=self.M, axis=axis)
Example #31
def overscan_estimate(ccd_in,
                      meta=None,
                      master_bias=None,
                      binsize=None,
                      min_width=1,
                      max_width=8,
                      box_size=100,
                      min_hist_val=10,
                      show=False,
                      *args,
                      **kwargs):
    """Estimate overscan in ADU in the absense of a formal overscan region

    For biases, returns the median of the image.  For all others,
    uses the minimum of: (1) the first peak in the histogram of the
    image or (2) the minimum of the median of four boxes at the
    corners of the image (specific to the IoIO coronagraph)

    Works best if bias shape (particularly bias ramp) is subtracted
    first.  Will subtract bias if bias is supplied and has not been
    subtracted.

    Parameters
    ----------
    ccd_in : `~astropy.nddata.CCDData` or filename
        Image from which to extract overscan estimate

    meta : `astropy.io.fits.header` or None
        reference to metadata of ccd into which to write OVERSCAN_* cards.
        If None, no metadata will be returned

    master_bias : `~astropy.nddata.CCDData`, filename, or None
        Bias to subtract from ccd before estimate is calculated.
        Improves accuracy by removing bias ramp.  Bias can be in units
        of ADU or electrons and is converted using the specified gain.
        If bias has already been subtracted, this step will be skipped
        but the bias header will be used to extract readnoise and gain
        using the *_key keywords.  Default is ``None``.

    binsize: float or None, optional
        The binsize to use for the histogram.  If None, binsize is 
        (readnoise in ADU)/4.  Default = None

    min_width : int, optional
        Minimum width peak to search for in histogram.  Keep in mind
        histogram bins are binsize ADU wide.  Default = 1

    max_width : int, optional
        See min_width.  Default = 8

    box_size : int
        Edge size of square box used to extract biweight median location
        from the corners of the image for this method of overscan
        estimation.  Default = 100

    show : boolean
       Show image with min/max set to highlight overscan pixels, and the
       histogram with the overscan region marked.  Default is False
       [consider making this boolean or name of plot file]

    """
    if ccd_in.meta.get('overscan_value') is not None:
        # Overscan has been subtracted in a previous reduction step,
        # so exit quietly
        return 0

    # Work with a copy since we mess with both the ccd.data and .meta
    ccd = ccd_in.copy()
    # Get CCD characteristics
    ccd.meta = sx694.metadata(ccd.meta)
    if meta is None:
        meta = ccd.meta
    if ccd.unit != u.adu:
        # For now don't get fancy with unit conversion
        raise ValueError('CCD units must be in ADU for overscan estimation')
    if ccd.meta['IMAGETYP'] == "BIAS":
        overscan = np.median(ccd)
        meta['HIERARCH OVERSCAN_MEDIAN'] = (overscan, 'ADU')
        meta['HIERARCH OVERSCAN_METHOD'] = \
            ('median', 'Method used for overscan estimation')
        return overscan

    # Prepare for histogram method of overscan estimation.  These
    # keywords are guaranteed to be in meta because we put them there
    # in ccd_metadata
    readnoise = ccd.meta['RDNOISE']
    gain = ccd.meta['GAIN']
    if ccd.meta.get('subtract_bias') is None and master_bias is not None:
        # Bias has not been subtracted and we have a bias around to be
        # able to do that subtraction
        if isinstance(master_bias, str):
            bias = CorData.read(master_bias)
            meta['HIERARCH OVERSCAN_MASTER_BIAS'] = 'OSBIAS'
            meta['OSBIAS'] = master_bias
        else:
            # Work with a copy since we are going to muck with it
            bias = master_bias.copy()
            meta['HIERARCH OVERSCAN_MASTER_BIAS'] = 'CCDData object provided'
        # Improve our readnoise (measured) and gain (probably not
        # re-measured) values
        readnoise = bias.meta['RDNOISE']
        gain = bias.meta['GAIN']
        if bias.unit is u.electron:
            # Convert bias back to ADU for subtraction
            bias = bias.divide(gain * u.electron / u.adu)
        ccd = ccd.subtract(bias)
        ccd.meta['HIERARCH subtract_bias'] = True
    if type(ccd) != CorData and ccd.meta.get('subtract_bias') is None:
        # Don't gunk up logs when we are taking data, but subclasses
        # of CorObs (e.g. RedCorObs) will produce message
        log.warning(
            'overscan_estimate: bias has not been subtracted, which can lead to inaccuracy of overscan estimate'
        )
    # The coronagraph creates a margin of un-illuminated pixels on the
    # CCD.  These are great for estimating the bias and scattered
    # light for spontaneous subtraction.
    # Corners method
    s = ccd.shape
    bs = box_size
    c00 = biweight_location(ccd[0:bs, 0:bs])
    c10 = biweight_location(ccd[s[0] - bs:s[0], 0:bs])
    c01 = biweight_location(ccd[0:bs, s[1] - bs:s[1]])
    c11 = biweight_location(ccd[s[0] - bs:s[0], s[1] - bs:s[1]])
    corners_method = min(c00, c10, c01, c11)
    # Histogram method.  The first peak is the bias, the second is the
    # ND filter.  Note that the 1.25" filters do a better job at this
    # than the 2" filters but with carefully chosen parameters, the
    # first small peak can be spotted.
    if binsize is None:
        # Calculate binsize based on readnoise in ADU, but oversample
        # by 4.  Note need to convert from Quantity to float
        binsize = readnoise / gain / 4.
    im_hist, im_hist_centers = hist_of_im(ccd, binsize)
    # Note that after bias subtraction, there is sometimes some noise
    # at low counts.  We expect a lot of pixels in the histogram, so filter
    good_idx = np.flatnonzero(im_hist > min_hist_val)
    im_hist = im_hist[good_idx]
    im_hist_centers = im_hist_centers[good_idx]
    # The arguments to linspace are the critical parameters I played
    # with together with binsize to get the first small peak to be recognized
    im_peak_idx = signal.find_peaks_cwt(im_hist,
                                        np.linspace(min_width, max_width))
    hist_method = im_hist_centers[im_peak_idx[0]]
    overscan_methods = ['corners', 'histogram']
    overscan_values = np.asarray((corners_method, hist_method))
    meta['HIERARCH OVERSCAN_CORNERS'] = (corners_method, 'ADU')
    meta['HIERARCH OVERSCAN_HISTOGRAM'] = (hist_method, 'ADU')
    o_idx = np.argmin(overscan_values)
    overscan = overscan_values[o_idx]
    meta['HIERARCH OVERSCAN_METHOD'] = (overscan_methods[o_idx],
                                        'Method used for overscan estimation')
    if show:
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8.5, 9))
        ccds = ccd.subtract(1000 * u.adu)
        range = 5 * readnoise / gain
        vmin = overscan - range - 1000
        vmax = overscan + range - 1000
        ax1.imshow(ccds,
                   origin='lower',
                   cmap=plt.cm.gray,
                   filternorm=0,
                   interpolation='none',
                   vmin=vmin,
                   vmax=vmax)
        ax1.set_title('Image minus 1000 ADU')
        ax2.plot(im_hist_centers, im_hist)
        ax2.set_yscale("log")
        ax2.set_xscale("log")
        ax2.axvline(overscan, color='r')
        # https://stackoverflow.com/questions/13413112/creating-labels-where-line-appears-in-matplotlib-figure
        # the x coords of this transformation are data, and the
        # y coord are axes
        trans = transforms.blended_transform_factory(ax2.transData,
                                                     ax2.transAxes)
        ax2.set_title('Histogram')
        ax2.text(overscan + 20,
                 0.05,
                 overscan_methods[o_idx] +
                 ' overscan = {:.2f}'.format(overscan),
                 rotation=90,
                 transform=trans,
                 verticalalignment='bottom')
        plt.show()
    return overscan
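The corners method above reduces to taking the smallest of four robust box estimates. A stripped-down sketch with a synthetic frame:

import numpy as np
from astropy.stats import biweight_location

image = np.random.normal(300., 10., size=(512, 512))
bs = 100
corners = [image[:bs, :bs], image[-bs:, :bs], image[:bs, -bs:], image[-bs:, -bs:]]
overscan = min(biweight_location(c) for c in corners)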
Example #32
File: stats.py  Project: mcara/imutils
 def biweight_location(self):
     """
     The biweight location of the pixel values.
     """
     return biweight_location(self.goodvals)
Example #33
    def calc_background(self, data):

        return biweight_location(self.sigma_clip(data), c=self.c, M=self.M)
Example #34
File: trend.py  Project: samotracio/trend
def fbimean(y):
    """Helper function of trendplot. Returns biweight mean of input 1d-array (see astropy.stats)"""
    return biweight_location(y)
Example #35
    def calc_background(self, data, axis=None):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis)

        return biweight_location(data, c=self.c, M=self.M, axis=axis)
Example #36
		clus_dec = DEC[i]
		clus_z = Z[i]

		# Load Galaxies
		galdata = C4.load_chris_gals(HaloID[i])	
		gal_ra,gal_dec,gal_z,gal_gmags,gal_rmags,gal_imags = galdata

		# Project Galaxies
		ang_d,lum_d = C.zdistance(clus_z,H0)
		angles = C.findangle(gal_ra,gal_dec,clus_ra,clus_dec)
		rdata = angles * ang_d
		vdata = c * (gal_z - clus_z) / (1 + clus_z)

		# Take Average Three times
		cut1 = np.where((np.abs(vdata)<1000)&(rdata<1.5))[0]
		clus_ra = astats.biweight_location(gal_ra[cut1])
		clus_dec = astats.biweight_location(gal_dec[cut1])
		clus_z = astats.biweight_location(gal_z[cut1])

		ang_d,lum_d = C.zdistance(clus_z,H0)
		angles = C.findangle(gal_ra,gal_dec,clus_ra,clus_dec)
		rdata = angles * ang_d
		vdata = c * (gal_z - clus_z) / (1 + clus_z)

		cut2 = np.where((np.abs(vdata)<500)&(rdata<1.5))[0]
		clus_ra = astats.biweight_location(gal_ra[cut2])
		clus_dec = astats.biweight_location(gal_dec[cut2])
		clus_z = astats.biweight_location(gal_z[cut2])

		ang_d,lum_d = C.zdistance(clus_z,H0)
		angles = C.findangle(gal_ra,gal_dec,clus_ra,clus_dec)
Example #37
def return_biweight_one_sigma(shots, pixlo=5, pixhi=25, wave_bins=None):
    """
    Return the biweight location of the
    1 sigma values of a series of shots. Trim
    off edge values.
   
    Parameters
    ----------
    shots : list
        a list of shots to loop
        over
    pixlo, pixhi : int (optional)
        limits for the 2D ra/dec
        pixel array when computing 
        the values. Default is 5,25.
    """
    sigmas_all = []
    for shot in shots:
        fn, fnmask = return_sensitivity_hdf_path(shot, 
                                                 return_mask_fn = True)

        print(fn)
        with SensitivityCubeHDF5Container(fn, mask_filename = fnmask) as hdf:

            first = True

            for ifuslot, scube in hdf.itercubes():

                 # assume wavelength WCS is the same for the whole HDF
                 if (type(wave_bins) != type(None)) and first:
                     junkx, junky, wlo = scube.wcs.all_world2pix(0., 0., wave_bins[:-1], 0)
                     junkx, junky, whi = scube.wcs.all_world2pix(0., 0., wave_bins[1:], 0)
                     first = False
                     maxsize = int(max(floor(whi) - ceil(wlo))*(pixhi - pixlo)*(pixhi - pixlo))

                 if type(wave_bins) != type(None):
                     sigmas = []
                     for lo, high in zip(wlo, whi):
                         sigmas_slice = scube.sigmas.filled(nan)[int(ceil(lo)):int(floor(high)), 
                                                                 pixlo:pixhi, pixlo:pixhi]
                         sigmas_slice = sigmas_slice.reshape(sigmas_slice.shape[0]*sigmas_slice.shape[1]*sigmas_slice.shape[2])
                         
                         # pad with NaN to keep consistent size
                         if len(sigmas_slice) < maxsize:
                             sigmas_slice = pad(sigmas_slice, (0, maxsize - len(sigmas_slice)), 
                                                'empty')

                         sigmas.append(sigmas_slice)
                 else:
                     sigmas = scube.sigmas.filled(nan)[:, pixlo:pixhi, pixlo:pixhi]
                     sigmas = sigmas.reshape(sigmas.shape[0], sigmas.shape[1]*sigmas.shape[2])

                 sigmas_all.extend(array(sigmas).T)

            # assume wavelength WCS is the same for the whole HDF
            if type(wave_bins) == type(None):
                 pixels = arange(scube.sigmas.shape[0])
                 junkras, junkdecs, waves = scube.wcs.all_pix2world(0*pixels, 0*pixels,  
                                                                    pixels, 0)
            else:
                 waves = 0.5*(wave_bins[1:] + wave_bins[:-1])
 
    biwt = biweight_location(array(sigmas_all), axis=0, ignore_nan=True)

    return waves, biwt 
Example #38
File: na_back.py  Project: jpmorgen/IoIO
def na_back_directory(directory,
                      calibration=True,
                      read_pout=True,
                      write_pout=True,
                      write_plot=True,
                      create_outdir=True,
                      show=False,
                      **kwargs):
    rd = reduced_dir(directory, create=False)
    poutname = os.path.join(rd, 'Na_back.pout')
    pout = cached_pout(na_back_pipeline,
                       poutname=poutname,
                       read_pout=read_pout,
                       write_pout=write_pout,
                       create_outdir=create_outdir,
                       directory=directory,
                       **kwargs)
    if len(pout) == 0:
        log.debug(f'no Na background measurements found in {directory}')
        return {}
    _, pipe_meta = zip(*pout)
    na_back_list = [pm['Na_back'] for pm in pipe_meta]
    df = pd.DataFrame(na_back_list)
    df = df.sort_values('jd')
    just_date = df['date'].iloc[0]

    instr_mag = u.Magnitude(df['best_back'] * u.electron / u.s / u.pix**2)
    df['instr_mag'] = instr_mag

    #tdf = df.loc[df['airmass'] < 2.0]
    tdf = df.loc[df['airmass'] < 2.5]
    mean_back = np.mean(tdf['best_back'])
    std_back = np.std(tdf['best_back'])
    biweight_back = biweight_location(tdf['best_back'])
    mad_std_back = mad_std(tdf['best_back'])

    # https://stackoverflow.com/questions/20664980/pandas-iterate-over-unique-values-of-a-column-that-is-already-in-sorted-order
    # and
    # https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html

    #https://matplotlib.org/stable/tutorials/intermediate/color_cycle.html
    offset_cycler = cycler(color=['r', 'g', 'b', 'y'])
    plt.rc('axes', prop_cycle=offset_cycler)

    f = plt.figure(figsize=[8.5, 11])
    plt.suptitle(f"Na background {just_date}")
    offset_groups = df.groupby(['raoff', 'decoff']).groups
    ax = plt.subplot(3, 1, 1)
    for offset_idx in offset_groups:
        gidx = offset_groups[offset_idx]
        gdf = df.iloc[gidx]
        plot_dates = julian2num(gdf['jd'])
        plt.plot_date(plot_dates,
                      gdf['best_back'],
                      label=f"dRA {gdf.iloc[0]['raoff']} "
                      f"dDEC {gdf.iloc[0]['decoff']} armin")
        plt.axhline(y=biweight_back, color='red')
        plt.axhline(y=biweight_back + mad_std_back,
                    linestyle='--',
                    color='k',
                    linewidth=1)
        plt.axhline(y=biweight_back - mad_std_back,
                    linestyle='--',
                    color='k',
                    linewidth=1)
        plt.text(0.5,
                 biweight_back + 0.1 * mad_std_back,
                 f'{biweight_back:.4f} +/- {mad_std_back:.4f}',
                 ha='center',
                 transform=ax.get_yaxis_transform())
        plt.xlabel('date')
        plt.ylabel('electron/s')
    ax.legend()

    ax = plt.subplot(3, 1, 2)
    for offset_idx in offset_groups:
        gidx = offset_groups[offset_idx]
        gdf = df.iloc[gidx]
        plt.plot(gdf['airmass'], gdf['instr_mag'], '.')
        #plt.axhline(y=biweight_back, color='red')
        #plt.axhline(y=biweight_back+mad_std_back,
        #            linestyle='--', color='k', linewidth=1)
        #plt.axhline(y=biweight_back-mad_std_back,
        #            linestyle='--', color='k', linewidth=1)
        plt.xlabel('Airmass')
        plt.ylabel('mag (electron/s/pix^2)')

    ax = plt.subplot(3, 1, 3)
    for offset_idx in offset_groups:
        gidx = offset_groups[offset_idx]
        gdf = df.iloc[gidx]
        plt.plot(gdf['alt'], gdf['best_back'], '.')
        plt.axhline(y=biweight_back, color='red')
        plt.axhline(y=biweight_back + mad_std_back,
                    linestyle='--',
                    color='k',
                    linewidth=1)
        plt.axhline(y=biweight_back - mad_std_back,
                    linestyle='--',
                    color='k',
                    linewidth=1)
        plt.xlabel('Alt')
        plt.ylabel('electron/s')

    f.subplots_adjust(hspace=0.3)
    if write_plot is True:
        write_plot = os.path.join(rd, 'Na_back.png')
    if isinstance(write_plot, str):
        plt.savefig(write_plot, transparent=True)
    if show:
        plt.show()
    plt.close()

    # Problem discussed in  https://mail.python.org/pipermail/tkinter-discuss/2019-December/004153.html
    gc.collect()

    return {
        'date': just_date,
        'jd': np.floor(df['jd'].iloc[0]),
        'biweight_back': biweight_back,
        'mad_std_back': mad_std_back,
        'na_back_list': na_back_list
    }
Example #39
	def richness_est(self,ra,dec,z,pz,gmags,rmags,imags,abs_rmags,haloid,clus_ra,clus_dec,clus_z,clus_rvir=None,gr_slope=None,gr_width=None,gr_inter=None,shiftgap=True,use_specs=False,use_bcg=False,fit_rs=False,spec_list=None,fixed_aperture=False,fixed_vdisp=False,plot_gr=False,plot_sky=False,plot_phase=False,find_pairs=True):
		''' Richness estimator, magnitudes should be apparent mags except for abs_rmag
			z : spectroscopic redshift
			pz : photometric redshift
			gr_slope : if fed color-mag (g-r vs. r) slope of fit to red sequence
			gr_width : if fed color-mag (g-r vs. r) width of fit to red sequence
			gr_inter : if red color-mag (g-r vs. r) intercept of fit to red sequence
			spec_list : indexing array specifying gals w/ spectroscopic redshifts, preferably fed as boolean numpy array
			fixed_aperture : clus_rvir = 1 Mpc
			fixed_vdisp : clus_vdisp = 1000 km/s (members < clus_vdisp*2)
			use_specs : includes all spectro members in richness regardless of their color, subject to counting twice
			use_bcg : use bcg RS_color as cluster RS_color
			fit_rs : fit a line to the member galaxy RS relationship, as of now this does not work and we take a flat RS relationship
			plot_gr : plot r vs g-r color diagram with RS fits
			plot_sky : plot RA and DEC of gals with signal and background annuli
			plot_phase : plot rdata and vdata phase space
			find_pairs : run c4 cluster pair identifier on phase spaces
			- If at least one quarter of the outer annuli doesn't have any galaxies (perhaps because it has run over the observation edge),
				it will return -99 as a richness
		'''
		# Put Into Class Namespace
		keys = ['ra','dec','z','pz','gmags','rmags','imags','clus_ra','clus_dec','clus_z','clus_rvir','vel_disp','color_data','color_cut','RS_color','RS_sigma','clus_color_cut','signal','background','richness','mems','phot','spec','spec_mems','deg_per_rvir','SA_outer','SA_inner','outer_red_dense','inner_background','Nphot_inner','Nphot_outer','red_inner','red_outer','all','outer_edge','inner_edge','shift_cut','set','pair','Nspec','gr_slope','gr_inter','gr_width']
		self.__dict__.update(ez.create(keys,locals()))

		# Take rough virial radius measurement, this method is BAD...
		if clus_rvir == None:
			clus_rvir = np.exp(-1.86)*len(np.where((rmags < -19.55) & (rdata < 1.0) & (np.abs(vdata) < 3500))[0])**0.51
			self.clus_rvir = clus_rvir

		# Set clus_rvir = 1.0 Mpc if fixed aperture == True
		if fixed_aperture == True:
			clus_rvir = 1.0

		# Magnitude Cut at 19.1 Apparent R Mag, then at -19 Absolute R Mag and sort by them
		bright = np.where(rmags < 19.1)[0]
		data = np.vstack([ra,dec,z,pz,gmags,rmags,imags,abs_rmags])
		data = data.T[bright].T
		ra,dec,z,pz,gmags,rmags,imags,abs_rmags = data

		bright = np.where(abs_rmags < -19.5)[0]
		data = np.vstack([ra,dec,z,pz,gmags,rmags,imags,abs_rmags])
		data = data.T[bright].T
		ra,dec,z,pz,gmags,rmags,imags,abs_rmags = data

		sorts = np.argsort(abs_rmags)
		data = np.vstack([ra,dec,z,pz,gmags,rmags,imags,abs_rmags])
		data = data.T[sorts].T
		ra,dec,z,pz,gmags,rmags,imags,abs_rmags = data
		self.__dict__.update(ez.create(keys,locals()))

		# Separate Into Spectro Z and Photo Z DataSets
		# Define the Spectro Z catalogue
		if spec_list == None:
			spec_cut = np.abs(z) > 1e-4
		else:
			if type(spec_list) == list: spec_list = np.array(spec_list)
			if spec_list.dtype != 'bool':
				spec_cut = np.array([False]*len(ra))
				spec_cut[spec_list] = True
			else:
				spec_cut = spec_list
		all = {'ra':ra,'dec':dec,'z':z,'pz':pz,'gmags':gmags,'rmags':rmags,'imags':imags,'abs_rmags':abs_rmags}
		spec = {'ra':ra[spec_cut],'dec':dec[spec_cut],'z':z[spec_cut],'pz':pz[spec_cut],'gmags':gmags[spec_cut],'rmags':rmags[spec_cut],'imags':imags[spec_cut],'abs_rmags':abs_rmags[spec_cut]}
		phot = {'ra':ra[~spec_cut],'dec':dec[~spec_cut],'z':z[~spec_cut],'pz':pz[~spec_cut],'gmags':gmags[~spec_cut],'rmags':rmags[~spec_cut],'imags':imags[~spec_cut],'abs_rmags':abs_rmags[~spec_cut]}

		# Project Spectra into radius and velocity
		ang_d,lum_d = C.zdistance(clus_z,self.H0)
		angles = C.findangle(spec['ra'],spec['dec'],clus_ra,clus_dec)
		rdata = angles * ang_d
		vdata = self.c * (spec['z'] - clus_z) / (1 + clus_z)
		spec.update({'rdata':rdata,'vdata':vdata})
		self.__dict__.update(ez.create(keys,locals()))

		# Take Hard Phasespace Limits
		limit = np.where( (rdata < 5) & (np.abs(vdata) < 5000) )[0]
		clus_data = np.vstack([rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags']])
		clus_data = clus_data.T[limit].T
		rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags'] = clus_data
		spec.update({'rdata':rdata,'vdata':vdata})
		self.__dict__.update(ez.create(keys,locals()))

		# Shiftgapper for Interlopers
		if shiftgap == True:
			len_before = np.where((rdata < clus_rvir*1.5)&(np.abs(vdata)<4000))[0].size
			clus_data = np.vstack([rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags']])
			clus_data = C.shiftgapper(clus_data.T).T
			sorts = np.argsort(clus_data[-1])
			clus_data = clus_data.T[sorts].T
			rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags'] = clus_data
			shift_cut = len_before - np.where((rdata < clus_rvir)&(np.abs(vdata)<4000))[0].size

		# Measure Velocity Dispersion of all galaxies within 1 * r_vir and np.abs(vdata) < 4000 km/s
		spec.update({'rdata':rdata,'vdata':vdata})
		self.__dict__.update(ez.create(keys,locals()))
		vel_disp = astats.biweight_midvariance(vdata[np.where((rdata < clus_rvir*1)&(np.abs(vdata) < 4000))])
		if fixed_vdisp == True: vel_disp = 1000

		# Run Cluster Pair Finder
		if find_pairs == True:
			pair, d1_chi, d2_chi, d3_chi, s_chi, double1, double2, double3, single, v_range, bins = pair_find(rdata,vdata)

		# Calculate Nspec, Nspec = # of galaxies within RVIR
		try:
			Nspec = np.where(rdata<clus_rvir)[0].size
		except:
			Nspec = 0
		self.__dict__.update(ez.create(keys,locals()))

		# Get Members from Spectra, get their red sequence color
		mems = np.where((spec['rdata'] < clus_rvir)&(np.abs(spec['vdata'])<2*vel_disp))[0]
		color_data = spec['gmags'] - spec['rmags']
		color_cut = np.where((color_data[mems] < 1.2) & (color_data[mems] > 0.65))[0]
		RS_color = astats.biweight_location(color_data[mems][color_cut])
		RS_shift = color_data[mems][color_cut] - RS_color
		RS_sigma = astats.biweight_midvariance(RS_shift[np.where(np.abs(RS_shift)<.15)])

		if fit_rs == True:
			clf = linear_model.LinearRegression()
			set = np.where(np.abs(color_data[mems]-RS_color)<3*RS_sigma)[0]
			clf.fit(spec['rmags'][mems][set].reshape(set.size,1),color_data[mems][set])

		if use_bcg == True:
			bright = np.argsort(all['abs_rmags'][mems])[0]
			RS_color = color_data[mems][bright]
			RS_shift = color_data[mems][color_cut] - RS_color
			RS_sigma = astats.biweight_midvariance(RS_shift[np.where(np.abs(RS_shift)<.15)])

		# spec_mems is # of members that are within 2 sigma of cluster color
		clus_color_cut = np.where(np.abs(color_data[mems] - RS_color) < RS_sigma*2)[0]
		spec_mems = len(clus_color_cut)
		self.__dict__.update(ez.create(keys,locals()))

		# If fed gr fit
		if gr_slope != None:
			def RS_color(r_mag): return r_mag*gr_slope + gr_inter
			RS_sigma = gr_width / 2
			clus_color_cut = np.where(np.abs(color_data[mems] - RS_color(spec['rmags'])[mems]) < RS_sigma*2)[0]
			spec_mems = len(clus_color_cut)
			self.__dict__.update(ez.create(keys,locals()))

		# Get Rdata from PhotoZ & SpecZ Data Set
		angles = C.findangle(all['ra'],all['dec'],clus_ra,clus_dec)
		all['rdata'] = angles * ang_d

		# Get deg per RVIR proper
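		# arcsec_per_kpc_proper(clus_z) is arcsec per proper kpc; *1e3 -> arcsec per Mpc,
		# *clus_rvir [Mpc] -> arcsec per virial radius, /3600 -> degrees per virial radius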
		deg_per_rvir = R.Cosmo.arcsec_per_kpc_proper(clus_z).value * 1e3 * clus_rvir / 3600

		# Get area of the virial circle out to 1 rvir, and area of the outer annulus (4 rvir < R < 6 rvir by default, adjusted for large rvir)
		if clus_rvir < 2.5:
			outer_edge = 6.0
			inner_edge = 4.0
		elif clus_rvir >= 2.5 and clus_rvir < 3:
			outer_edge = 5.0
			inner_edge = 3.5
		else:
			outer_edge = 3.0
			inner_edge = 2.0	
		SA_inner = np.pi*deg_per_rvir**2
		SA_outer = np.pi * ( (outer_edge*deg_per_rvir)**2 - (inner_edge*deg_per_rvir)**2 )

		# Get Number of Cluster Color Galaxies from Photo Data Set in Inner Circle and Outer Annuli
		RS_color_sig = 2.0
		if gr_slope == None:
			red_inner = np.where(((all['gmags']-all['rmags']) < RS_color + RS_color_sig*RS_sigma)&((all['gmags']-all['rmags']) > RS_color - RS_color_sig*RS_sigma)&(all['rdata']<clus_rvir))[0]
			red_outer = np.where(((all['gmags']-all['rmags']) < RS_color + 1.5*RS_sigma)&((all['gmags']-all['rmags']) > RS_color - 1.5*RS_sigma)&(all['rdata']<outer_edge*clus_rvir)&(all['rdata']>inner_edge*clus_rvir))[0]
		else:
			red_inner = np.where(((all['gmags']-all['rmags']) < RS_color(all['rmags']) + RS_color_sig*RS_sigma)&((all['gmags']-all['rmags']) > RS_color(all['rmags']) - RS_color_sig*RS_sigma)&(all['rdata']<clus_rvir))[0]
			red_outer = np.where(((all['gmags']-all['rmags']) < RS_color(all['rmags']) + 1.5*RS_sigma)&((all['gmags']-all['rmags']) > RS_color(all['rmags']) - 1.5*RS_sigma)&(all['rdata']<outer_edge*clus_rvir)&(all['rdata']>inner_edge*clus_rvir))[0]

		Nphot_inner = len(red_inner)
		Nphot_outer = len(red_outer)

		self.__dict__.update(ez.create(keys,locals()))

		# Get Solid Angle Density of Outer Red Galaxies
		outer_red_dense = Nphot_outer / SA_outer
		inner_background = int(np.ceil(outer_red_dense * SA_inner))

		# If inner_background is less than Nphot_inner, then Nphot_inner -= inner_background, otherwise Nphot_inner = 0
		if inner_background < Nphot_inner:
			Nphot_inner -= inner_background
		else:
			Nphot_inner = 0

		# Richness = spec_mems + Nphot_inner or just Nphot_inner
		if use_specs == True:
			richness = spec_mems + Nphot_inner
		else:
			richness = Nphot_inner
		self.__dict__.update(ez.create(keys,locals()))

		# Plot
		if plot_gr == True:
			fig,ax = mp.subplots()
			ax.plot(rmags,gmags-rmags,'ko',alpha=.8)
			ax.plot(spec['rmags'][mems],color_data[mems],'co')
			if gr_slope == None:
				ax.axhline(RS_color,color='r')
				ax.axhline(RS_color+RS_sigma,color='b')
				ax.axhline(RS_color-RS_sigma,color='b')
			else:
				xdata = np.arange(spec['rmags'][mems].min(),spec['rmags'][mems].max(),.1)
				ax.plot(xdata,RS_color(xdata),color='r')
				ax.plot(xdata,RS_color(xdata)+RS_sigma,color='b')
				ax.plot(xdata,RS_color(xdata)-RS_sigma,color='b')
			ax.set_xlim(13,19)
			ax.set_ylim(0,1.3)
			ax.set_xlabel('Apparent R Mag',fontsize=16)
			ax.set_ylabel('App G Mag - App R Mag',fontsize=16)
			ax.set_title('Color-Mag Diagram, Cluster '+str(haloid))
			fig.savefig('colormag_'+str(haloid)+'.png',bbox_inches='tight')
			mp.close(fig)

		if plot_sky == True:
			fig,ax = mp.subplots()
			ax.plot(all['ra'],all['dec'],'ko')
			ax.plot(all['ra'][red_inner],all['dec'][red_inner],'ro')
			ax.plot(all['ra'][red_outer],all['dec'][red_outer],'yo')
			ax.plot(clus_ra,clus_dec,'co',markersize=9)
			ax.set_xlabel('RA',fontsize=15)
			ax.set_ylabel('Dec.',fontsize=15)
			ax.set_title('Richness Annuli for Halo '+str(haloid))
			fig.savefig('skyplot_'+str(haloid)+'.png',bbox_inches='tight')
			mp.close(fig)

		if plot_phase == True:
			fig,ax = mp.subplots()
			ax.plot(spec['rdata'],spec['vdata'],'ko')
			ax.plot(spec['rdata'][mems],spec['vdata'][mems],'co')
			bcg = np.where(spec['abs_rmags']==spec['abs_rmags'][mems].min())[0][0]
			ax.plot(spec['rdata'][bcg],spec['vdata'][bcg],'ro')
			ax.set_xlim(0,5)
			ax.set_ylim(-5000,5000)
			ax.set_xlabel('Radius (Mpc)',fontsize=15)
			ax.set_ylabel('Velocity (km/s)',fontsize=15)
			ax.set_title('phasespace haloid '+str(haloid))
			fig.savefig('phasespace_'+str(haloid)+'.png',bbox_inches='tight')
			mp.close(fig)

		# Check that galaxies exist (somewhat) uniformly in the outer annulus (i.e. the cluster isn't on the edge of the observation strip)
		# First, project all galaxies into polar coordinates centered on the cluster center
		x = (all['ra']-clus_ra)/np.cos(clus_dec*np.pi/180)		# x offset in degrees, centered on cluster center
		y = (all['dec']-clus_dec)
		all['radius'] = np.sqrt( x**2 + y**2 ) / deg_per_rvir	# radius scaled by RVIR
		all['theta'] = np.arctan( y / x )
		# Add corrections to arctan function
		all['theta'][np.where( (x < 0) & (y > 0) )] += np.pi	# Quadrant II
		all['theta'][np.where( (x < 0) & (y < 0) )] += np.pi	# Quadrant III
		all['theta'][np.where( (x > 0) & (y < 0) )] += 2*np.pi	# Quadrant IV
		# Then break the outer annulus into 4 sections and check that at least 1 galaxy exists in each section
		sizes1 = np.array([np.where((np.abs(all['theta']-i)<=np.pi/2)&(all['radius']>inner_edge)&(all['radius']<15))[0].size for i in np.linspace(0,2*np.pi,4)])
		# Do it again, but shift theta by np.pi/8 this time
		sizes2 = np.array([np.where((np.abs(all['theta']-i)<=np.pi/2)&(all['radius']>inner_edge)&(all['radius']<15))[0].size for i in np.linspace(np.pi/8,17*np.pi/8,4)])
		if 0 in sizes1 or 0 in sizes2:
			mp.plot(all['radius'],all['theta'],'ko')
			mp.xlabel('radius')
			mp.ylabel('theta')
			mp.savefig('rth_'+str(haloid)+'.png')
			mp.close()
			print('sizes1 =', sizes1)
			print('sizes2 =', sizes2)
			return -99

		return richness
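
The heart of the estimator above is the statistical background subtraction: red-sequence galaxies are counted inside 1 r_vir, and the surface density of red galaxies in the outer annulus is scaled to the area of the inner circle and subtracted. Below is a minimal, self-contained sketch of just that step, using made-up array names (color, r_over_rvir) rather than the class attributes used above:

import numpy as np

def background_subtracted_count(color, r_over_rvir, rs_color, rs_sigma,
                                inner_edge=4.0, outer_edge=6.0):
    # Red-sequence selection: +/- 2 sigma inside 1 r_vir, +/- 1.5 sigma in the outer annulus
    red_inner = (np.abs(color - rs_color) < 2.0 * rs_sigma) & (r_over_rvir < 1.0)
    red_outer = ((np.abs(color - rs_color) < 1.5 * rs_sigma)
                 & (r_over_rvir > inner_edge) & (r_over_rvir < outer_edge))
    # Scale the annulus surface density to the area of the inner circle
    area_inner = np.pi * 1.0**2
    area_outer = np.pi * (outer_edge**2 - inner_edge**2)
    background = int(np.ceil(red_outer.sum() / area_outer * area_inner))
    # Subtract the expected background; clamp at zero, as the method above does
    return max(int(red_inner.sum()) - background, 0)

# e.g.: richness_phot = background_subtracted_count(gmags - rmags, rdata / clus_rvir, RS_color, RS_sigma)
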
Example #40
0
def calc_stat(data, sigma=1.8, niter=10, algorithm='median'):
    """Calculate statistics for given data.

    Parameters
    ----------
    data : ndarray
        Data from which the statistic is calculated.

    sigma : float
        Sigma for sigma clipping.

    niter : int
        Number of iterations for sigma clipping.

    algorithm : {'mean', 'median', 'mode', 'stddev'}
        Algorithm for statistics calculation.

    Returns
    -------
    val : float
        Statistics value.

    Raises
    ------
    ValueError
        Invalid algorithm.

    """
    arr = np.ravel(data)

    if len(arr) < 1:
        return 0.0

    # NOTE: Now requires Astropy 1.1 or later, so this check is not needed.
    # from astropy import version as astropy_version
    # if ((astropy_version.major==1 and astropy_version.minor==0) or
    #         (astropy_version.major < 1)):
    #     arr_masked = sigma_clip(arr, sig=sigma, iters=niter)
    # else:
    #     arr_masked = sigma_clip(arr, sigma=sigma, iters=niter)
    arr_masked = sigma_clip(arr, sigma=sigma, iters=niter)

    arr = arr_masked.data[~arr_masked.mask]

    if len(arr) < 1:
        return 0.0

    algorithm = algorithm.lower()
    if algorithm == 'mean':
        val = arr.mean()
    elif algorithm == 'median':
        val = np.median(arr)
    elif algorithm == 'mode':
        val = biweight_location(arr)
    elif algorithm == 'stddev':
        val = arr.std()
    else:
        raise ValueError('{0} is not a valid algorithm for sky background '
                         'calculations'.format(algorithm))

    return val
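
A minimal usage sketch, assuming calc_stat above is in scope together with its numpy/astropy dependencies (np, sigma_clip, biweight_location); the array and values here are made up:

import numpy as np

data = np.random.default_rng(0).normal(loc=200.0, scale=5.0, size=(64, 64))
data[10, 10] = 5000.0                              # simulate a hot pixel
sky_mode = calc_stat(data, sigma=1.8, niter=10, algorithm='mode')   # robust biweight "mode"
sky_rms = calc_stat(data, algorithm='stddev')                       # clipped standard deviation
print(sky_mode, sky_rms)                           # background level (~200) and clipped scatter
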
Example #41
0
                                                header=hdu_counts[1].header))

    # if there's more than one filter, do reprojection
    if len(filter_list) > 1:
        for f in range(1,len(filter_list)):
            new_array, _ = reproject_interp(hdu_list[f], hdu_list[0].header)
            hdu_list[f] = fits.ImageHDU(data=new_array, header=hdu_list[0].header)

    # normalize the images
    for f in range(len(filter_list)):

        # subtract mode
        # - do a sigma clip
        pix_clip = sigma_clip(hdu_list[f].data, sigma=2.5, iters=3)
        # - calculate biweight
        biweight_clip = biweight_location(pix_clip.data[~pix_clip.mask])
        # - subtraction
        new_array = hdu_list[f].data - biweight_clip

        # set anything below 0 to 0
        new_array[new_array < 0] = 0

        # set 95th percentile to 1
        new_array = new_array/np.nanpercentile(new_array, 95)

        # save it
        hdu_list[f].data = new_array


    # add the images together
    im_sum = np.mean([hdu_list[f].data for f in range(len(filter_list))], axis=0)