Example #1
# imports assumed by this example
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
from scipy.stats import skellam
from melo import Melo


def quickstart_example():
    """
    Create a time series of comparison data by pairing and
    subtracting 100 different Poisson distributions.
    """
    mu_values = np.random.randint(80, 110, 100)
    mu1, mu2 = map(np.array, zip(*combinations(mu_values, 2)))
    labels1, labels2 = [mu.astype(str) for mu in [mu1, mu2]]
    spreads = skellam.rvs(mu1=mu1, mu2=mu2)
    times = np.arange(spreads.size).astype('datetime64[s]')

    # MELO class arguments (explained in docs)
    lines = np.arange(-59.5, 60.5)
    k = .15

    # train the model on the list of comparisons
    melo = Melo(lines=lines, k=k)
    melo.fit(times, labels1, labels2, spreads)

    # predicted and true (analytic) comparison values
    pred_times = np.repeat(melo.last_update, times.size)
    pred = melo.mean(pred_times, labels1, labels2)
    true = skellam.mean(mu1=mu1, mu2=mu2)

    # plot predicted means versus true means
    plt.scatter(pred, true)
    plt.plot([-20, 20], [-20, 20], color='k')
    plt.xlabel('predicted mean')
    plt.ylabel('true mean')
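The docstring frames the spreads as differences of Poisson variables, which is exactly what skellam.rvs draws directly: the difference of two independent Poisson variables with rates mu1 and mu2 is Skellam-distributed with mean mu1 - mu2 and variance mu1 + mu2. A minimal standalone check of that equivalence (not part of the melo package; the rates are arbitrary):

# Compare moments of explicit Poisson differences against direct Skellam draws.
from scipy.stats import poisson, skellam

mu1, mu2 = 100, 90
diff = poisson.rvs(mu1, size=100000) - poisson.rvs(mu2, size=100000)
direct = skellam.rvs(mu1=mu1, mu2=mu2, size=100000)

print(diff.mean(), direct.mean(), mu1 - mu2)   # all close to 10
print(diff.var(), direct.var(), mu1 + mu2)     # all close to 190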
Example #2
def shuffle_reactions(pop0, pop1, S1, S2, r, w):
    r1 = r[0:2]
    r2_o = r[2:4]
    # perturb r2 and update r1
    r2 = r2_o + skellam.rvs(w, w, size=2)
    # [[1,1],[0,1]] is inverse of S2=[[0,1],[1,-1]]
    r1 = np.matmul(np.array([[1, 1], [0, 1]]), pop1 - pop0 -
                   np.matmul(S2, r2))  # no inv(S1) factor needed, S1 is the identity matrix
    r = np.array(np.append(r1, r2), dtype=int)
    if np.any(r < 0):
        while True:
            # perturb r2 and update r1
            r2 = r2_o + skellam.rvs(w, w, size=2)
            r1 = np.matmul(np.array([[1, 1], [0, 1]]), pop1 - pop0 -
                           np.matmul(S2, r2))  # no inv(S1) factor needed, S1 is the identity matrix
            r = np.array(np.append(r1, r2), dtype=int)
            if np.all(r >= 0):
                break
    return r
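shuffle_reactions proposes a new reaction-count vector: the last two counts are perturbed by a mean-zero Skellam step (skellam.rvs(w, w) has mean w - w = 0), the first two are recomputed from the population balance, and the draw is repeated until every count is non-negative. A hypothetical call, with all inputs invented for illustration (the function's own module is assumed to import numpy and scipy.stats.skellam):

# Hypothetical inputs; S2 follows the stoichiometry quoted in the comment above.
import numpy as np

S1 = np.eye(2, dtype=int)            # identity, per the trailing comment
S2 = np.array([[0, 1], [1, -1]])
pop0 = np.array([50, 30])            # populations before the interval
pop1 = np.array([48, 35])            # populations after the interval
r = np.array([10, 5, 3, 2])          # current reaction counts
w = 1.0                              # width of the symmetric Skellam step

r_new = shuffle_reactions(pop0, pop1, S1, S2, r, w)
print(r_new)                         # length-4 vector with non-negative entries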
Example #3
def shuffle_reactions(pop0, pop1, S2, r, w):
    r1 = r[0:2]
    r2_o = r[2]
    # perturb r2 and update r1
    r2 = r2_o + skellam.rvs(w, w)
    #r1 = np.matmul(np.linalg.inv(S1), pop1-pop0-S2*r2) # not needed, S1 is the identity matrix
    r1 = pop1 - pop0 - S2 * r2
    if np.any(r1 < 0) or r2 < 0:
        while True:
            # perturb r2 and update r1
            while True:
                r2 = r2_o + skellam.rvs(w, w, size=5)
                if np.any(r2 >= 0):
                    r2 = r2[r2 >= 0][0]
                    break
            r1 = pop1 - pop0 - S2 * r2
            if np.all(r1 >= 0):
                break
    return np.array(np.append(r1, r2), dtype=int)
Example #4
    def __init__(self, size=1000):

        self.times = np.arange(size).astype('datetime64[s]')
        lambdas1, lambdas2 = np.random.choice(self.lambdas, size=(2, size))

        self.spreads = skellam.rvs(mu1=lambdas1, mu2=lambdas2, size=size)
        self.totals = poisson.rvs(mu=(lambdas1 + lambdas2), size=size)

        self.labels1 = lambdas1.astype(str)
        self.labels2 = lambdas2.astype(str)
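This __init__ assumes that self.lambdas (a pool of candidate Poisson rates) is defined on the enclosing class. A minimal sketch of that context, with the class name and rate pool invented for illustration:

import numpy as np
from scipy.stats import poisson, skellam

class PoissonComparisons:
    # pool of candidate rates; the actual values are an assumption
    lambdas = np.arange(80, 111)

    def __init__(self, size=1000):
        self.times = np.arange(size).astype('datetime64[s]')
        lambdas1, lambdas2 = np.random.choice(self.lambdas, size=(2, size))
        self.spreads = skellam.rvs(mu1=lambdas1, mu2=lambdas2, size=size)
        self.totals = poisson.rvs(mu=(lambdas1 + lambdas2), size=size)
        self.labels1 = lambdas1.astype(str)
        self.labels2 = lambdas2.astype(str)

comparisons = PoissonComparisons(size=500)
print(comparisons.spreads[:5], comparisons.totals[:5])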
Example #5
def fit_chi2(res_data, median1, median2, conf_level=0.995, dx=100, dy=100, std_thresh=1000, normalize=True):
    if np.std(res_data) == 0:
        Q = 0
    elif res_data.mask.all():
        Q = 0
    elif np.std(res_data) >= std_thresh:
        Q = 0
    else:
        if normalize:
            res_data = (res_data - (median1 - median2)) / np.sqrt(median1 + median2)
        expected_iter = len(res_data.flatten())
        expected_interval = skell.interval(0.99999999, median1, median2)  # 'skell' is assumed to be an alias for scipy.stats.skellam
        medianAvg = np.mean([median1, median2])
        bns = int((np.log2(expected_iter) + 1))
        expected = skell.rvs(median1, median2, size=(expected_iter))
        if medianAvg >= 0:
            data_hist = np.histogram(res_data.data, weights=(res_data.mask-1)*-1, bins=bns, range=expected_interval, density=False)
            expected_hist = np.histogram(expected, bins=bns, range=expected_interval, density=False)
        elif medianAvg < 0:
            data_hist = np.histogram(res_data.data, weights=(np.logical_not(res_data.mask)).astype(int), bins=bns, range=expected_interval, density=False)
            expected_hist = np.histogram(expected, bins=bns, range=expected_interval, density=False)
        data_freq = list(data_hist[0])
        expected_freq = list(expected_hist[0])
        data_zeroind = [i for i in range(len(data_freq)) if expected_freq[i] == 0]
        expected_zeroind = [i for i in range(len(expected_freq)) if expected_freq[i] == 0]
        for i in expected_zeroind:
            if i not in data_zeroind:
                data_zeroind.append(i)
        data_freq_new = []
        expected_freq_new = []
        data_hist_new = []
        for ind in range(len(data_freq)):
            if ind not in data_zeroind:
                data_freq_new.append(data_freq[ind])
                expected_freq_new.append(expected_freq[ind])
                data_hist_new.append(data_hist[1][ind])
        del data_freq, expected_freq
        data_hist_new = np.array(data_hist_new, dtype=np.float64)
        chi2_test = chisquare(data_freq_new, f_exp=expected_freq_new)
        Q = 1 / (1 + (chi2_test[0] / (1 * expected_iter)))
        b_mid = 0.5*(data_hist_new[1:]+data_hist_new[:-1])
        b_mid = b_mid.astype(np.float64)
    return Q
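fit_chi2 bins the unmasked residual pixels and an equal-sized sample of Skellam draws over the same interval, drops bins with zero expected counts, and maps the chi-square statistic to a score Q in (0, 1], with Q near 1 meaning the residuals are consistent with the difference of two Poisson backgrounds. A hypothetical call on synthetic data, using normalize=False so the data and the reference draws stay on the same scale; the function's own module is assumed to import skell (scipy.stats.skellam) and chisquare:

import numpy as np
import numpy.ma as ma
from scipy.stats import skellam

median1, median2 = 200.0, 180.0
residuals = skellam.rvs(median1, median2, size=(64, 64)).astype(float)
# explicit (all-False) mask: masked pixels would simply get zero weight in the data histogram
mask = np.zeros(residuals.shape, dtype=bool)
res_data = ma.MaskedArray(residuals, mask=mask)

Q = fit_chi2(res_data, median1, median2, normalize=False)
print(Q)   # close to 1 when the residuals really are Skellam-distributed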
Example #6
    def _sample_scipy(self, size):
        # draw `size` samples by delegating to scipy's Skellam implementation
        mu1, mu2 = float(self.mu1), float(self.mu2)
        from scipy.stats import skellam
        return skellam.rvs(mu1=mu1, mu2=mu2, size=size)
Example #7
x = np.arange(skellam.ppf(0.01, mu1, mu2), skellam.ppf(0.99, mu1, mu2))
ax.plot(x, skellam.pmf(x, mu1, mu2), 'bo', ms=8, label='skellam pmf')
ax.vlines(x, 0, skellam.pmf(x, mu1, mu2), colors='b', lw=5, alpha=0.5)

# Alternatively, the distribution object can be called (as a function)
# to fix the shape and location. This returns a "frozen" RV object holding
# the given parameters fixed.

# Freeze the distribution and display the frozen ``pmf``:

rv = skellam(mu1, mu2)
ax.vlines(x,
          0,
          rv.pmf(x),
          colors='k',
          linestyles='-',
          lw=1,
          label='frozen pmf')
ax.legend(loc='best', frameon=False)
plt.show()

# Check accuracy of ``cdf`` and ``ppf``:

prob = skellam.cdf(x, mu1, mu2)
np.allclose(x, skellam.ppf(prob, mu1, mu2))
# True

# Generate random numbers:

r = skellam.rvs(mu1, mu2, size=1000)
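The module-level fragment above assumes that the Skellam parameters and a matplotlib Axes were created earlier. A minimal preamble (the parameter values are illustrative, not taken from the original source):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import skellam

fig, ax = plt.subplots(1, 1)
mu1, mu2 = 15, 8   # rates of the two Poisson components being differenced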
Example #8
def phose(image,
          dpixel=1,
          thresh=3.5,
          kron_min=0.01,
          fill_method='gauss',
          negative=False,
          fit='moffat'):
    data_image = image.replace('residual_', '')
    data_image = data_image.replace('residuals', 'data')
    res_data = fits.getdata(image)
    if negative:
        res_data *= -1
    res_mask = fits.getdata(image, 1)
    try:
        weight_check = fits.getval(image, 'WEIGHT')
    except Exception:
        weight_check = 'N'
    if weight_check == 'Y':
        res_mask = (res_mask - 1) * -1
    location = image.split('/')[:-2]
    location = '/'.join(location)
    template = glob.glob("%s/templates/*.fits" % (location))[0]
    try:
        template_median = float(fits.getval(template, 'MEDIAN'))
    except Exception:
        template_median = np.median(fits.getdata(template))
    try:
        science_median = float(fits.getval(data_image, 'MEDIAN'))
    except Exception:
        science_median = np.median(fits.getdata(data_image))
    res_data_sep = res_data.byteswap().newbyteorder()
    try:
        res_bkg = sep.Background(res_data_sep, mask=res_mask)
    except ValueError:
        res_bkg = sep.Background(res_data, mask=res_mask)
    res_rms = res_bkg.globalrms
    res_back = res_bkg.globalback
    if fill_method == 'gauss':
        fill_bkg = np.random.normal(loc=res_bkg.globalback,
                                    scale=res_bkg.globalrms,
                                    size=res_data.shape)
    elif fill_method == 'skellam':
        fill_bkg = skellam.rvs(float(science_median),
                               float(template_median),
                               size=(res_data.shape))
        fill_bkg = fill_bkg.astype(np.float64)
    else:
        print(
            "-> Error: Invalid value for 'fill_method' keyword\n-> Exiting...")
        sys.exit()
    FWHM = psf.fwhm(data_image)
    sigma = FWHM / 2.355
    if fit == 'moffat':
        fit_param = moffat_fwhm_to_a(FWHM)
    elif fit == 'gauss':
        fit_param = FWHM / 2.355
    else:
        print("-> Error: Invalid value for 'fit' parameter\n-> Exiting...")
        sys.exit()
    unfiltered_sources, temp_sources = get_sources(data_image,
                                                   filtered=False,
                                                   phose=True)
    filtered_sources, filtered_inds = get_sources(data_image, filtered=True)
    bad_mask = np.zeros(res_data.shape)
    good_mask = np.zeros(res_data.shape)
    for s in unfiltered_sources:
        og_flux = s[0]
        x = s[2]
        y = s[3]
        kron_radius = s[1]
        a_image = s[4]
        b_image = s[5]
        min_flux = (np.pi * np.mean(
            (kron_radius * a_image, kron_radius * b_image))**2) * res_back
        if (og_flux < min_flux) or (kron_radius == 0):
            continue
        theta_image = s[6]
        if kron_radius < kron_min:
            indices = [0]
        else:
            indices = [
                v[1] for i, v in enumerate(temp_sources)
                if (v[2] - dpixel <= x <= v[2] + dpixel and v[3] -
                    dpixel <= y <= v[3] + dpixel)
                and phose_check(res_data, x, y, kron_radius, a_image, b_image,
                                theta_image, thresh, og_flux, v[0])
            ]
#        phoseCheck = phose_check(res_data, x, y, kron_radius, a_image, b_image, theta_image, thresh, og_flux, og_flux)
#        if phoseCheck == True or (s not in filtered_sources):
        if indices != []:
            if np.mean(indices) > kron_radius:
                kron_radius = np.mean(indices)
            kron_radius *= 1.5
            size_check = aperture_resize(res_rms,
                                         fit_param,
                                         sigma,
                                         og_flux,
                                         kron_radius,
                                         x,
                                         y,
                                         a_image,
                                         b_image,
                                         dist=fit)
            while size_check:
                kron_radius *= 1.5
                size_check = aperture_resize(res_back,
                                             fit_param,
                                             sigma,
                                             og_flux,
                                             kron_radius,
                                             x,
                                             y,
                                             a_image,
                                             b_image,
                                             dist=fit)
            ellipse_mask(bad_mask, (y - 1), (x - 1),
                         kron_radius,
                         a_image,
                         b_image,
                         theta_image,
                         fill_value=1)
        else:
            ellipse_mask(good_mask, (y - 1), (x - 1),
                         kron_radius,
                         a_image,
                         b_image,
                         theta_image,
                         fill_value=1)


#            res_data = phose_fill(res_data, phose_check(res_data, x, y, kron_radius, a_image, b_image, theta_image, mask_return=True), fill_bkg)
#        for t in temp_sources:
#            t_flux = t[0]
#            t_x = t[2]
#            t_y = t[3]
#            if (t_x-dpixel<=x<=t_x+dpixel) and (t_y-dpixel<=y<=t_y+dpixel):
#                temp_mask = np.ones(res_data.shape)
#                ellipse_mask(temp_mask, (y-1), (x-1), kron_radius, a_image, b_image, theta_image, fill_value=0)
#                res_data_phot = np.ma.MaskedArray(res_data, mask=temp_mask, fill_value=0)
#                s_flux = np.sum(res_data_phot.filled())
#                if s_flux < (thresh * np.sqrt(og_flux + t_flux)):
#                    temp_mask_fill = (temp_mask - 1) * -1
#                    res_data_final = np.ma.MaskedArray(res_data, mask = temp_mask_fill, fill_value=0)
#                    res_data_final = res_data_final.filled()
#                    phose_patch = np.multiply(temp_mask_fill, fill_bkg)
#                    res_data = res_data_final + phose_patch
#                    del temp_mask, res_data_phot, temp_mask_fill, res_data_final
#    bad_hdu = fits.PrimaryHDU(bad_mask.astype(np.float64))
#    good_hdu = fits.PrimaryHDU(good_mask.astype(np.float64))
#    bad_hdu.writeto("%s/%s_bad.fits" % (location, (image.split('/'))[-1]))
#    good_hdu.writeto("%s/%s_good.fits" % (location, (image.split('/'))[-1]))
    and_mask = np.logical_and(bad_mask, good_mask)
    bad_mask -= and_mask
    bad_mask = np.logical_or(bad_mask, res_mask)
    res_data = phose_fill(res_data, bad_mask, fill_bkg)
    #    res_hdu = fits.PrimaryHDU(res_data)
    #    res_hdu.writeto("%s/%s_res.fits" % (location, (image.split('/'))[-1]))
    #    bad_hdu = fits.PrimaryHDU(bad_mask.astype(np.float64))
    #    bad_hdu.writeto("%s/%s.fits" % (location, (image.split('/'))[-1]))
    return (res_data -
            (science_median - template_median)) / np.sqrt(science_median +
                                                          template_median)
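The return statement standardizes the filled residual image using the Skellam moments: a difference of Poisson backgrounds with rates science_median and template_median has mean science_median - template_median and variance science_median + template_median, so background-only pixels in the returned array should have roughly zero mean and unit variance. A standalone check of those moments (illustrative medians, not part of phose):

from scipy.stats import skellam

mu1, mu2 = 1200.0, 950.0                 # stand-ins for the science and template medians
mean, var = skellam.stats(mu1, mu2, moments='mv')
print(mean, mu1 - mu2)                   # both 250.0
print(var, mu1 + mu2)                    # both 2150.0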