Example #1
    def prepare(self):

        if not self.filtered:
            print('Filtering data...')
            self.filter()

        print('Rebinning flux...')
        rebin_flux = rebin(self.data['bjd', 'flux'],
                           binwidth=2. / 60. / 24.,
                           exptime=2. / 60. / 24.,
                           timestamp_position=0.5,
                           median_replace=True)
        print('Rebinning error...')
        rebin_err = rebin(self.data['bjd', 'err'],
                          binwidth=2. / 60. / 24.,
                          exptime=2. / 60. / 24.,
                          timestamp_position=0.5,
                          median_replace=True)

        new_bjd = rebin_flux[:, 0]
        new_flux = rebin_flux[:, 1]
        new_err = rebin_err[:, 1]

        self.bjd = new_bjd
        self.flux = new_flux
        self.err = new_err

        self.data = Table([new_bjd, new_flux, new_err],
                          names=['bjd', 'flux', 'err'])

        print('Done!')
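The `rebin` call above uses this project's own light-curve API: `binwidth=2./60./24.` is two minutes expressed in days, `timestamp_position=0.5` places the new timestamps at bin centers, and `median_replace` fills empty bins. As an illustration only, a minimal NumPy stand-in (the helper name and behavior here are assumptions, not the library's implementation) could look like:

import numpy as np

def bin_lightcurve(bjd, flux, binwidth=2. / 60. / 24.):
    """Hypothetical stand-in: average flux into regular binwidth-day bins,
    stamping each bin at its center (timestamp_position=0.5 above)."""
    edges = np.arange(bjd.min(), bjd.max() + binwidth, binwidth)
    centers = edges[:-1] + 0.5 * binwidth
    # bin index of every sample, clipped so bjd.max() lands in the last bin
    idx = np.clip(np.digitize(bjd, edges) - 1, 0, len(centers) - 1)
    sums = np.bincount(idx, weights=flux, minlength=len(centers))
    counts = np.bincount(idx, minlength=len(centers))
    with np.errstate(invalid='ignore'):
        means = sums / counts  # NaN marks empty bins (no median_replace here)
    return centers, means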
Example #2
def fit(im, impsf, hpsf, xpos=None, ypos=None, radius=32):

    x = xpos
    y = ypos

    ny, nx = np.shape(im)
    ixlo, iylo = int(x - radius), int(y - radius)
    if ixlo < 0: ixlo = 0
    if iylo < 0: iylo = 0
    ixhi = int(x + radius) + 1
    iyhi = int(y + radius) + 1
    if ixhi > (nx - 1): ixhi = nx - 1
    if iyhi > (ny - 1): iyhi = ny - 1
    ixx = ixhi - ixlo + 1
    iyy = iyhi - iylo + 1
    dx = np.arange(ixx) + ixlo - x
    dy = np.arange(iyy) + iylo - y
    gauss = [
        hpsf['GAUSS1'], hpsf['GAUSS2'], hpsf['GAUSS3'], hpsf['GAUSS4'],
        hpsf['GAUSS5']
    ]
    dx = dx.reshape(1, len(dx))
    dy = dy.reshape(len(dy), 1)
    dx = rebin.rebin(dx, [np.shape(dx)[1], np.shape(dx)[1]])
    dy = rebin.rebin(dy, [len(dy), len(dy)])
    try:
        model = dao_value.dao_value(dx, dy, gauss, impsf, deriv=False)
    except Exception:
        return 1, 1, 0, 0, False, 0, 0, 0

    subim = im[iylo - 1:iyhi, ixlo - 1:ixhi]

    model = model / 10**(-0.4 * (hpsf['PSFMAG'] - 25))

    return model, subim
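Here `rebin.rebin` is used IDL-REBIN-style, purely to expand a 1 x N row and an N x 1 column into full N x N grids of x- and y-offsets before evaluating the PSF model. The same expansion in plain NumPy broadcasting (a sketch, not the dao_value pipeline used above):

import numpy as np

n = 5
dx = np.arange(n, dtype=float).reshape(1, n)  # row of x-offsets
dy = np.arange(n, dtype=float).reshape(n, 1)  # column of y-offsets
dx2d = np.broadcast_to(dx, (n, n))  # every row is a copy of dx
dy2d = np.broadcast_to(dy, (n, n))  # every column is a copy of dy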
Example #3
def fit(im, impsf, hpsf, xpos=None, ypos=None, radius=32):

    x = xpos
    y = ypos

    ny, nx = np.shape(im)
    ixlo, iylo = int(x - radius), int(y - radius)
    if ixlo < 0: ixlo = 0
    if iylo < 0: iylo = 0
    ixhi = int(x + radius) + 1
    iyhi = int(y + radius) + 1
    if ixhi > (nx - 1): ixhi = nx - 1
    if iyhi > (ny - 1): iyhi = ny - 1
    ixx = ixhi - ixlo + 1
    iyy = iyhi - iylo + 1
    dx = np.arange(ixx) + ixlo - x
    dy = np.arange(iyy) + iylo - y
    gauss = [hpsf['GAUSS1'], hpsf['GAUSS2'], hpsf['GAUSS3'],
             hpsf['GAUSS4'], hpsf['GAUSS5']]
    dx = dx.reshape(1, len(dx))
    dy = dy.reshape(len(dy), 1)
    dx = rebin.rebin(dx, [np.shape(dx)[1], np.shape(dx)[1]])
    dy = rebin.rebin(dy, [len(dy), len(dy)])
    try:
        model = dao_value.dao_value(dx, dy, gauss, impsf, deriv=False)
    except Exception:
        return 1, 1, 0, 0, False, 0, 0, 0

    subim = im[iylo - 1:iyhi, ixlo - 1:ixhi]

    model = model / 10**(-0.4 * (hpsf['PSFMAG'] - 25))

    return model, subim
Example #4
 def test2(self):
     a = np.linspace(1, 24, num=24,
                     dtype=np.float64).reshape(4, 6)
     actual = rebin(a, 2)
     expected = np.array([[4.5, 6.5, 8.5],
                          [16.5, 18.5, 20.5]])
     assert_array_equal(expected, actual)
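The expected array shows what `rebin(a, 2)` does in this library: it averages non-overlapping 2 x 2 blocks (1, 2, 7, 8 average to 4.5, and so on). The same result with a plain reshape trick, assuming the shape divides evenly by the factor:

import numpy as np

a = np.linspace(1, 24, num=24, dtype=np.float64).reshape(4, 6)
block_means = a.reshape(2, 2, 3, 2).mean(axis=(1, 3))
# block_means == [[ 4.5,  6.5,  8.5],
#                 [16.5, 18.5, 20.5]]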
Example #5
    def local_coverage(self, features, *args, **kwargs):
        processes = kwargs.pop('processes', None)
        if not processes:
            return _local_coverage(self.adapter, features, *args, **kwargs)

        if isinstance(features, (list, tuple)):
            raise ValueError(
                "only single features are supported for parallel "
                "local_coverage")

        # we don't want to have self.array do the binning
        bins = kwargs.pop('bins', None)

        # if we got here, processes is not None, so this will trigger
        # a parallel array creation
        features = helpers.tointerval(features)
        x = np.arange(features.start, features.stop)
        features = list(helpers.split_feature(features, processes))
        ys = self.array(
            features, *args, bins=None, processes=processes, ragged=True,
            **kwargs)
        # now we ravel() and re-bin
        y = np.column_stack(ys).ravel()
        if bins:
            xi, yi = rebin.rebin(x, y, bins)
            del x, y
            return xi, yi
        return x, y
Example #6
def test_x2_in_x1_2():
    """
    x2 has a couple of bins, each of which span more than one original bin
    """
    # old size
    m = 10

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([0.25, 0.55, 0.75])

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
    y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
    y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]

    # nominal value comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
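The hand-built reference works because the expected bins are linear combinations of the old ones, and the uncertainties package propagates standard deviations exactly through linear expressions: for independent inputs, y = 0.5*y2 + y3 + y4 + 0.5*y5 has standard deviation sqrt(0.25*s2^2 + s3^2 + s4^2 + 0.25*s5^2). A quick standalone check (values are arbitrary stand-ins):

import numpy as np
from uncertainties import unumpy as unp

y = unp.uarray([1., 2., 3., 4.], [0.1, 0.2, 0.3, 0.4])  # stand-ins for bins 2..5
combo = 0.5 * y[0] + y[1] + y[2] + 0.5 * y[3]
expected_sd = np.sqrt(0.25 * 0.1**2 + 0.2**2 + 0.3**2 + 0.25 * 0.4**2)
assert np.isclose(combo.std_dev, expected_sd)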
Example #7
def test_x2_in_x1():
    """
    x2 only has one bin, and it is surrounded by x1 range
    """
    # old size
    m = 4

    # new size
    n = 1

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(0.3, 0.65, n+1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = (y_old_ave[1]*(x_old[2]-x_new[0])
                  + y_old_ave[2]*(x_new[1]-x_old[2]))

    assert_allclose(y_new, y_new_here)
Example #8
def test_rebin_err(wave, flux, err):
    begin, end = wave[0], wave[-1]
    length = len(wave)
    new_wave = np.linspace(begin, end, length // 5)  # num must be an int
    # new_wave = wave
    new_flux = np.array(rebin.rebin(wave, flux, new_wave))
    # print(new_wave)
    print(len(new_wave))
    print(len(new_flux))
    new_err = np.array(rebin.rebin_err(wave, err, new_wave))
    plt.plot(wave, flux)
    plt.plot(new_wave, new_flux)

    y1 = flux - err
    y2 = flux + err

    ny1 = new_flux - new_err
    ny2 = new_flux + new_err

    plt.fill_between(wave, y1, y2, color='C0', alpha=0.3)
    plt.plot(wave, y1, color='C0', alpha=0.3)
    plt.plot(wave, y2, color='C0', alpha=0.3)
    plt.fill_between(new_wave, ny1, ny2, color='C1', alpha=0.2)
    plt.plot(new_wave, ny1, color='C1', alpha=0.3)
    plt.plot(new_wave, ny2, color='C1', alpha=0.3)
    plt.show()
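`rebin_err` is the package's error counterpart to `rebin`; the shaded bands in the plot show the rebinned errors shrinking relative to the originals. Assuming each new pixel averages k independent input pixels (an assumption about rebin_err's definition, not taken from its source), the propagated error is the quadrature sum divided by k, roughly err/sqrt(k):

import numpy as np

err = np.full(100, 0.05)  # per-pixel errors on the original grid
k = 5                     # input pixels averaged into each output pixel
new_err = np.sqrt(np.sum(err.reshape(-1, k)**2, axis=1)) / k
# new_err ~= 0.05 / sqrt(5) ~= 0.022 per rebinned pixel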
Example #9
 def test1(self):
     a = np.arange(56).reshape(7, 8)
     actual = rebin(a, factor=(2, 3), func=np.sum)
     expected = np.array([[30, 48],
                          [126, 144],
                          [222, 240]])
     assert_array_equal(expected, actual)
Example #10
 def test1(self):
     a = np.linspace(1, 24, num=24,
                     dtype=np.float64).reshape(4, 6)
     actual = rebin(a, (2, 3))
     expected = np.array([[5., 8.],
                          [17., 20.]])
     assert_array_equal(expected, actual)
Example #11
def test_x2_surrounds_x1():
    """
    x2 range surrounds x1 range
    """
    # old size
    m = 2
    
    # new size
    n = 3
    
    # bin edges 
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(-0.1, 1.2, n+1)
    
    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
    
    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = [y_old_ave[0]*(x_new[1]-0.),
                  y_old_ave[0]*(x_old[1]-x_new[1]) + y_old_ave[1]*(x_new[2]-x_old[1]),
                  y_old_ave[1]*(x_old[-1]-x_new[-2])]

    assert_allclose(y_new, y_new_here)
    assert_allclose(y_new.sum(), y_old.sum())
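The final assertion is the defining property of piecewise-constant rebinning: when x_new spans all of x_old, counts are only redistributed between bins, never created or lost. The coefficients in y_new_here are exactly the overlap fractions between new and old bins; a plain-NumPy sketch of that bookkeeping (not the rebin package itself):

import numpy as np

def overlap_weights(x_old, x_new):
    """weights[i, j] = fraction of old bin j covered by new bin i."""
    widths = np.diff(x_old)
    lo = np.maximum.outer(x_new[:-1], x_old[:-1])
    hi = np.minimum.outer(x_new[1:], x_old[1:])
    return np.clip(hi - lo, 0.0, None) / widths

x_old = np.linspace(0., 1., 3)
x_new = np.linspace(-0.1, 1.2, 4)
y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)
w = overlap_weights(x_old, x_new)
y_new = w @ y_old
# every column of w sums to 1 here, so the total is conserved
assert np.isclose(y_new.sum(), y_old.sum())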
Example #12
def test_x2_right_overlap_x1_with_constant_distribution():
    """
    x2 domain overlaps x1 domain from the right
    """
    # old size
    m = 20

    # new size
    n = 30

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(0.95, 1.05, n+1)

    # constant spline
    mms_spline = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)

    y_old = np.array(
        [mms_spline.integral(x_old[i], x_old[i + 1]) for i in range(m)])

    y_new_mms = np.array(
        [mms_spline.integral(x_new[i], x_new[i + 1]) for i in range(n)])

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_mms, atol=1e-15)
Example #13
def test_x2_in_x1():
    """
    x2 only has one bin, and it is surrounded by x1 range
    """
    # old size
    m = 4

    # new size
    n = 1

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(0.3, 0.65, n + 1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = (y_old_ave[1] * (x_old[2] - x_new[0]) + y_old_ave[2] *
                  (x_new[1] - x_old[2]))

    assert_allclose(y_new, y_new_here)
Example #14
def test_GH9():
    x_old = np.array([1.5, 2.5, 3.5, 4.5, 5.5, 6.5])
    y_old = np.array([10, 10, 10, 10, 10])
    x_new = np.array(
        [1.7, 2.27332857, 2.84665714, 3.41998571, 3.99331429, 4.56664286])
    y_new = rebin.rebin(x_old, y_old, x_new)
    assert_allclose(y_new, [5.7332857] * 5)

    # with uncertainties
    y_old = np.array([11., 12., 13., 14., 15.])

    y_old = unp.uarray(y_old, 0.1 * y_old)

    # rebin
    y_new = rebin.rebin_piecewise_constant(x_old, y_old, x_new)

    # compute answer here to check rebin
    y_old_ave = y_old / np.diff(x_old)
    y_new_here = np.array([
        y_old_ave[0] * (x_new[1] - x_new[0]), y_old_ave[0] *
        (x_old[1] - x_new[1]) + y_old_ave[1] * (x_new[2] - x_old[1]),
        y_old_ave[1] * (x_new[3] - x_new[2]), y_old_ave[1] *
        (x_old[2] - x_new[3]) + y_old_ave[2] * (x_new[4] - x_old[2]),
        y_old_ave[3] * (x_new[5] - x_old[3]) + y_old_ave[2] *
        (x_old[3] - x_new[4])
    ])

    # mean or nominal value comparison
    # assert_allclose(unp.nominal_values(y_new),
    #                 unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new), unp.std_devs(y_new_here))
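The first assertion in this test is plain conservation arithmetic: the old bins have unit width and 10 counts each, a density of 10 per unit x, and every new bin spans 0.57332857 in x, so each collects 10 * 0.57332857 = 5.7332857 counts:

import numpy as np

density = 10.0                 # counts per unit x on the old grid
new_width = 2.27332857 - 1.7   # uniform width of the new bins
assert np.isclose(density * new_width, 5.7332857)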
Example #15
def test_x2_in_x1_2():
    """
    x2 has a couple of bins, each of which span more than one original bin
    """
    # old size
    m = 10

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.array([0.25, 0.55, 0.75])

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    y_old = unp.uarray(y_old, 0.1 * y_old * uniform((m, )))

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
    y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
    y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]

    assert_allclose(unp.nominal_values(y_new), unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new), unp.std_devs(y_new_here))
Example #16
def test_x2_right_overlap_x1_with_constant_distribution():
    """
    x2 domain overlaps x1 domain from the right
    """
    # old size
    m = 20

    # new size
    n = 30

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(0.95, 1.05, n + 1)

    # constant spline
    mms_spline = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)

    y_old = np.array(
        [mms_spline.integral(x_old[i], x_old[i + 1]) for i in range(m)])

    y_new_mms = np.array(
        [mms_spline.integral(x_new[i], x_new[i + 1]) for i in range(n)])

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_mms, atol=1e-15)
Example #17
def test_x1_surrounds_x2_with_constant_distribution():
    """
    x1 domain surrounds x2
    """
    # old size
    m = 20

    # new size
    n = 30

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(0.05, 0.26, n + 1)

    # constant spline
    mms_spline = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)

    y_old = np.array(
        [mms_spline.integral(x_old[i], x_old[i + 1]) for i in range(m)])

    y_new_mms = np.array(
        [mms_spline.integral(x_new[i], x_new[i + 1]) for i in range(n)])

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_mms)
Example #18
def test_x2_surrounds_x1():
    """
    x2 range surrounds x1 range
    """
    # old size
    m = 2

    # new size
    n = 3

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(-0.1, 1.2, n + 1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = [
        y_old_ave[0] * (x_new[1] - 0.), y_old_ave[0] * (x_old[1] - x_new[1]) +
        y_old_ave[1] * (x_new[2] - x_old[1]),
        y_old_ave[1] * (x_old[-1] - x_new[-2])
    ]

    assert_allclose(y_new, y_new_here)
    assert_allclose(y_new.sum(), y_old.sum())
Example #19
def test_x1_surrounds_x2_with_constant_distribution():
    """
    x1 domain surrounds x2
    """
    # old size
    m = 20

    # new size
    n = 30

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(0.05, 0.26, n+1)

    # constant spline
    mms_spline = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)

    y_old = np.array(
        [mms_spline.integral(x_old[i], x_old[i + 1]) for i in range(m)])

    y_new_mms = np.array(
        [mms_spline.integral(x_new[i], x_new[i + 1]) for i in range(n)])

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_mms)
Example #20
    def __call__(self, xarr, origin=0., oversample=10):
        """ 
		Evaluate I(r). Only the 1-D case is implemented for now. (12/18/12)
		xarr -- pixel coordinates; starts with zero; each value represents the lower edge
		        of each pixel.
		origin -- the coordinate of the center of the profile, in pixels
		oversample -- the oversampling factor (the factor by which we subdivide the pixels
		              to attain higher accuracy).
		"""
        self.xarr = xarr  # xarr must be regularly spaced
        dx = xarr[1] - xarr[0]  # this should be 1...
        #assert xarr[0] == IntType
        xarr_ctr = xarr + 0.5  # the pixel centers
        if oversample > 1:
            # calculate I(r) on a new grid
            dx_new = float(dx) / oversample
            xarr_new = np.arange(xarr[0], xarr[-1] + dx, dx_new)
            xarr_ctr_new = xarr_new + dx_new / 2.
            # calculate I(r) at the centers of each pixel, using the origin
            r = xarr_ctr_new - origin
            f = self.func(r)
            # now rebin the array
            f = rebin(f, len(self.xarr))
        else:
            r = xarr_ctr - origin
            f = self.func(r)  # evaluate at the centers of each pixel
        return f
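The oversampling branch evaluates I(r) on a grid `oversample` times finer and then collapses it back with `rebin(f, len(self.xarr))`. Assuming that call averages each group of `oversample` consecutive sub-pixels (the usual flux-per-pixel convention; the actual rebin used here is not shown), the equivalent NumPy step is:

import numpy as np

oversample = 10
f_fine = np.random.rand(32 * oversample)         # stand-in for func(r) on the fine grid
f = f_fine.reshape(-1, oversample).mean(axis=1)  # back to 32 pixels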
Example #21
def make_2d(x, y):
    """Change from 1-d indexing to 2-d indexing
    (translated from IDL to Python).

    Convert an N element X vector, and an M element Y vector, into
    N x M arrays giving all possible combination of X and Y pairs.
    Useful for obtaining the X and Y positions of each element of
    a regular grid.

    CALLING SEQUENCE:
       xx,yy = make_2d.make_2d(x,y)

    INPUTS:
         x - N element vector of X positions
         y - M element vector of Y positions

    RETURNS:
         xx - N x M element array giving the X position at each pixel
         yy - N x M element array giving the Y position of each pixel
               If only 2 parameters are supplied then X and Y will be
               updated to contain the output arrays

    EXAMPLE:
         To obtain the X and Y position of each element of a 30 x 15 array

         import make_2d
         x = numpy.arange(30)  ;  y = numpy.arange(15)     
         xx,yy = make_2d.make_2d( x, y ) 

    REVISION HISTORY:
         Written                     Wayne Landsman,ST Systems Co.    May,            1988
         Added /NOZERO keyword       W. Landsman                      March,          1991
         Converted to IDL V5.0       W. Landsman                      September,      1997
         Improved speed              P. Broos                         July,           2000
         Converted to Python         D. Jones                         January,        2014
"""


    ny = len(y)
    nx = len(x)
    xx = x.reshape(1, nx)
    yy = y.reshape(ny, 1)

    xx = rebin.rebin(xx, [ny, nx])
    yy = rebin.rebin(yy, [ny, nx])

    return xx, yy
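make_2d is the IDL idiom for what NumPy ships as np.meshgrid; the two rebin.rebin calls simply replicate the row and column vectors across the grid:

import numpy as np

x = np.arange(30)
y = np.arange(15)
xx, yy = np.meshgrid(x, y)  # both (15, 30): xx[i, j] == x[j], yy[i, j] == y[i]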
Example #22
def make_2d(x, y):
    """Change from 1-d indexing to 2-d indexing
    (translated from IDL to Python).

    Convert an N element X vector, and an M element Y vector, into
    N x M arrays giving all possible combination of X and Y pairs.
    Useful for obtaining the X and Y positions of each element of
    a regular grid.

    CALLING SEQUENCE:
       xx,yy = make_2d.make_2d(x,y)

    INPUTS:
         x - N element vector of X positions
         y - M element vector of Y positions

    RETURNS:
         xx - N x M element array giving the X position at each pixel
         yy - N x M element array giving the Y position of each pixel
               If only 2 parameters are supplied then X and Y will be
               updated to contain the output arrays

    EXAMPLE:
         To obtain the X and Y position of each element of a 30 x 15 array

         import make_2d
         x = numpy.arange(30)  ;  y = numpy.arange(15)     
         xx,yy = make_2d.make_2d( x, y ) 

    REVISION HISTORY:
         Written                     Wayne Landsman,ST Systems Co.    May,            1988
         Added /NOZERO keyword       W. Landsman                      March,          1991
         Converted to IDL V5.0       W. Landsman                      September,      1997
         Improved speed              P. Broos                         July,           2000
         Converted to Python         D. Jones                         January,        2014
"""

    ny = len(y)
    nx = len(x)
    xx = x.reshape(1, nx)
    yy = y.reshape(ny, 1)

    xx = rebin.rebin(xx, [ny, nx])
    yy = rebin.rebin(yy, [ny, nx])

    return (xx, yy)
Example #23
def test_x2_surrounds_x1_sine_spline():
    """
    x2 range is completely above x1 range
    using a random vector to build spline
    """
    # old size
    m = 5

    # new size
    n = 6

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])

    subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])

    y_old = 1.+np.sin(x_old[:-1]*np.pi)

    # compute spline ----------------------------------
    x_mids = x_old[:-1] + 0.5*np.ediff1d(x_old)
    xx = np.hstack([x_old[0], x_mids, x_old[-1]])
    yy = np.hstack([y_old[0], y_old, y_old[-1]])

    # build spline
    spl = splrep(xx, yy)

    area_old = np.array(
        [splint(x_old[i], x_old[i + 1], spl) for i in range(m)])

    # computing subbin areas
    area_subbins = np.zeros((subbins.size - 1,))
    for i in range(area_subbins.size):
        a, b = subbins[i:i + 2]
        a = max([a, x_old[0]])
        b = min([b, x_old[-1]])
        if b > a:
            area_subbins[i] = splint(a, b, spl)

    # summing subbin contributions in y_new_ref
    y_new_ref = np.zeros((x_new.size-1,))
    y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
    y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
    y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
    y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]

    y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
    y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
    y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]

    # call rebin function
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_ref)
Example #24
 def setCldMask(self, ref470std, threshold=None):
     # spatial variability cloud mask
     # ref470std: std of the reflectance in the 470 band (676 by 451)
     shp = self.RefSB.shape
     RefSB = np.empty((shp[0], shp[1] // 3, shp[2] // 3), dtype=float)
     RefSB_rad = np.empty_like(RefSB)
     if threshold is None:
         threshold = 0.01
     self.cldMask = ref470std > threshold
     for i in np.arange(0, shp[0]):
         RefSB[i, :, :] = rebin(self.RefSB[i, :, :], factor=(3, 3))
         RefSB[i, self.cldMask] = np.nan
         RefSB_rad[i, :, :] = rebin(self.RefSB_rad[i, :, :], factor=(3, 3))
         RefSB_rad[i, self.cldMask] = np.nan
     swath_lon = rebin(self.swath_lon, factor=(3, 3))
     swath_lat = rebin(self.swath_lat, factor=(3, 3))
     self.RefSB = RefSB
     self.RefSB_rad = RefSB_rad
     self.swath_lon = swath_lon
     self.swath_lat = swath_lat
Example #25
def test_x2_surrounds_x1_sine_spline():
    """
    x2 range is completely above x1 range
    using a random vector to build spline
    """
    # old size
    m = 5

    # new size
    n = 6

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])

    subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])

    y_old = 1. + np.sin(x_old[:-1] * np.pi)

    # compute spline ----------------------------------
    x_mids = x_old[:-1] + 0.5 * np.ediff1d(x_old)
    xx = np.hstack([x_old[0], x_mids, x_old[-1]])
    yy = np.hstack([y_old[0], y_old, y_old[-1]])

    # build spline
    spl = splrep(xx, yy)

    area_old = np.array(
        [splint(x_old[i], x_old[i + 1], spl) for i in range(m)])

    # computing subbin areas
    area_subbins = np.zeros((subbins.size - 1, ))
    for i in range(area_subbins.size):
        a, b = subbins[i:i + 2]
        a = max([a, x_old[0]])
        b = min([b, x_old[-1]])
        if b > a:
            area_subbins[i] = splint(a, b, spl)

    # summing subbin contributions in y_new_ref
    y_new_ref = np.zeros((x_new.size - 1, ))
    y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
    y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
    y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
    y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]

    y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
    y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
    y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]

    # call rebin function
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_ref)
Example #26
 def rebin(self):
     rebin_values = np.arange(
         self.df.index.min() - (self.rebin_width / 2.0),
         self.df.index.max() + (self.rebin_width / 2.0), self.rebin_width)
     rebin_centres = np.arange(self.df.index.min(), self.df.index.max(),
                               self.rebin_width)
     x_bin_bounds = np.append(
         np.array(self.df.index) - (self.binSize / 2.0),
         self.df.index.max() + (self.binSize / 2.0))
     rebinned_values = rebin.rebin(x_bin_bounds, np.array(self.df),
                                   rebin_values)  #, 'piecewise_constant')
     return pd.Series(rebinned_values, index=rebin_centres)
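The only subtlety here is centre/edge bookkeeping: the Series index holds bin centres, while rebin.rebin wants bin edges, so the code rebuilds edges by shifting the centres half a bin and appending one closing edge. In isolation:

import numpy as np

centres = np.array([1.0, 2.0, 3.0])
width = 1.0
edges = np.append(centres - width / 2.0, centres[-1] + width / 2.0)
# edges -> [0.5, 1.5, 2.5, 3.5]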
Example #27
 def get_spectrum_pre(self, wave, arrshift, arrsigma, arrscale):
     arrsigma = np.abs(arrsigma)
     new_wave = self.get_wave(arrshift)
     new_flux = self.convol_spectrum(arrsigma)
     # scale = self.get_scale(new_wave, arrscale)
     scale = self.get_legendre_scale(wave, arrscale)
     # print('flag1')
     # print(len(new_wave))
     # print(len(new_flux))
     # print(len(wave))
     flux_rebin = np.array(rebin.rebin(new_wave, new_flux, wave))
     # print('flag2')
     flux_aftscale = flux_rebin * scale
     return flux_aftscale
Example #28
def spectra_df(direc):
    # direc = './spectra'
    specfiles, polfiles = read_all_spec(direc)
    # create the array for the wavelength and frequency
    c = 2.99792458e+18  # angstroms/s
    redshift = 0.435  # for 1222+216
    # read in the first spectrum to get the length
    (fspec, ferr, fend, fheader, fmjd) = readspectrum_combinedspec(specfiles[0])
    (pspec, perr, pend, pheader, pmjd) = readspectrum_combinedspec(polfiles[0])
    wavend = fend * 4.0 + 4000.0
    wave = np.linspace(4000.0, wavend, fend)
    restwave = wave / (1.0 + redshift)
    restnu = c / restwave
    nuspec = np.float64((fspec / c) * np.power(restwave, 2))
    # create a dataframe so that all spectra can be read into it
    # make one for polarized flux as well
    d = {'wl': restwave.tolist(), str(fmjd) + '_fl': fspec.tolist()}
    p = {'wl': restwave.tolist(), str(pmjd) + '_pfl': pspec.tolist()}

    for i in range(1, len(specfiles)):
        (fspec, ferr, fend, fheader, fmjd) = readspectrum_combinedspec(specfiles[i])
        (pspec, perr, pend, pheader, pmjd) = readspectrum_combinedspec(polfiles[i])
        if fspec.shape != restwave.shape:
            fspec = rebin(fspec, restwave.shape)
        if pspec.shape != restwave.shape:
            pspec = rebin(pspec, restwave.shape)
        d.update({str(fmjd) + '_fl': fspec.tolist()})
        p.update({str(pmjd) + '_pfl': pspec.tolist()})

    flux_df = df(data=d)
    cols = flux_df.columns.tolist()
    cols.insert(0, cols.pop(cols.index('wl')))
    pol_df = df(data=p)
    cols = pol_df.columns.tolist()
    cols.insert(0, cols.pop(cols.index('wl')))
    return flux_df, pol_df
Example #29
    def __call__(self, sample):
        image = sample

        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size

        new_h, new_w = int(new_h), int(new_w)
        facw = int(w / new_w)
        fach = int(h / new_h)  # original computed int(w / new_h) for both, presumably a typo
        # img = F.interpolate(image, new_w)
        img = rebin.rebin(image, (facw, fach, 1))

        return img
Example #30
def test_y1_uncertainties():
    """
    x2 range surrounds x1 range, y1 has uncertainties
    """
    # old size
    m = 2

    # new size
    n = 3

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(-0.1, 1.2, n+1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    # with uncertainties
    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = np.array(
        [y_old_ave[0]*(x_new[1]-0.),
         y_old_ave[0]*(x_old[1]-x_new[1]) + y_old_ave[1]*(x_new[2]-x_old[1]),
         y_old_ave[1]*(x_old[-1]-x_new[-2])])

    # nominal value comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
    assert_allclose(unp.nominal_values(y_new).sum(),
                    unp.nominal_values(y_new_here).sum())
Example #31
def test_x2_same_as_x1():
    """
    x2 same as x1
    """
    # old size
    m = 6
    
    # new size
    n = 6
    
    # bin edges 
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(0., 1., n+1)
    
    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
    
    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    assert_allclose(y_new, y_old)
Example #32
def test_x2_same_as_x1():
    """
    x2 same as x1
    """
    # old size
    m = 6

    # new size
    n = 6

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(0., 1., n + 1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    assert_allclose(y_new, y_old)
Example #33
def test_y1_uncertainties():
    """
    x2 range surrounds x1 range, y1 has uncertainties
    """
    # old size
    m = 2

    # new size
    n = 3

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(-0.1, 1.2, n + 1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    # with uncertainties
    y_old = unp.uarray(y_old, 0.1 * y_old * uniform((m, )))

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = np.array([
        y_old_ave[0] * (x_new[1] - 0.), y_old_ave[0] * (x_old[1] - x_new[1]) +
        y_old_ave[1] * (x_new[2] - x_old[1]),
        y_old_ave[1] * (x_old[-1] - x_new[-2])
    ])

    # mean or nominal value comparison
    assert_allclose(unp.nominal_values(y_new), unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new), unp.std_devs(y_new_here))
    assert_allclose(
        unp.nominal_values(y_new).sum(),
        unp.nominal_values(y_new_here).sum())
Example #34
def test_x2_above_x1():
    """
    x2 range is completely above x1 range
    """
    # old size
    m = 20

    # new size
    n = 30

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(1.2, 10., n + 1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    assert_allclose(y_new, np.zeros((n, )))
    assert_allclose(y_new.sum(), 0.)
Example #35
def test_x2_lower_than_x1():
    """
    x2 range is completely lower than x1 range
    """
    # old size
    m = 2

    # new size
    n = 3

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.linspace(-0.2, -0.0, n + 1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    assert_allclose(y_new, [0., 0., 0.])
    assert_allclose(y_new.sum(), 0.)
Example #36
def test_x2_lower_than_x1():
    """
    x2 range is completely lower than x1 range
    """
    # old size
    m = 2

    # new size
    n = 3

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(-0.2, -0.0, n+1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    assert_allclose(y_new, [0., 0., 0.])
    assert_allclose(y_new.sum(), 0.)
Example #37
def test_x2_above_x1():
    """
    x2 range is completely above x1 range
    """
    # old size
    m = 20

    # new size
    n = 30

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(1.2, 10., n+1)

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    assert_allclose(y_new, np.zeros((n,)))
    assert_allclose(y_new.sum(), 0.)
Example #38
def Resize_Cutout(arg):
    fileName, outName, oShape, rShape = arg

    oRows, oCols = oShape[0], oShape[1]
    iData = fits.getdata(fileName)
    iShape = iData.shape
    # difference in rows and columns from desired shape
    dRows, dCols = iShape[0] - oRows, iShape[1] - oCols
    hRows, hCols = int(dRows / 2), int(dCols / 2)

    if dRows % 2 == 0:  # take equal number of rows from bottom and top
        oData = iData[hRows:-hRows, :]
    else:  # take 1 more pixel from the top always
        oData = iData[hRows + 1:-hRows, :]

    if dCols % 2 == 0:  # take equal number of columns from left and right
        oData = oData[:, hCols:-hCols]
    else:  # take 1 more pixel from the left always
        oData = oData[:, hCols:-(hCols + 1)]

    oShape = oData.shape
    oHdr = fits.getheader(fileName, 1)
    # use rebin tool if new shape is smaller than input
    if rShape[0] < oShape[0]:
        oData = rebin(oData, rShape) * float(
            rShape[0] * rShape[1] / oShape[0] / oShape[1])
    # use interp2d if new shape is smaller than input
    if rShape[0] > oShape[0]:
        x = np.linspace(0, 1, oShape[0])
        y = np.linspace(0, 1, oShape[1])
        f = interpolate.interp2d(x, y, oData, kind='linear')
        xn = np.linspace(0, 1, rShape[0])
        yn = np.linspace(0, 1, rShape[1])
        oData = f(xn, yn)
    oHdr['NAXIS1'] = rShape[1]
    oHdr['NAXIS2'] = rShape[0]
    fits.writeto(outName, data=oData, header=oHdr)
Example #39
def estimateWindow(x,
                   binEdges,
                   windowFunc,
                   meanRange=np.arange(100, 150, 5),
                   NEntries=10000,
                   plot=False):
    windowTauList = []
    for idx, mean in enumerate(meanRange):
        data = [mean] * NEntries
        bins, hist = getRebinedHist(binEdges, data)
        y = np.nan_to_num(
            rb.rebin(bins, hist, x, interp_kind='piecewise_constant'))
        # y = scipy.signal.savgol_filter(y, 31, 3)

        p0 = (max(y), mean, 5)
        popt, pcov = scipy.optimize.curve_fit(windowFunc, x[:-1], y, p0=p0)
        print(p0, popt)
        windowTau = popt[-1]
        windowTauList.append(windowTau)

        if plot:
            color = 'C%d' % idx
            plt.plot(bins[:-1],
                     np.asarray(hist, dtype=float) / np.max(hist),
                     color=color)
            plt.plot(x[:-1], y / np.max(y), ls='--', color=color)

            yFit = windowFunc(bins[:-1], *popt)
            plt.plot(bins[:-1], yFit / np.max(yFit), ls='-.', color=color)
            plt.xlabel('Energy (keV)')
            plt.ylabel('Normalized counts')
            plt.xlim(20, 80)

    windowTau = np.mean(windowTauList)
    print(windowTau)
    return windowTau
Example #40
 def rebin(self):
     rebin_values = np.arange(self.df.index.min() - (self.rebin_width / 2.0),
                              self.df.index.max() + (self.rebin_width / 2.0),
                              self.rebin_width)
     rebin_centres = np.arange(self.df.index.min(), self.df.index.max(),
                               self.rebin_width)
     x_bin_bounds = np.append(np.array(self.df.index) - (self.binSize / 2.0),
                              self.df.index.max() + (self.binSize / 2.0))
     rebinned_values = rebin.rebin(x_bin_bounds, np.array(self.df),
                                   rebin_values)  # , 'piecewise_constant'
     return pd.Series(rebinned_values, index=rebin_centres)
Example #41
def _local_coverage(reader, features, read_strand=None, fragment_size=None,
                    shift_width=0, bins=None, use_score=False, accumulate=True,
                    preserve_total=False, method=None, processes=None,
                    stranded=True, verbose=False):
    """
    Returns a binned vector of coverage.

    Computes a 1D vector of coverage at the coordinates for each feature in
    `features`, extending each read by `fragment_size` bp.

    Some arguments cannot be used for bigWig files due to the structure of
    these files.  The parameters docstring below indicates whether or not an
    argument can be used with bigWig files.

    Depending on the arguments provided, this method can return a vector
    containing values from a single feature or from concatenated features.

    An example of the flexibility afforded by the latter case:

        `features` can be a 3-tuple of pybedtools.Intervals representing (TSS
        + 1kb upstream, gene, TTS + 1kb downstream) and `bins` can be [100,
        1000, 100].  This will return a vector of length 1200 containing the
        three genomic intervals binned into 100, 1000, and 100 bins
        respectively.  Note that it is up to the caller to construct the right
        axes labels in the final plot!

    Parameters
    ----------
    features : str, interval-like object, or list

        Can be a single interval or an iterable yielding intervals.

        Interval-like objects must have chrom, start, and stop attributes, and
        optionally a strand attribute.  One exception to this is that if
        `features` is a single string, it can be of the form "chrom:start-stop"
        or "chrom:start-stop[strand]".

        If `features` is a single interval, then return a 1-D array for that
        interval.

        If `features` is an iterable of intervals, then return a 1-D
        array that is a concatenation of signal for these intervals.

        Available for bigWig.

    bins : None, int, list
        If `bins` is None, then each value in the returned array will
        correspond to one bp in the genome.

        If `features` is a single Interval, then `bins` is an integer or None.

        If `features` is an iterable of Intervals, `bins` is an iterable of
        integers of the same length as `features`.

        Available for bigWig.

    fragment_size : None or int
        If not None, then each item from the genomic signal (e.g., reads from
        a BAM file) will be extended `fragment_size` bp in the 3' direction.
        Higher fragment sizes will result in smoother signal.  Not available
        for bigWig.

    shift_width : int
        Each item from the genomic signal (e.g., reads from a BAM
        file) will be shifted `shift_width` bp in the 3' direction.  This can
        be useful for reconstructing a ChIP-seq profile, using the shift width
        determined from the peak-caller (e.g., modeled `d` in MACS). Not
        available for bigWig.

    read_strand : None or str
        If `read_strand` is one of "+" or "-", then only items from the genomic
        signal (e.g., reads from a BAM file) on that strand will be considered
        and reads on the opposite strand ignored.  Useful for plotting genomic
        signal for stranded libraries. Not available for bigWig.

    stranded : bool
        If True, then the profile will be reversed for features whose strand
        attribute is "-".

    use_score : bool
        If True, then each bin will contain the sum of the *score* attribute of
        genomic features in that bin instead of the *number* of genomic
        features falling within each bin. Not available for bigWig.

    accumulate : bool
        If False, then only record *that* there was something there, rather
        than accumulating reads.  This is useful for making matrices with called
        peaks. Available for bigWig.

    preserve_total : bool
        If True, re-scales the returned value so that each binned row's total
        is equal to the sum of the original, un-binned data.  The units of the
        returned array will be in "total per bin".  This is useful for, e.g.,
        counting reads in features.  If `preserve_total` is False, then the
        returned array will have units of "density"; this is more generally
        useful and is the default behavior.  Available for bigWig, but not when
        using method="ucsc_summarize".

    method : str; one of [ "summarize" | "get_as_array" | "ucsc_summarize" ]
        Only used for bigWig.  The method specifies how data are extracted from
        the bigWig file.  "summarize" is the default.  It's quite fast, but may
        yield slightly different results when compared to running this same
        function on the BAM file from which the bigWig was created.

        "summarize" uses bx-python.  The values returned will not be exactly
        the same as the values returned when local_coverage is called on a BAM,
        BED, or bigBed file, but they will be close.  This method is quite
        fast, and is the default when bins is not None.

        "get_as_array" uses bx-python, but does a separate binning step.  This
        can be slower than the other two methods, but the results are exactly
        the same as those from a BAM, BED, or bigBed file.  This method is
        always used if bins=None.

        "ucsc_summarize" is an alternative version of "summarize".  It uses the
        UCSC program `bigWigSummary`, which must already be installed and on
        your path.

    processes : int or None
        The feature can be split across multiple processes.

    Returns
    -------

    1-d NumPy array


    Notes
    -----
    If a feature has a "-" strand attribute, then the resulting profile will be
    *relative to a minus-strand feature*.  That is, the resulting profile will
    be reversed.

    Returns arrays `x` and `y`.  `x` is in genomic coordinates, and `y` is
    the coverage at each of those coordinates after extending fragments.

    The total number of reads is guaranteed to be the same no matter how it's
    binned.

    (with ideas from
    http://www-huber.embl.de/users/anders/HTSeq/doc/tss.html)

    """
    # bigWig files are handled differently, so we need to know if we're working
    # with one; raise an exception if a kwarg was supplied that's not supported.
    if isinstance(reader, filetype_adapters.BigWigAdapter):
        is_bigwig = True
        defaults = (
            ('read_strand', read_strand, None),
            ('fragment_size', fragment_size, None),
            ('shift_width', shift_width, 0),
            ('use_score', use_score, False),
            ('preserve_total', preserve_total, False),
        )
        for name, check, default in defaults:
            if (
                ((default is None) and (check is not default))
                or
                (check != default)
            ):
                raise ArgumentError(
                    "Argument '%s' not supported for bigWig" % name)

        if method == 'ucsc_summarize':
            if preserve_total:
                raise ArgumentError(
                    "preserve_total=True not supported when using "
                    "method='ucsc_summarize'")
    else:
        is_bigwig = False

    if isinstance(reader, filetype_adapters.BamAdapter):
        if use_score:
            raise ArgumentError("Argument 'use_score' not supported for "
                                "bam")

    # e.g., features = "chr1:1-1000"
    if isinstance(features, basestring):
        features = helpers.tointerval(features)

    if not ((isinstance(features, list) or isinstance(features, tuple))):
        if bins is not None:
            if not isinstance(bins, int):
                raise ArgumentError(
                    "bins must be an int, got %s" % type(bins))
        features = [features]
        bins = [bins]
    else:
        if bins is None:
            bins = [None for i in features]
        if not len(bins) == len(features):
            raise ArgumentError(
                "bins must have same length as feature list")

    # nomenclature:
    #   "window" is region we're getting data for
    #   "alignment" is one item in that region
    #
    profiles = []
    xs = []
    for window, nbin in zip(features, bins):
        window = helpers.tointerval(window)
        chrom = window.chrom
        start = window.start
        stop = window.stop
        strand = window.strand

        if not is_bigwig:
            # Extend the window to catch reads that would extend into the
            # requested window
            _fs = fragment_size or 0
            padded_window = pybedtools.Interval(
                chrom,
                max(start - _fs - shift_width, 0),
                stop + _fs + shift_width,
            )
            window_size = stop - start

            # start off with an array of zeros to represent the window
            profile = np.zeros(window_size, dtype=float)

            for interval in reader[padded_window]:

                if read_strand:
                    if interval.strand != read_strand:
                        continue

                # Shift interval by modeled distance, if specified.
                if shift_width:
                    if interval.strand == '-':
                        interval.start -= shift_width
                        interval.stop -= shift_width
                    else:
                        interval.start += shift_width
                        interval.stop += shift_width

                # Extend fragment size from 3'
                if fragment_size:
                    if interval.strand == '-':
                        interval.start = interval.stop - fragment_size
                    else:
                        interval.stop = interval.start + fragment_size

                # Convert to 0-based coords that can be used as indices into
                # array
                start_ind = interval.start - start

                # If the feature goes out of the window, then only include the
                # part that's inside the window
                start_ind = max(start_ind, 0)

                # Same thing for stop
                stop_ind = interval.stop - start
                stop_ind = min(stop_ind, window_size)

                # Skip if the feature is shifted outside the window. This can
                # happen with large values of `shift_width`.
                if start_ind >= window_size or stop_ind < 0:
                    continue

                # Finally, increment profile
                if use_score:
                    score = float(interval.score)
                else:
                    score = 1

                if accumulate:
                    if preserve_total:
                        profile[start_ind:stop_ind] += score / float(stop_ind - start_ind)
                    else:
                        profile[start_ind:stop_ind] += score

                else:
                    profile[start_ind:stop_ind] = score

        else:  # it's a bigWig
            profile = reader.summarize(
                window, method=method, bins=(nbin or len(window)))

        # If no bins, return genomic coords
        if (nbin is None):
            x = np.arange(start, stop)

        # Otherwise do the downsampling; resulting x is still in genomic
        # coords
        else:
            if preserve_total:
                total = float(profile.sum())
            if not is_bigwig or method == 'get_as_array':
                xi, profile = rebin(
                    x=np.arange(start, stop), y=profile, nbin=nbin)
                if not accumulate:
                    nonzero = profile != 0
                    profile[nonzero] = 1
                x = xi

            else:
                x = np.linspace(start, stop - 1, nbin)

        # Minus-strand profiles should be flipped left-to-right.
        if stranded and strand == '-':
            profile = profile[::-1]
        xs.append(x)
        if preserve_total:
            scale = profile.sum() / total
            profile /= scale
        profiles.append(profile)

    stacked_xs = np.hstack(xs)
    stacked_profiles = np.hstack(profiles)
    del xs
    del profiles
    return stacked_xs, stacked_profiles
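When `bins` is given for a non-bigWig reader, the per-bp profile is downsampled with the rebin helper while x stays in genomic coordinates. A self-contained sketch of that step (names are illustrative; the real helper may handle edges differently):

import numpy as np

def rebin_profile(x, y, nbin):
    """Average a per-bp profile into nbin roughly equal chunks."""
    xi = np.linspace(x[0], x[-1], nbin)
    yi = np.array([chunk.mean() for chunk in np.array_split(y, nbin)])
    return xi, yi

x = np.arange(1000, 2000)                          # genomic coordinates
y = np.random.poisson(3, size=1000).astype(float)  # per-bp coverage
xi, yi = rebin_profile(x, y, 100)                  # 100-bin profile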
Example #42
 def do_test(self, shape, factor, func=np.sum):
     a = 2 * np.random.randint(-1000, 1000, shape)
     actual = rebin(a, factor, func)
     expected = my_rebin(a, factor, func)
     assert_array_equal(expected, actual)
Example #43
def fit_radial_velocity(wave, spec1, spec2, err1, err2, minWL, maxWL, norm_bands,
                        vmin=-1500, vmax=1500, numpoints=1000):
    '''
    Given 2 spectra and a wavelength range for fitting, fits the change in radial velocity from spec1 to spec2.
    Note: blueshifts are negative; if an emission line gets more redshifted from spec1 to spec2, it will have a positive value.
    
    params:
        wave - vector of common wavelengths for both spectra
        spec1 - first spectrum, rebinned onto common wavelengths
        spec2 - second spectrum, rebinned onto common wavelengths
        err1 - errors for first spectrum, rebinned onto common wavelengths (can be None if no errors)
        err2 - errors for second spectrum, rebinned onto common wavelengths (can be None if no errors)
        minWL - minimum wavelength value for fitting chi^2
        maxWL - maximum wavelength value for fitting chi^2
        norm_bands - list of lists each entry is of length 2 and contains [minWL,maxWL] for a band of wavelengths used for normalization
        NOTE: THIS SHOULD INCLUDE THE BAND FOR THE EMISSION LINE, AND THE EMISSION LINE BAND SHOULD BE LAST
        vmin - minimum velocity offset, or most blueshifted velocity for spec2 (km/s)
        vmax - maximum velocity offset, or most redshifted velocity for spec2 (km/s)
        numpoints - number of calculated velocity shifts
        
    returns:
        velocities - list of sampled radial velocities for spec2
        chisqs - list of chi^2 values for each radial velocity
        wave_fit - common sampled wavelengths for best fit offset
        spec1_fit_norm - spec1_norm resampled at wave_fit
        spec2_fit_norm - spec2_norm shifted to best fit velocity and resampled at wave_fit
        err1_fit_norm - errors for first spectrum resampled at wavefit and normalized
        err2_fit_norm - errors for second spectrum shifted to best fit velocity and resampled at wave_fit
        vel_min - best fit change in radial velocity
        min_CI_vel - lower limit for confidence interval for change in radial velocity
        max_CI_vel - upper limit for confidence interval for change in radial velocity
    
    '''
    
    # first, calculate desired shifts in wavelength
    velocities = np.linspace(vmin, vmax, numpoints)
    chisqs = np.asarray([])

    # THIS ASSUMES THE HBETA BAND IS THE LAST ENTRY IN norm_bands
    # use rms deviation between the two spectra in the continuum regions as an estimate of overall uncertainty
    if (err1 is None) or (err2 is None):

        # use both the continuum bands and the band for the emission line for normalization
        a, b = normalize(wave, spec1, spec2, norm_bands)
        spec1_norm = a * spec1 + b
        spec2_norm = spec2

        continuum_bands = norm_bands[:-1]
        sigmasq = calculate_sigmasq(wave, spec1_norm, spec2_norm, continuum_bands)  # estimate of overall variance

    for i in range(numpoints):

        # calculate shifted wavelengths for spec2
        wave2 = calculate_shifted_wave(wave, velocities[i])

        # rebin both spectra onto common wave
        if (err1 is None) or (err2 is None):
            wave_iter, spec1_iter, spec2_iter = rebin(wave, wave2, spec1, spec2)
        else:
            wave_iter, spec1_iter, err1_iter, spec2_iter, err2_iter = rebin(wave, wave2, spec1, spec2, err1, err2)

        # normalize spectra

        # use both the continuum bands and the band for the emission line for normalization
        a, b = normalize(wave_iter, spec1_iter, spec2_iter, norm_bands)
        spec1_iter_norm = a * spec1_iter + b
        spec2_iter_norm = spec2_iter
        if (err1 is not None) and (err2 is not None):
            # propagate uncertainty: z = a*x + b, so dz = a*dx
            err1_iter_norm = a * err1_iter
            err2_iter_norm = err2_iter

        # find min and max idx for WL range for fitting
        min_idx, max_idx = find_common_idx_range(wave_iter, minWL, maxWL)

        # calculate chi^2 of shifted spectra,
        # slicing the flux arrays to only include the wavelength region with the emission line
        spec1_iter_cut = spec1_iter_norm[min_idx:max_idx]
        spec2_iter_cut = spec2_iter_norm[min_idx:max_idx]
        if (err1 is not None) and (err2 is not None):
            err1_iter_cut = err1_iter_norm[min_idx:max_idx]
            err2_iter_cut = err2_iter_norm[min_idx:max_idx]

        if (err1 is None) or (err2 is None):
            # In the case where no spectral uncertainties are provided,
            # use an estimate of the overall uncertainty to calculate chisq
            chisq = calculate_chisq_no_err(spec1_iter_cut, spec2_iter_cut, sigmasq)
        else:
            # If spectral uncertainties are provided, the errors of the two
            # spectra added in quadrature form the denominator of the chisq sum
            chisq = calculate_chisq(spec1_iter_cut, spec2_iter_cut, err1_iter_cut, err2_iter_cut)
        chisqs = np.append(chisqs, chisq)

    # Find radial velocity that minimizes chisq
    vel_min, chisq_min = calculate_min_velocity(velocities, chisqs)

    # Calculate confidence interval for radial velocities
    min_CI_vel, max_CI_vel = calculate_velocity_CI(velocities, chisqs)

    # For plotting, return spectra at best fit radial velocity
    best_fit_velocity = vel_min
    best_fit_wave2 = calculate_shifted_wave(wave, best_fit_velocity)
    wave_fit, spec1_fit, err1_fit, spec2_fit, err2_fit = rebin(wave, best_fit_wave2, spec1, spec2, err1, err2)
    a, b = normalize(wave_fit, spec1_fit, spec2_fit, norm_bands)
    # print("a: " + str(round(a, 2)))
    # print("b: " + str(round(b, 2)))
    spec1_fit_norm = a * spec1_fit + b
    spec2_fit_norm = spec2_fit
    # z = a*x + b, so dz = a*dx
    err1_fit_norm = a * err1_fit
    err2_fit_norm = err2_fit

    return velocities, chisqs, wave_fit, spec1_fit_norm, spec2_fit_norm, err1_fit_norm, err2_fit_norm, vel_min, min_CI_vel, max_CI_vel
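`calculate_shifted_wave` is not shown; for the velocity range used here (|v| <= 1500 km/s) it is presumably the non-relativistic Doppler relation, with positive velocities giving redshifts. A sketch under that assumption:

import numpy as np

C_KM_S = 2.99792458e5  # speed of light [km/s]

def calculate_shifted_wave(wave, velocity):
    """Assumed form of the helper: lambda' = lambda * (1 + v/c),
    velocity in km/s, positive = redshift."""
    return np.asarray(wave) * (1.0 + velocity / C_KM_S)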
Example #44
def RealSim_CFIS(
        inputName,
        outputName,
        cosmo=FlatLambdaCDM(H0=70, Om0=0.3),  # cosmology
        redshift=0.05,  # mock observation redshift
        apply_dimming=False,  # apply redshift dimming 
        rebin_to_CCD=True,  # rebin to CCD angular scale
        CCD_scale=0.187,  # CCD angular scale in [arcsec/pixel]
        add_false_sky=False,  # add gaussian sky
        false_sky_sig=24.2,  # gaussian sky standard dev [AB mag/arcsec2]
        add_false_psf=True,  # convolve with gaussian psf
        false_psf_fwhm=0.6,  # gaussian psf FWHM [arcsec]
        add_poisson=False,  # add poisson noise to galaxy
        add_cfis_sky=True,  # insert into real CFIS sky (using sdss_args)
):
    '''
    Keyword description.
    
    `inputName` -- File path for single-band idealized FITS image [mag/arcsec2]
    `outputName` -- File path for FITS output [nanomaggies].
    `cosmo` -- cosmology.
    `redshift` -- Target redshift of image in the survey. Used to compute dimming (optional) and
    the angular size of the image for a given physical size.
    `apply_dimming` -- Apply (1+z)**-5 surface brightness dimming from redshift.
    `rebin_to_CCD` -- If True, rebins the image to the target angular size and pixel scale. If False,
    the output image is the same dimensions as the input.
    `add_false_sky` -- Add gaussian sky to the image.
    `false_sky_sig` -- Standard deviation in the sky brightness, expressed in AB mag/arcsec2
    `add_false_psf` -- Convolve with a gaussian PSF.
    `false_psf_fwhm` -- fwhm of the gaussian PSF in arcseconds.
    `add_poisson` -- Add Poisson shot noise to the source flux after convolution. This currently
    does not work with CFIS images because we don't know what the GAIN is.
    `add_cfis_sky` -- if True, insert image into a real CFIS FOV.
    '''

    # speed of light [m/s]
    speed_of_light = 2.99792458e8
    # kiloparsec per arcsecond scale
    kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(
        z=redshift).value / 60.  # [kpc/arcsec]
    # luminosity distance in Mpc
    luminosity_distance = cosmo.luminosity_distance(z=redshift)  # [Mpc]

    # img header and data
    with fits.open(inputName, mode='readonly') as hdul:
        # img header
        header = hdul[0].header
        # img data
        img_data = hdul[0].data

    # collect physical pixel scale
    kpc_per_pixel = header['CDELT1'] / 1000.  # [kpc/pixel]
    # compute angular pixel scale from cosmology
    arcsec_per_pixel = kpc_per_pixel / kpc_per_arcsec  # [arcsec/pixel]

    # img in AB nanomaggies per arcsec2
    img_nanomaggies = 10**(-0.4 * (img_data - 22.5))  # [nmgys/arcsec2]
    # if not already dimmed, apply here
    if apply_dimming:
        img_nanomaggies *= (1 + redshift)**(-5)
    # apply pixel scale [arcsec/pixel]2 to convert to calibrated flux
    img_nanomaggies *= arcsec_per_pixel**2  # [nmgs]
    # update units of image header to linear calibrated scale
    header['BUNIT'] = 'AB nanomaggies'

    # Add levels of realism
    if rebin_to_CCD:
        '''
        Rebin image to a given angular CCD scale
        '''
        # telescope ccd angular scale
        ccd_scale = CCD_scale
        # axes of original image
        nPixelsOld = img_nanomaggies.shape[0]
        # axes of regridded image
        nPixelsNew = int(np.floor((arcsec_per_pixel / ccd_scale) * nPixelsOld))
        # rebin to new ccd scale
        img_nanomaggies = rebin(img_nanomaggies, (nPixelsNew, nPixelsNew))
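        # (assumes this rebin implementation conserves total flux when
        #  regridding, so the image stays calibrated in nanomaggies)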
        # new kpc_per_pixel on ccd
        kpc_per_pixel = kpc_per_arcsec * ccd_scale
        # new arcsec per pixel
        arcsec_per_pixel = ccd_scale
        # header updates
        if nPixelsNew % 2: CRPIX = float(nPixelsNew / 2)
        else: CRPIX = float(nPixelsNew / 2) + 0.5
        header['CRPIX1'] = CRPIX
        header['CRPIX2'] = CRPIX
        header['CDELT1'] = kpc_per_pixel * 1000
        header['CDELT2'] = kpc_per_pixel * 1000

    # convolve with gaussian psf
    if add_false_psf:
        '''
        Add Gaussian PSF to image with provided FWHM in
        arcseconds.
        '''
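        # FWHM -> Gaussian sigma: sigma = FWHM / (2*sqrt(2*ln 2)) ~ FWHM / 2.355,
        # then converted from arcsec to pixels via the current pixel scale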
        std = false_psf_fwhm / arcsec_per_pixel / 2.355
        kernel = Gaussian2DKernel(x_stddev=std, y_stddev=std)
        img_nanomaggies = convolve(img_nanomaggies, kernel)

    # add poisson noise to image
    if add_poisson and not add_cfis_sky:
        '''
        Add shot noise to image assuming the average SDSS
        field properties for zeropoint, airmass, atmospheric
        extinction, and gain. The noise calculation assumes
        that the number of counts in the converted image is 
        the mean number of counts in the Poisson distribution.
        Thereby, the standard error in that number of counts 
        is the square root of the number of counts in each 
        pixel.
        
        !!! Needs change for CFIS.
        '''
        # !!! average SDSS photometric field properties (gain is the inverse
        # gain); CFIS values are still needed here
        airmass = {'u': 1.178, 'g': 1.178, 'r': 1.177, 'i': 1.177, 'z': 1.178}
        aa = {'u': -23.80, 'g': -24.44, 'r': -24.03, 'i': -23.67, 'z': -21.98}
        kk = {'u': 0.5082, 'g': 0.1898, 'r': 0.1032, 'i': 0.0612, 'z': 0.0587}
        gain = {'u': 1.680, 'g': 3.850, 'r': 4.735, 'i': 5.111, 'z': 4.622}
        exptime = 53.907456  # seconds
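        # NOTE: `band` is not defined in this function's scope; a band key
        # ('u'...'z') must be supplied before this block can run (flagged
        # above as needing changes for CFIS)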
        # conversion factor from nanomaggies to counts
        counts_per_nanomaggy = exptime * 10**(
            -0.4 * (22.5 + aa[band] + kk[band] * airmass[band]))
        # image in counts for given field properties
        img_counts = np.clip(img_nanomaggies * counts_per_nanomaggy,
                             a_min=0,
                             a_max=None)
        # poisson noise [adu] computed accounting for gain [e/adu]
        img_counts = np.random.poisson(lam=img_counts *
                                       gain[band]) / gain[band]
        # convert back to nanomaggies
        img_nanomaggies = img_counts / counts_per_nanomaggy

    # add gaussian sky to image
    if add_false_sky:
        '''
        Add sky with noise level set by "false_sky_sig" 
        keyword. "false_sky_sig" should be in relative  
        AB magnitudes/arcsec2 units. In other words,
        10**(-0.4*false_sky_sig) gives the sample 
        standard deviation in the sky in linear flux units
        [maggies/arcsec2] around a sky level of zero.
        '''
        # conversion from mag/arcsec2 to nanomaggies/arcsec2
        false_sky_sig = 10**(0.4 * (22.5 - false_sky_sig))
        # account for pixel scale in final image
        false_sky_sig *= arcsec_per_pixel**2
        # create false sky image
        sky = false_sky_sig * np.random.randn(*img_nanomaggies.shape)
        # add false sky to image in nanomaggies
        img_nanomaggies += sky
        header.append(
            ('SKY', 0.0, 'Average sky in full CFIS tile [nanomaggies]'),
            end=True)
        header.append(('SKYSIG', false_sky_sig,
                       'Average sky uncertainty per pixel [nanomaggies]'),
                      end=True)

    if add_cfis_sky:
        cfis_argList = genCFIS_argList(use_sql=False)
        ra, dec, tile = random.choice(cfis_argList)
        cutoutName = getCutoutImage(ra, dec, tile)
        # cutout data, converted from counts/s (AB zeropoint=30) to nanomaggies
        cutoutData = fits.getdata(cutoutName) * 10**(-0.4 * (30 - 22.5))
        # segmentation map
        segMap = genSegmap(cutoutName)
        # injection coords
        colc, rowc = getInjectCoords(segMap)

        # add real sky pixel by pixel to image in nanomaggies
        corr_ny, corr_nx = cutoutData.shape
        ny, nx = img_nanomaggies.shape
        for xx in range(nx):
            for yy in range(ny):
                corr_x = int(colc - nx / 2 + xx)
                corr_y = int(rowc - ny / 2 + yy)
                if corr_x >= 0 and corr_x <= corr_nx - 1 and corr_y >= 0 and corr_y <= corr_ny - 1:
                    img_nanomaggies[yy, xx] += cutoutData[corr_y, corr_x]
                else:
                    img_nanomaggies[yy, xx] = 0.
        if os.access(cutoutName, 0): os.remove(cutoutName)
        # add field info to image header
        warnings.simplefilter('ignore', category=AstropyWarning)
        header.append(('TILE', tile, 'CFIS tile ID'), end=True)
        header.append(('RA', float(ra), 'Cutout centroid RA'), end=True)
        header.append(('DEC', float(dec), 'Cutout centroid DEC'), end=True)
        header.append(('COLC', colc, 'CFIS tile column center'), end=True)
        header.append(('ROWC', rowc, 'CFIS tile row center'), end=True)
        header.append(('GAIN', 'N/A', 'CFIS CCD GAIN'), end=True)
        header.append(('ZERO', 30.0, 'CFIS image zeropoint'), end=True)
        header.append(
            ('EXTC', 'N/A', 'CFIS image atm. extinction coefficient'),
            end=True)
        header.append(('AIRM', 'N/A', 'CFIS image airmass'), end=True)
        header.append(
            ('SKY', -999, 'Average sky in full CFIS tile [nanomaggies]'),
            end=True)
        header.append(('SKYSIG', -999,
                       'Average sky uncertainty per pixel [nanomaggies]'),
                      end=True)

    if os.access(outputName, 0): os.remove(outputName)
    hdu_pri = fits.PrimaryHDU(img_nanomaggies)
    header['REDSHIFT'] = (redshift, 'Redshift')
    header.append(('COSMO', 'FLAT_LCDM', 'Cosmology'), end=True)
    header.append(('OMEGA_M', cosmo.Om(0), 'Matter density'), end=True)
    header.append(('OMEGA_L', cosmo.Ode(0), 'Dark energy density'), end=True)
    header.append(('SCALE_1', arcsec_per_pixel, '[arcsec/pixel]'), end=True)
    header.append(('SCALE_2', kpc_per_pixel, '[kpc/pixel]'), end=True)
    header.append(('SCALE_3', kpc_per_arcsec, '[kpc/arcsec]'), end=True)
    header.append(('LUMDIST', luminosity_distance.value,
                   'Luminosity distance [Mpc]'),
                  end=True)
    warnings.simplefilter('ignore', category=AstropyWarning)
    header.append(('apply_dimming', apply_dimming), end=True)
    header.append(('rebin_to_CCD', rebin_to_CCD), end=True)
    header.append(('CCD_scale', CCD_scale), end=True)
    header.append(('add_false_sky', add_false_sky), end=True)
    header.append(('false_sky_sig', false_sky_sig), end=True)
    header.append(('add_false_psf', add_false_psf), end=True)
    header.append(('false_psf_fwhm', false_psf_fwhm), end=True)
    header.append(('add_poisson', add_poisson), end=True)
    header.append(('add_cfis_sky', add_cfis_sky), end=True)
    hdu_pri.header = header
    hdu_pri.writeto(outputName)
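
# A hypothetical invocation of RealSim_CFIS; the file names below are
# illustrative placeholders and the keyword values simply restate the
# signature defaults, not a configuration from the source.
from astropy.cosmology import FlatLambdaCDM

RealSim_CFIS(
    'idealized_input.fits',        # single-band idealized image [mag/arcsec2]
    'realsim_cfis_output.fits',    # calibrated output [nanomaggies]
    cosmo=FlatLambdaCDM(H0=70, Om0=0.3),
    redshift=0.05,
    rebin_to_CCD=True,
    CCD_scale=0.187,               # CFIS/MegaCam scale [arcsec/pixel]
    add_false_psf=True,
    false_psf_fwhm=0.6,
    add_cfis_sky=True,
)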
Beispiel #45
0
def test_y1_uncertainties_spline_with_constant_distribution():
    """
    Rebin y values carrying uncertainties onto new bin edges with a cubic
    spline (interp_kind=3) and compare against a hand-built reference.
    """
    # old size
    m = 5

    # new size
    n = 6

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])

    subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])

    y_old = 1. + np.sin(x_old[:-1] * np.pi)

    # compute spline ----------------------------------
    x_mids = x_old[:-1] + 0.5 * np.ediff1d(x_old)
    xx = np.hstack([x_old[0], x_mids, x_old[-1]])
    yy = np.hstack([y_old[0], y_old, y_old[-1]])

    # build spline
    spl = splrep(xx, yy)

    area_old = np.array(
        [splint(x_old[i], x_old[i + 1], spl) for i in range(m)])

    # with uncertainties
    y_old = unp.uarray(y_old, 0.1 * y_old * uniform((m, )))

    # computing subbin areas
    area_subbins = np.zeros((subbins.size - 1, ))
    for i in range(area_subbins.size):
        a, b = subbins[i:i + 2]
        a = max([a, x_old[0]])
        b = min([b, x_old[-1]])
        if b > a:
            area_subbins[i] = splint(a, b, spl)

    # summing subbin contributions in y_new_ref
    a = np.zeros((x_new.size - 1, ))
    y_new_ref = unp.uarray(a, a)
    y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
    y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
    y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
    y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]

    y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
    y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
    y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]

    # call rebin function
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    # mean or nominal value comparison
    assert_allclose(unp.nominal_values(y_new), unp.nominal_values(y_new_ref))

    # mean or nominal value comparison
    assert_allclose(unp.std_devs(y_new), unp.std_devs(y_new_ref))
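
# For orientation, a minimal call using the same assumed rebin signature as
# in the test above; the values are illustrative only.
import numpy as np
import rebin
from uncertainties import unumpy as unp

x_old = np.linspace(0., 1., 6)                    # edges of 5 old bins
y_old = unp.uarray(np.ones(5), 0.1 * np.ones(5))  # values with uncertainties
x_new = np.linspace(0., 1., 3)                    # edges of 2 new bins
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)  # cubic-spline rebin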
Beispiel #46
0
def test_factor_not_tuple_of_ints(self):
    with self.assertRaises(ValueError):
        rebin(self.a, factor=(2, 1.5))
Beispiel #47
0
def test_invalid_length_of_factor(self):
    with self.assertRaises(ValueError):
        rebin(self.a, factor=(1, 2, 3))
Beispiel #48
0
    wave = np.linspace(4000, wavend, fend)
    origcorrected = correct_extinction(inputs.item()[3].strip(), wave, fspec)
    # origcorrected = fspec
    # subtract the emission-line template
    restwave = wave / (1.0 + redshift)
    restnu = c / restwave
    interpfunc = interp1d(x, y, kind='cubic', bounds_error=False)
    newtemplate = interpfunc(restwave)
    thisspec = origcorrected - newtemplate
    specerr = (ferr / c) * restwave**2
    nuspec = np.float64((thisspec / c) * np.power(restwave, 2))
    nuorig = np.float64((origcorrected / c) * np.power(restwave, 2))
    plot_spectra_orig(origcorrected, thisspec, nuorig, nuspec, restwave,
                      restnu, plotout, qmjd)
    if qspec.shape != fspec.shape:
        qspec = rebin(qspec, fspec.shape)
        print('rebinned q')
    if uspec.shape[0] != fspec.shape[0]:
        uspec = rebin(uspec, fspec.shape)
        print('rebinned u')
    # correct polarization for statistical bias (Wardle and Kronberg, 1974)
    # p = np.sqrt(qspec**2 + uspec**2)
    # bin q and u to determine the degree and direction of polarization
    boxcarwindow = 15
    p, sp, thetp, sthetp, qspec, sqspec, uspec, suspec, oldq, oldu = defpol(
        qspec, uspec, boxcarwindow)
    # bin the q, u, and flux arrays
    numberofbins = len(nuorig) // boxcarwindow
    polarflux, spflux = bin_array(nuorig, numberofbins)
    polarnu, spolarnu = bin_array(restnu, numberofbins)
    fitflux, sfitflux = bin_array(nuspec, numberofbins)
Beispiel #49
0
import sys
from math import sqrt

import pandas as pd
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages

from rebin import rebin

filenames = sys.argv[1:]
dataframes = [pd.read_csv(x) for x in filenames]
if len(filenames) > 1:
    #filenames += ["CONCAT"]
    #dataframes += [pd.concat(dataframes)]
    filenames = ["CONCAT"]
    dataframes = [pd.concat(dataframes)]

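# collapse the 7-point rating scale onto three representative bins
# (1-2 -> 1, 3-5 -> 4, 6-7 -> 7); this assumes rebin() remaps column
# values according to the given dict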
dataframes = [rebin(df, dict(zip(range(1, 8), [1, 1, 4, 4, 4, 7, 7]))) for df in dataframes]

# go ahead and open the big pdf
pp = PdfPages("subjects.pdf")
pyplot.locator_params(tight=True)

for fileno, (filename, data) in enumerate(zip(filenames, dataframes)):
    judgement_columns = data.columns[2:]

    means = data[judgement_columns].transpose().mean()
    stds = data[judgement_columns].transpose().std()

    # graphs and stats for individual subjects
    output = []
    for j in judgement_columns:
        ratings = data[j]
Beispiel #50
0
    origcorrected = correct_extinction(inputs.item()[2].strip(), wave, fspec)
    # origcorrected = fspec
    # subtract the emission-line template
    restwave = wave / (1.0 + redshift)
    restnu = c / restwave
    interpfunc = interp1d(x, y, kind='cubic', bounds_error=False)
    newtemplate = interpfunc(restwave)
    thisspec = origcorrected - newtemplate
    specerr = (ferr / c) * restwave**2
    nuspec = np.float64((thisspec / c) * np.power(restwave, 2))
    polspec = np.float64((polspec / c) * np.power(restwave, 2))
    nuorig = np.float64((origcorrected / c) * np.power(restwave, 2))
    plot_spectra_orig(origcorrected, thisspec, nuorig, nuspec, restwave,
                      restnu, plotout, fmjd)
    if nuspec.shape != fspec.shape:
        nuspec = rebin(nuspec, fspec.shape)

    # correct polarization for statistical bias (Wardle and Kronberg, 1974)
    # p = np.sqrt(qspec**2 + uspec**2)
    # bin q and u to determine the degree and direction of polarization
    boxcarwindow = 15
    # p, sp, thetp, sthetp, qspec, sqspec, uspec, suspec, oldq, oldu = defpol(
    #     qspec, uspec, boxcarwindow)
    # otherpolspec, otherpolerr = bin_array(
    #     nuorig * np.sqrt(oldu**2 + oldq**2), len(nuspec) // boxcarwindow)
    # bin the q, u, and flux arrays
    numberofbins = len(nuorig) // boxcarwindow
    polarflux, spflux = bin_array(nuorig, numberofbins)
    polarnu, spolarnu = bin_array(restnu, numberofbins)
Beispiel #51
0
def test_factor_not_int(self):
    with self.assertRaises(ValueError):
        rebin(self.a, factor=1.5)
Beispiel #52
0
def do_test(self, shape, factor, func=None, nulp=1):
    a = 2 * np.random.rand(*shape) - 1
    actual = rebin(a, factor, np.sum)
    expected = my_rebin(a, factor, np.sum)
    assert_array_almost_equal_nulp(expected, actual, nulp=nulp)
Beispiel #53
0
def scores(self, df):
    return rebin(df, self.bin_mapping)
Beispiel #54
0
def fit(
        fileroot='/export/scratch0/ps1sn1/data/v10.0/GPC1v3/eventsv1/workspace/PSc560121/g/PSc560121.md01s043.g.ut090831e.1917665_14.sw',
        xpos=None, ypos=None, radius=10, pdf_pages=None, ra=None, dec=None,
        title='', returnstamps=False, maskfile=None, mysky=None, mysig=None):
    # xpos = xpos +1
    # ypos = ypos +1
    # from matplotlib.backends.backend_pdf import PdfPages
    #pdf_pages = PdfPages('daophot_resid.pdf')
    dofcmp = False
    good = False

    im = pyfits.getdata('%s.fits' % fileroot)
    mask = pyfits.getdata(maskfile)
    impsf = pyfits.getdata('%s.dao.psf.fits' % fileroot)
    fullpsf, hpsf = rdpsf.rdpsf('%s.dao.psf.fits' % fileroot)
    imhdr = pyfits.getheader('%s.fits' % fileroot)

    if dofcmp:
        p = pyfits.open('%s.fcmp' % fileroot)
        p.verify("fix")


        if os.path.exists('test.fcmp'):
            os.remove('test.fcmp')
        p.writeto('test.fcmp', output_verify='fix')
        # fcmp = p[0].header
        # print p[1]

        fcmp = txtobj('test.fcmp', cmpheader=True)
        # print fcmp.__dict__['class']
        # print fcmp['class']
        # raw_input()


        w = wcs.WCS('%s.fits' % fileroot)
        #results2 = w.wcs_world2pix(np.array([[ra, dec]]), 0)
        # xpos,ypos =results2[0][0], results2[0][1]


        psfsize = np.shape(impsf)[0]

        fcmp.Xpos = fcmp.Xpos[1:].astype(float)
        fcmp.Ypos = fcmp.Ypos[1:].astype(float)
        fcmp.__dict__['class'] = fcmp.__dict__['class'][1:].astype(float)

        fcmp.flux = fcmp.flux[1:].astype(float)
        fcmp.dflux = fcmp.dflux[1:].astype(float)
        # for x,y,flux,fluxerr in zip(fcmp.Xpos,fcmp.Ypos,
        #                            fcmp.flux,fcmp.dflux):


        # print fcmp.Xpos-xpos
        # print fcmp.Ypos-ypos
        # raw_input()
        #print fcmp.__dict__['class']
        #print fcmp.Xpos
        #print xpos
        #raw_input()
        ww = (abs(fcmp.Xpos - xpos) < 1.) & (abs(fcmp.Ypos - ypos) < 1.)
        thisclass = fcmp.__dict__['class'][ww]
        #print 'THIS CLASS IS', thisclass
        #print 'all classes', fcmp.__dict__['class']
        # flux = fcmp.flux
        # fluxerr = fcmp.dflux

        if len(thisclass) == 1:
            if thisclass[0] == 1:
                good = True

    fluxerr = 100.
    chisq = 1.
    dms = 1.

    x = xpos
    y = ypos

    ny, nx = np.shape(im)
    psfy, psfx = np.shape(impsf)
    ixlo, iylo = int(x - radius), int(y - radius)
    if ixlo < 0: ixlo = 0
    if iylo < 0: iylo = 0
    ixhi = int(x + radius) + 1
    iyhi = int(y + radius) + 1
    if ixhi > (nx - 1): ixhi = nx - 1
    if iyhi > (ny - 1): iyhi = ny - 1
    ixx = ixhi - ixlo + 1
    iyy = iyhi - iylo + 1
    dx = np.arange(ixx) + ixlo - x
    dy = np.arange(iyy) + iylo - y
    psf1d = impsf.reshape(np.shape(impsf)[0] ** 2)
    gauss = [hpsf['GAUSS1'], hpsf['GAUSS2'], hpsf['GAUSS3'],
             hpsf['GAUSS4'], hpsf['GAUSS5']]
    dx = dx.reshape(1, len(dx))
    dy = dy.reshape(len(dy), 1)
    dx = rebin.rebin(dx, [np.shape(dx)[1], np.shape(dx)[1]])
    dy = rebin.rebin(dy, [len(dy), len(dy)])
    try:
        model = dao_value.dao_value(dx, dy, gauss,
                                    impsf,  # psf1d=psf1d,
                                    deriv=False)  # ,ps1d=False)
    except:
        return 1, 1, 0, 0, False, 0, 0, 0


    subim = im[iylo - 1:iyhi, ixlo - 1:ixhi]
    #print 'modelshape', model.shape, 'imshape', subim.shape
    #raw_input()
    submask = mask[iylo - 1:iyhi, ixlo - 1:ixhi]
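    # invert the bad-pixel mask in place: nonzero (bad) -> 0, zero (good) -> 1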
    submask[submask != 0] = 9
    submask[submask == 0 ] = 1
    submask[submask == 9 ] = 0
    # scaledpsf = model+impsf[psfy/2+1-radius:psfy/2+1+radius+1,
    #                        psfx/2+1-radius:psfx/2+1+radius+1]
    # print model.shape
    # print flux.shape
    # print hpsf['PSFMAG']
    # print imhdr['SKYADU']
    chisqvec = []
    fluxvec = []
    substamp = model.shape[0]
    fitrad = np.zeros([substamp, substamp])
    radius = 4
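    # build a circular fitting-aperture mask: 1 within `radius` pixels of the
    # stamp center, 0 elsewhere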
    for x in np.arange(substamp):
        for y in np.arange(substamp):
            if np.sqrt((substamp / 2. - x) ** 2 + (substamp / 2. - y) ** 2) < radius:
                fitrad[int(x), int(y)] = 1.
    '''
    for flux in range(1,500000,200):
        scaledpsf = model*flux/10**(-0.4*(hpsf['PSFMAG']-25)) + imhdr['SKYADU']
        chisq = np.sum(fitrad*(subim-scaledpsf)**2/imhdr['SKYSIG']**2)
        chisqvec.append(chisq)
        fluxvec.append(flux)

    chisqvec = np.array(chisqvec)
    fluxvec = np.array(fluxvec)
    flux = fluxvec[np.argmin(chisqvec)]
    scaledpsf = model*flux/10**(-0.4*(hpsf['PSFMAG']-25)) + imhdr['SKYADU']

    #resid(param,psf,im,sigma,fitrad,sky,psfmag)

    #print model, subim, imhdr['SKYSIG']
    '''
    # fluxls, cov = opti.leastsq(resid, 100000,
    #                            args=(model, subim, imhdr['SKYSIG'], fitrad, imhdr['SKYADU'], hpsf['PSFMAG']),full_output=False)

    fluxls, cov = opti.leastsq(resid, 100000,args=(model, subim, mysig, fitrad, mysky, hpsf['PSFMAG']),full_output=False)

    #print cov.shape
    #print fluxls, cov
    #raw_input('covshape')
    # print 'flux fit comparo',flux,fluxls,
    scaledpsf = model*fluxls/10**(-0.4*(hpsf['PSFMAG']-25)) + imhdr['SKYADU']



    if pdf_pages is not None:
        print('plotting')
        fig = plt.figure()
        plt.clf()
        axim = plt.subplot(131)
        axpsf = plt.subplot(132)
        axdiff = plt.subplot(133)
        # label each panel, flagging whether the star was classed as good
        suffix = ' GOOD' if good else ' BAD'
        for ax, panel in zip([axim, axpsf, axdiff],
                             ['image', 'model', 'difference']):
            ax.set_title(panel + suffix)
        axim.imshow(subim, cmap='gray', interpolation='nearest')
        axpsf.imshow(scaledpsf, cmap='gray', interpolation='nearest')
        resid_im = axdiff.imshow(subim - scaledpsf, cmap='gray',
                                 interpolation='nearest')
        fig.colorbar(resid_im)
        plt.title(title + suffix)
        pdf_pages.savefig(fig)




    if returnstamps:
        rpsf = model
        # we know this won't be in the star catalog file, so mark it as good
        good = True
        return fluxls, fluxerr, chisq, dms, good, subim, rpsf, imhdr['SKYSIG'], fitrad, imhdr['SKYADU'], hpsf['PSFMAG'], submask
    #print fluxls
    #print np.max(model)
    #raw_input('fluxls')
    sstamp = simstamp(fluxls,model, subim, imhdr['SKYSIG'], fitrad, imhdr['SKYADU'], hpsf['PSFMAG'])

    return fluxls, fluxerr, chisq, dms, good, subim, sstamp, model
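
# A sketch of how fit() might be called for one position; the paths,
# coordinates, and sky values below are placeholders, not from the source.
from matplotlib.backends.backend_pdf import PdfPages

pdf = PdfPages('daophot_resid.pdf')
flux, fluxerr, chisq, dms, good, stamp, sim_stamp, model = fit(
    fileroot='PSc000000.sw',       # expects <fileroot>.fits and <fileroot>.dao.psf.fits
    xpos=1024.0, ypos=1024.0, radius=10,
    pdf_pages=pdf,
    maskfile='PSc000000.mask.fits',
    mysky=0.0, mysig=10.0,         # sky level and noise for the least-squares fit
)
pdf.close()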
Beispiel #55
0
        dir_create(plot_dir)
        print('directory build complete!')
##############################################################################################
    if comm_rank == 0:
        print('load data over, begin to distribute it to multiprocess!')
    d_sets, t_sets, freq = distribute_data(d_sets, t_sets, freq, p_n, t_len,
                                           datatype)
    if comm_rank == 0:
        print('Data load and scatter to multiprocess over. rank:', comm_rank)

    # if comm_rank == 0: print('Begin to calibrate... rank:', comm_rank)
    # chan_equaliz = np.load('/home/nch/FFT_search/src/chan_equaliz.npy')
    # d_sets = calibration(chan_equaliz, d_sets)

    if comm_rank == 0: print('Begin to rebin... rank:', comm_rank)
    re_sets, f_axis, nbin = rebin(d_sets, t_sets, freq)

    if comm_rank == 0: print('Rebin over. rank:', comm_rank)

    if comm_rank == 0:
        print('Begin to do 1st 2-D FFT on rebinned data... rank:', comm_rank)
    FFT1st_sets = FFT(re_sets, 2, msk_cycle, comm_rank)

    if comm_rank == 0: print('1st FFT over. rank:', comm_rank)

    if comm_rank == 0:
        print('Begin to transform rectangular coordinates into polar coordinates... rank:', comm_rank)
    polar_sets = polar_coordinates_convert(FFT1st_sets, rad_grid, ang_grid,
                                           ang_min, ang_max)

    if comm_rank == 0: print('Polar transform over. rank:', comm_rank)