Code example #1
    def buildSection(self, xo = None, yo = None, xm = None, ym = None,
                    pts = 100, gfilter = 5):
        """
        Extract a slice from the 3D data set and compute the stratigraphic layers.
        Parameters
        ----------
        variable: xo, yo
            Lower X,Y coordinates of the cross-section.
        variable: xm, ym
            Upper X,Y coordinates of the cross-section.
        variable: pts
            Number of points to discretise the cross-section.
        variable: gfilter
            Gaussian smoothing filter.
        """

        if xm > self.x.max():
            xm = self.x.max()

        if ym > self.y.max():
            ym = self.y.max()

        if xo < self.x.min():
            xo = self.x.min()

        if yo < self.y.min():
            yo = self.y.min()

        xsec, ysec = self._cross_section(xo, yo, xm, ym, pts)
        self.dist = np.sqrt(( xsec - xo )**2 + ( ysec - yo )**2)
        self.xsec = xsec
        self.ysec = ysec
        for k in range(self.nz):
            # Thick
            rect_B_spline = RectBivariateSpline(self.yi, self.xi, self.th[:,:,k])
            data = rect_B_spline.ev(ysec, xsec)
            secTh = filters.gaussian_filter1d(data,sigma=gfilter)
            secTh[secTh < 0] = 0
            self.secTh.append(secTh)

            # Elev
            rect_B_spline1 = RectBivariateSpline(self.yi, self.xi, self.elev[:,:,k])
            data1 = rect_B_spline1.ev(ysec, xsec)
            secElev = filters.gaussian_filter1d(data1,sigma=gfilter)
            self.secElev.append(secElev)

            # Depth
            rect_B_spline2 = RectBivariateSpline(self.yi, self.xi, self.dep[:,:,k])
            data2 = rect_B_spline2.ev(ysec, xsec)
            secDep = filters.gaussian_filter1d(data2,sigma=gfilter)
            self.secDep.append(secDep)

        # Ensure the spline interpolation does not create underlying layers above upper ones
        topsec = self.secDep[self.nz-1]
        for k in range(self.nz-2,-1,-1):
            secDep = self.secDep[k]
            self.secDep[k] = np.minimum(secDep, topsec)
            topsec = self.secDep[k]

        return
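
The method above relies on class state (self.xi, self.yi, self.th, self._cross_section). A minimal standalone sketch of its core step, assuming a regular (yi, xi) grid with made-up sizes: interpolate one 2D layer along a cross-section with RectBivariateSpline and smooth the profile with a 1D Gaussian filter.

import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import gaussian_filter1d

# Hypothetical stand-ins for self.xi, self.yi and one layer self.th[:, :, k]
xi = np.linspace(0.0, 100.0, 101)
yi = np.linspace(0.0, 50.0, 51)
layer = np.random.default_rng(0).random((yi.size, xi.size))

# Hypothetical cross-section sample points (stand-ins for xsec, ysec)
xsec = np.linspace(10.0, 90.0, 100)
ysec = np.linspace(5.0, 45.0, 100)

spline = RectBivariateSpline(yi, xi, layer)
section = gaussian_filter1d(spline.ev(ysec, xsec), sigma=5)
print(section.shape)  # (100,)
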
Code example #2
File: loth_baker_2013.py Project: ynthdhj/shakemap
    def __init__(self, periods):
        """
        Create an instance of LB13.

        Args:
            periods (numpy.array): An array of periods that will be requested
                from the function. Values must be [0.01 -> 10.0], and must be
                sorted from smallest to largest.

        Returns:
            An instance of :class:`LothBaker2013`.
        """

        if np.any(periods < 0.01):
            raise ValueError('The periods must be greater or equal to 0.01s')
        if np.any(periods > 10):
            raise ValueError('The periods must be less or equal to 10s')

        rbs1 = RectBivariateSpline(Tlist, Tlist, B1, kx=1, ky=1)
        rbs2 = RectBivariateSpline(Tlist, Tlist, B2, kx=1, ky=1)
        rbs3 = RectBivariateSpline(Tlist, Tlist, B3, kx=1, ky=1)

        #
        # Build new tables with entries at the periods we will use
        #
        tlist = list(zip(*it.product(periods, periods)))
        nper = np.size(periods)
        self.b1 = rbs1.ev(tlist[0], tlist[1]).reshape((nper, nper))
        self.b2 = rbs2.ev(tlist[0], tlist[1]).reshape((nper, nper))
        self.b3 = rbs3.ev(tlist[0], tlist[1]).reshape((nper, nper))
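
The zip(*it.product(periods, periods)) idiom above evaluates the bilinear spline at every ordered pair of requested periods and reshapes the result into a square table. A standalone sketch of the same step, with hypothetical 3x3 arrays standing in for the module-level Tlist and B1 tables:

import itertools as it
import numpy as np
from scipy.interpolate import RectBivariateSpline

# Hypothetical stand-ins for the module-level period list and coefficient table
Tlist = np.array([0.01, 1.0, 10.0])
B1 = np.arange(9, dtype=float).reshape(3, 3)

periods = np.array([0.05, 0.5, 2.0, 8.0])
rbs1 = RectBivariateSpline(Tlist, Tlist, B1, kx=1, ky=1)

# Evaluate at every (period, period) pair, then reshape into an nper x nper table
tlist = list(zip(*it.product(periods, periods)))
nper = np.size(periods)
b1 = rbs1.ev(tlist[0], tlist[1]).reshape((nper, nper))
print(b1.shape)  # (4, 4)
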
Code example #3
File: DS_EffectiveArea.py Project: dspolyar/Main
def main():
    
    filenameEffArea='aeff_P7REP_ULTRACLEAN_V15_back.fits'
    directoryEffectiveArea='/Users/dspolyar/Documents/IRF/EffectiveArea/' 
    print(pyfits.info(directoryEffectiveArea + filenameEffArea))
    CTHETA_LO, CTHETA_HI, energyLow, energyHigh, EFFAREA = importEffectiveArea(directoryEffectiveArea+filenameEffArea)
    energylog, Ctheta=centeringDataAndConvertingToLog(energyHigh,energyLow,CTHETA_HI,CTHETA_LO)
    SplineEffectiveArea=RectBivariateSpline(Ctheta,energylog,EFFAREA)
    plotofEffectiveArea(SplineEffectiveArea,EFFAREA,energylog,Ctheta)
    print(SplineEffectiveArea.ev(1., 5.))
Code example #4
def LucasKanadeAffine(It, It1):
    # Input:
    #   It: template image
    #   It1: Current image
    # Output:
    #   M: the Affine warp matrix [2x3 numpy array]
    # put your implementation here
    M = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    p = M.flatten()

    x1, y1, x2, y2 = 0, 0, It.shape[1] - 1, It.shape[0] - 1
    threshold = 1e-3
    dp = np.array([float("inf")] * 6)

    interpolated_It1 = RectBivariateSpline(
        x=np.array([i for i in range(int(It1.shape[0]))]),
        y=np.array([i for i in range(int(It1.shape[1]))]),
        z=It1)

    while np.sum(np.square(dp)) >= threshold:
        x = np.arange(x1, x2 + .5)
        y = np.arange(y1, y2 + .5)
        X = np.array([x for i in range(len(y))])
        Y = np.array([y for i in range(len(x))]).T
        warp_X = p[0] * X + p[1] * Y + p[2]
        warp_Y = p[3] * X + p[4] * Y + p[5]
        valid_points = (warp_X >= x1) & (warp_X <= x2) & (warp_Y >=
                                                          y1) & (warp_Y <= y2)
        X, Y = X[valid_points], Y[valid_points]
        warp_X, warp_Y = warp_X[valid_points], warp_Y[valid_points]
        warp_It1x = interpolated_It1.ev(warp_Y, warp_X)

        dx = interpolated_It1.ev(warp_Y, warp_X, dx=0, dy=1).flatten()
        dy = interpolated_It1.ev(warp_Y, warp_X, dx=1, dy=0).flatten()

        A = np.array([
            dx * X.flatten(),
            dx * Y.flatten(),
            dx,
            dy * X.flatten(),
            dy * Y.flatten(),
            dy,
        ]).T
        b = (It[valid_points] - warp_It1x).flatten()

        dp = np.dot(np.linalg.inv(np.dot(A.T, A)), np.dot(A.T, b))
        p += dp.flatten()

    M = np.copy(p).reshape(2, 3)
    M = np.vstack((M, np.array([[0, 0, 1]])))
    return M
Code example #5
def LucasKanade(It, It1, rect, threshold, num_iters, p0=np.zeros(2)):
    """
    :param It: template image
    :param It1: Current image
    :param rect: Current position of the car (top left, bot right coordinates)
    :param threshold: if the length of dp is smaller than the threshold, terminate the optimization
    :param num_iters: number of iterations of the optimization
    :param p0: Initial movement vector [dp_x0, dp_y0]
    :return: p: movement vector [dp_x, dp_y]
    """

    # Put your implementation here
    p = p0.copy()  # copy so the mutable default p0 is not modified in place by p += dp

    x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]

    It_spline = RectBivariateSpline(np.arange(It.shape[0]),
                                    np.arange(It.shape[1]), It)

    It1_spline = RectBivariateSpline(np.arange(It1.shape[0]),
                                     np.arange(It1.shape[1]), It1)

    dp = np.array([float("inf"), float("inf")])

    count = 0
    x = np.arange(x1, x2 + 1)
    y = np.arange(y1, y2 + 1)
    x, y = np.meshgrid(x, y)

    while np.linalg.norm(dp) >= threshold and count <= num_iters:
        count += 1

        Itx = It_spline.ev(y, x).flatten()
        It1x_warp = It1_spline.ev(y + p[1], x + p[0]).flatten()
        A = np.array([
            It1_spline.ev(y + p[1], x + p[0], dx=0, dy=1).flatten(),
            It1_spline.ev(y + p[1], x + p[0], dx=1, dy=0).flatten()
        ]).T

        # print(np.shape(A))

        b = (Itx - It1x_warp).flatten()

        # print(np.shape(b))

        dp = (np.linalg.inv(A.T @ A)) @ (A.T @ b)

        p += dp

    return p
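
A hypothetical usage sketch for the LucasKanade routine above, tracking a smooth synthetic blob that has been translated by two pixels (the image, rectangle and parameter values are made up for illustration):

import numpy as np

yy, xx = np.mgrid[0:100, 0:100]
It = np.exp(-((xx - 50.0)**2 + (yy - 50.0)**2) / (2 * 15.0**2))    # template frame
It1 = np.exp(-((xx - 52.0)**2 + (yy - 52.0)**2) / (2 * 15.0**2))   # blob shifted by (+2, +2)
rect = [30, 30, 70, 70]                                            # x1, y1, x2, y2
p = LucasKanade(It, It1, rect, threshold=1e-3, num_iters=100)
print(p)  # expected to converge near [2, 2]
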
Code example #6
    def getEroDep(self,
                  xo=None,
                  yo=None,
                  xm=None,
                  ym=None,
                  pts=100,
                  gfilter=5):
        """
        Extract a slice from the 3D data set and compute its deposition thicknesses.
        Parameters
        ----------
        variable: xo, yo
            Lower X,Y coordinates of the cross-section
        variable: xm, ym
            Upper X,Y coordinates of the cross-section
        variable: pts
            Number of points to discretise the cross-section
        variable: gfilter
            Gaussian smoothing filter
        """

        if xm > self.x.max():
            xm = self.x.max()

        if ym > self.y.max():
            ym = self.y.max()

        if xo < self.x.min():
            xo = self.x.min()

        if yo < self.y.min():
            yo = self.y.min()

        xsec, ysec = self._cross_section(xo, yo, xm, ym, pts)
        self.dist = np.sqrt((xsec - xo)**2 + (ysec - yo)**2)

        # Surface
        rect_B_spline = RectBivariateSpline(self.y[:, 0], self.x[0, :], self.z)
        datatop = rect_B_spline.ev(ysec, xsec)
        self.top = filters.gaussian_filter1d(datatop, sigma=gfilter)

        # Cumchange
        rect_B_spline = RectBivariateSpline(self.y[:, 0], self.x[0, :],
                                            self.cumchange)
        cumdat = rect_B_spline.ev(ysec, xsec)
        gcum = filters.gaussian_filter1d(cumdat, sigma=gfilter)
        self.depo = gcum.clip(min=0)

        return
Code example #7
File: surface_shape.py Project: t4qjXH8N/pyrate
class GridSag(ExplicitShape):
    """
    Class for gridsag
    """
    def __init__(self, lc, xxx_todo_changeme, name="", *args, **kwargs):

        (xlinspace, ylinspace, Zgrid) = xxx_todo_changeme
        kwargs_dict = kwargs
        name = kwargs_dict.pop('name', '')

        self.interpolant = RectBivariateSpline(xlinspace, ylinspace, Zgrid)

        #self.interpolant = interp2d(xlinspace, ylinspace, Zgrid, kind=kind, *args, **kwargs_dict)

        def gsf(x, y):
            res = self.interpolant.ev(x, y)

            return res

        def gradgsf(x, y,
                    z):  # gradient for implicit function z - af(x, y) = 0
            res = np.zeros((3, len(x)))

            res[0, :] = -self.interpolant.ev(x, y, dx=1)
            res[1, :] = -self.interpolant.ev(x, y, dy=1)
            res[2, :] = 1.

            return res

        def hessgsf(x, y, z):
            res = np.zeros((3, 3, len(x)))

            res[0, 0, :] = -self.interpolant.ev(x, y, dx=2)
            res[0, 1, :] = res[1,
                               0, :] = -self.interpolant.ev(x, y, dx=1, dy=1)
            res[1, 1, :] = -self.interpolant.ev(x, y, dy=2)

            return res

        super(GridSag, self).__init__(lc,
                                      gsf,
                                      gradgsf,
                                      hessgsf,
                                      eps=1e-4,
                                      iterations=10,
                                      name=name)

    def setKind(self):
        self.kind = "shape_GridSag"
Code example #8
def SubtractDominantMotion(image1, image2):
    # Input:
    #   Images at time t and t+1
    #  Output:
    #   mask: [nxm]
    #  put your implementation here

    mask = np.zeros(image1.shape, dtype=bool)

    # M = LucasKanadeAffine.LucasKanadeAffine(image1, image2)
    # use efficient method
    M = InverseCompositionAffine.InverseCompositionAffine(image1, image2)

    if M.shape[0] < 3:
        M = np.vstack((M, np.array([[0, 0, 1]])))
    M = np.linalg.inv(M)

    interp_spline_image1 = RectBivariateSpline(np.arange(image1.shape[0]),
                                               np.arange(image1.shape[1]),
                                               image1)
    interp_spline_image2 = RectBivariateSpline(np.arange(image2.shape[0]),
                                               np.arange(image2.shape[1]),
                                               image2)
    # project coordinates to interpolate the values
    x = np.arange(0, image2.shape[1])
    y = np.arange(0, image2.shape[0])
    X, Y = np.meshgrid(x, y)
    X_ = M[0, 0] * X + M[0, 1] * Y + M[0, 2]
    Y_ = M[1, 0] * X + M[1, 1] * Y + M[1, 2]
    # get the invalid positions that are not in the common area of the two images
    invalid = (X_ < 0) | (X_ >= image1.shape[1]) | (Y_ < 0) | (Y_ >= image1.shape[0])
    interped_I1 = interp_spline_image1.ev(Y_, X_)
    interped_I2 = interp_spline_image2.ev(Y, X)
    interped_I1[invalid] = 0
    interped_I2[invalid] = 0

    # calculate the difference
    diff = abs(interped_I2 - interped_I1)
    th = 0.1
    ind = (diff > th) & (interped_I2 != 0)
    mask[ind] = 1
    ker = np.array(([0, 0, 1, 0, 0], [0, 1, 1, 1,
                                      0], [1, 1, 1, 1,
                                           1], [0, 1, 1, 1,
                                                0], [0, 0, 1, 0, 0]))
    mask = mp.binary_dilation(mask, structure=ker).astype(mask.dtype)

    return mask
Code example #9
def InverseCompositionAffine(It, It1, threshold, num_iters):
    """
    :param It: template image
    :param It1: Current image
    :param threshold: if the length of dp is smaller than the threshold, terminate the optimization
    :param num_iters: number of iterations of the optimization
    :return: M: the Affine warp matrix [2x3 numpy array]
    """

    # put your implementation here
    p = np.zeros(6)
    M = np.array([[1.0 + p[0], p[1], p[2]], [p[3], 1.0 + p[4], p[5]]])

    template_height, template_width = It.shape[0], It.shape[1]
    image_height, image_width = It1.shape[0], It1.shape[1]
    x_range = np.arange(0, template_width, 1)
    y_range = np.arange(0, template_height, 1)

    # get rectangular splines
    interpolated_template_spline = RectBivariateSpline(y_range, x_range, It)
    interpolated_current_image_spline = RectBivariateSpline(np.arange(0, image_height, 1),
                                                            np.arange(0, image_width, 1), It1)

    # get template grid
    xv, yv = np.meshgrid(x_range, y_range)
    xv, yv = xv.reshape(1, -1), yv.reshape(1, -1)
    template = np.vstack((xv, yv, np.ones((1, template_height * template_width))))
    grad_x = interpolated_current_image_spline.ev(yv, xv, dy=1).flatten()
    grad_y = interpolated_current_image_spline.ev(yv, xv, dx=1).flatten()
    jacobian_gd = np.vstack((xv * grad_x,
                             yv * grad_x,
                             grad_x,
                             xv * grad_y,
                             yv * grad_y,
                             grad_y))
    h = np.linalg.pinv(jacobian_gd)

    for i in range(num_iters):
        warped_image = np.dot(M, template)
        interpolated_template = interpolated_template_spline.ev(yv, xv).flatten()
        interpolated_current_image = interpolated_current_image_spline.ev(warped_image[1, :],
                                                                          warped_image[0, :]).flatten()
        difference = interpolated_template - interpolated_current_image
        dp = np.dot(h.T, difference)
        p += dp
        M = np.array([[1.0 + p[0], p[1], p[2]], [p[3], 1.0 + p[4], p[5]]])
        if np.linalg.norm(dp) <= threshold:
            break
    return M
Code example #10
File: brane.py Project: achael/scatterbrane
  def setModel(self,model,dx,think_positive=False):
    '''
    Set new model for the source.

    :param model: ``(n, n)``
      Numpy image array.
    :param dx: scalar
      Pixel size in microarcseconds.
    :param think_positive: (optional) bool
        Should we enforce that the source image has no negative pixel values?
    '''
    self.nx = int(ceil(model.shape[-1] * dx / self.dx))          # number of image pixels
    self.model = model                                           # source model
    self.model_dx = dx                                           # source model resolution

    # load source image that has size and resolution compatible with the screen.
    self.isrc = np.empty(2*(self.nx,))
    self.think_positive = think_positive

    M = self.model.shape[1]       # size of original image array
    f_img = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                self.model)

    xx_,yy_ = np.meshgrid((np.arange(self.nx) - 0.5*(self.nx-1)),\
                          (np.arange(self.nx) - 0.5*(self.nx-1)),indexing='xy')

      
    m  = f_img.ev(yy_.flatten(),xx_.flatten()).reshape(2*(self.nx,))
    self.isrc  = m * (self.dx/self.model_dx)**2     # rescale for change in pixel size

    if self.think_positive:
      self.isrc[self.isrc < 0] = 0

    if not self.live_dangerously: self._checkSanity()
Code example #11
    def transform(self, sourceImage, destinationImage):
        #  - Verify that both input arguments are numpy arrays,
        if not isinstance(sourceImage, np.ndarray) or not isinstance(
                destinationImage, np.ndarray):
            raise TypeError('One or more images not np arrays')

        #  - affine transformation from the triangle in the source image to its corresponding triangle

        # 1st step: create mask
        length, width = destinationImage.shape
        img = Image.new('L', (width, length), 0)
        vertices = [(x, y) for [x, y] in self.destination]
        ImageDraw.Draw(img).polygon(vertices, outline=255, fill=255)
        mask = np.array(img)

        # 2nd step: create H inverse and get bivariate spline approximation
        H_inv = np.linalg.inv(self.matrix)
        x = np.arange(length)
        y = np.arange(width)
        approx = RectBivariateSpline(x, y, sourceImage, kx=1, ky=1)

        #get each point in mask whose value is greater than 0
        for yp, xp in np.transpose(np.nonzero(mask)):
            #H^-1 * [x' y' 1] = [x y 1]
            P = H_inv.dot([xp, yp, 1])[0:2]

            #2d interpolation to obtain a value if x y are floats
            shade = np.round(approx.ev(P[1], P[0]))

            #assign that value to x' y'
            destinationImage[yp][xp] = shade
Code example #12
File: geoExtract.py Project: piMoll/SEILAPLAN
def calcAnker(IS, inputPoints, rasterdata, gp):
    """
    """
    dhm = rasterdata['subraster']
    [Xa, Ya, Xe, Ye] = inputPoints
    # The last coordinate in xi/yi does not exactly match the end coordinates
    Xe_ = gp['xi'][-1]
    Ye_ = gp['yi'][-1]

    AnkA_dist = IS['d_Anker_A'][0]
    AnkE_dist = IS['d_Anker_E'][0]
    stueA_H = IS['HM_Anfang'][0]
    stueE_H = IS['HM_Ende_max'][0]

    # Compute the X and Y coordinates of the geodata in the projection system
    dx = float(Xe - Xa)
    dy = float(Ye - Ya)
    if dx == 0:
        dx = 0.0001
    azimut = math.atan(dy/dx)
    if dx > 0:
        azimut += 2 * math.pi
    else:
        azimut += math.pi
    # X and Y coordinates of the two anchor points on the ground
    AnkXa = Xa - AnkA_dist * math.cos(azimut)
    AnkYa = Ya - AnkA_dist * math.sin(azimut)
    AnkXe = Xe_ + AnkE_dist * math.cos(azimut)
    AnkYe = Ye_ + AnkE_dist * math.sin(azimut)

    # Linear interpolation
    # Coordinate arrays of the DEM
    coordX = gp['linspaces'][0]
    coordY = gp['linspaces'][1]
    # kx, ky denote the degree of interpolation, 1 = linear
    spline = RectBivariateSpline(-coordY, coordX, dhm, kx=1, ky=1)
    xi = np.array([AnkXa, Xa, Xe_, AnkXe])
    yi = np.array([AnkYa, Ya, Ye_, AnkYe])
    # Z coordinate of the anchors for the start and end points
    zAnker = spline.ev(-yi, xi)     # ground elevations

    AnkA_z = stueA_H + 0.1*(zAnker[1] - zAnker[0])
    AnkE_z = stueE_H + 0.1*(zAnker[2] - zAnker[3])

    if AnkA_dist == 0:
        AnkA_z = 0.0
    if AnkE_dist == 0:
        AnkE_z = 0.0

    Ank = [AnkA_dist, AnkA_z, AnkE_dist, AnkE_z]

    # Extents of the anchor fields, everything in [m]
    #Ank = [d_Anker_A, z_Anker_A * 0.1, d_Anker_E, z_Anker_E * 0.1]
    Laenge_Ankerseil = (AnkA_dist**2 + AnkA_z**2)**0.5 + \
                       (AnkE_dist**2 + AnkE_z**2)**0.5

    # Possibly not needed
    #IS['z_Anker_A'][0] = z_Anker_A
    #IS['z_Anker_E'][0] = z_Anker_E
    return [Ank, Laenge_Ankerseil, zAnker]
Code example #13
def skybg_phot(x0, y0, data, r=25, dr=5, samp=3, debug=False):

    # determine img indexes for aperture region
    xv, yv = mesh_box([x0, y0], (r + dr) + 2)

    # derive indexes on a higher-resolution grid and create aperture mask
    px, py, mask = sky_annulus(x0, y0, r=r, samp=xv.shape[0] * samp)

    # interpolate original data onto higher resolution grid
    subdata = data[yv, xv]
    model = RectBivariateSpline(np.unique(xv), np.unique(yv), subdata)

    # evaluate data on highres grid
    pz = model.ev(px, py)

    # zero out pixels larger than radius
    pz[~mask] = 0
    pz[pz < 0] = 0

    # scale area back to original grid, total flux in sky annulus
    parea = pz.sum() * np.diff(px).mean() * np.diff(py[:, 0]).mean()

    if debug:
        print('mask area=',
              mask.sum() * np.diff(px).mean() * np.diff(py[:, 0]).mean())
        print('true area=', 2 * np.pi * r * dr)
        print('subdata flux=', subdata.sum())
        print('bg phot flux=', parea)
        import pdb
        pdb.set_trace()

    # return bg value per pixel
    return pz.sum() / mask.sum()
Code example #14
File: sweeps.py Project: lsolanka/ei-attractor
    def plot(self, ax, V=None, **kwargs):
        '''Plot the contours into matplotlib axis.

        Parameters
        ----------
        ax : matplotlib.Axes
            Axes to plot into
        V : array-like
            A list of contour values to plot. If not None, the internal contour
            values will be overriden during plotting, but not inside the
            object.
        kwargs : dict
            Keyword arguments to pass on to the ax.contour() method.
        '''
        if V is None:
            V = self.V
        d, X, Y = self.data.getData()
        # hack - add zero value to close contours
        d = np.hstack((d, np.zeros((d.shape[0], 1))))
        d = np.vstack((d, np.zeros((1, d.shape[1]))))
        dx = X[0, 1] - X[0, 0]
        dy = Y[1, 0] - Y[0, 0]
        x_longer = X[0, :].tolist()
        x_longer.append(X[0, -1] + dx)
        y_longer = Y[:, 0].tolist()
        y_longer.append(Y[-1, 0] + dy)
        x_interp, y_interp = np.meshgrid(
            np.linspace(x_longer[0], x_longer[-1],
                        len(x_longer) * self.upsample_factor),
            np.linspace(y_longer[0], y_longer[-1],
                        len(y_longer) * self.upsample_factor))
        spl = RectBivariateSpline(x_longer, y_longer, d.T)
        d_interp = spl.ev(x_interp, y_interp)
        ax.contour(x_interp, y_interp, d_interp, V, **kwargs)
Code example #16
def _mesh_interpolate_worker(args):
    coords, raster, crs, chunk_size = args
    raster = Raster(raster)
    raster.warp(crs)
    results = []
    for window in raster.iter_windows(chunk_size=chunk_size, overlap=2):
        xi = raster.get_x(window)
        yi = raster.get_y(window)
        zi = raster.get_values(window=window)
        f = RectBivariateSpline(
            xi,
            np.flip(yi),
            np.fliplr(zi).T,
            bbox=[np.min(xi), np.max(xi),
                  np.min(yi), np.max(yi)],
            kx=3,
            ky=3,
            s=0)
        idxs = np.where(
            np.logical_and(
                np.logical_and(
                    np.min(xi) < coords[:, 0],
                    np.max(xi) > coords[:, 0]),
                np.logical_and(
                    np.min(yi) < coords[:, 1],
                    np.max(yi) > coords[:, 1])))[0]

        values = f.ev(coords[idxs, 0], coords[idxs, 1])
        results.append((idxs, values))
    return results
Code example #17
    def _get_above_cloud_r_and_dr(self,
                                  P_profile,
                                  T_profile,
                                  abundances,
                                  planet_mass,
                                  planet_radius,
                                  star_radius,
                                  above_cloud_cond,
                                  T_star=None):
        assert (len(P_profile) == len(T_profile))
        # First, get atmospheric weight profile
        mu_profile = np.zeros(len(P_profile))

        for species_name in abundances:
            interpolator = RectBivariateSpline(self.P_grid,
                                               self.T_grid,
                                               abundances[species_name],
                                               kx=1,
                                               ky=1)
            atm_abundances = interpolator.ev(P_profile, T_profile)
            mu_profile += atm_abundances * self.mass_data[species_name]

        return _hydrostatic_solver._solve(P_profile, T_profile,
                                          self.ref_pressure, mu_profile,
                                          planet_mass, planet_radius,
                                          star_radius, above_cloud_cond,
                                          T_star)
Code example #18
def regrid_imagery(image,
                   x_image,
                   y_image,
                   x_regrid,
                   y_regrid,
                   image_proj,
                   regrid_proj,
                   spline_kws=None):
    """
    For a given image, regrid it to another projection using spline interpolation.

    Args:
        image:
        x_image:
        y_image:
        x_regrid:
        y_regrid:
        image_proj:
        regrid_proj:
        spline_kws:

    Returns:

    """
    if spline_kws is None:
        spline_kws = dict()
    x_regrid_image, y_regrid_image = transform(image_proj, regrid_proj,
                                               x_regrid.ravel(),
                                               y_regrid.ravel())
    rbs = RectBivariateSpline(x_image, y_image, image, **spline_kws)
    regridded_image = rbs.ev(x_regrid_image,
                             y_regrid_image).reshape(x_regrid.shape)
    return regridded_image
Code example #19
    def get_pixels(self, DU, DV, image):
        """Return pixel values for each xyz point from the image

        Arguments:
            DU (np.ndarray): Pixel location in camera orientation and coordinate system
            DV (np.ndarray): Pixel location in camera orientation and coordinate system

        Returns:
            K (np.ndarray): Pixel intensity for each point in the image
        """
        K = np.zeros((self.target_grid.X.shape[0], self.target_grid.X.shape[1],
                      self.ncolors))
        for c, _ in enumerate(['r', 'b', 'g']):
            rbs = RectBivariateSpline(
                # use this range to match matlab exactly
                np.arange(1, image.shape[0] + 1),
                np.arange(1, image.shape[1] + 1),
                image[:, :, c],
                kx=1,
                ky=1)
            K[:, :, c] = rbs.ev(DV, DU)

        # mask out values out of range like matlab
        # avoid runtime nan comparison warning (UV, DV already have nans)
        with np.errstate(invalid='ignore'):
            mask_u = np.logical_or(DU <= 1, DU >= image.shape[1])
            mask_v = np.logical_or(DV <= 1, DV >= image.shape[0])
        mask = np.logical_or(mask_u, mask_v)
        K[mask] = np.nan

        return K
Code example #20
def get_elevations(subpaths, ufnames, flagsmissing):
    interpvalues = np.empty(0, dtype=int)
    for i in range(len(subpaths)):
        # open srtmdata filename according to subpath
        path = Path.joinpath(hgtpath, ufnames[i])
        if not flagsmissing[i]:
            with open(path, 'rb') as data:
                # data is stored in srtmdata files in big-endian format:
                # SRTM1 (1-arc sampling) is used, so each file contains 3601x3601 individual elevations
                # after reading all elevations in the file, we reshape them into a rectangular numpy array
                elevationdata = np.fromfile(data, np.dtype('>i2'),
                                            3601**2).reshape(3601, 3601)

            # interpolate elevation values with scipy.interpolate.RectBivariateSpline
            f1 = RectBivariateSpline(x, x, elevationdata, kx=1, ky=1)
            latcorr = int(ufnames[i][1:3]) + 1
            loncorr = int(ufnames[i][4:7])
            interpvalues = np.hstack((interpvalues,
                                      f1.ev(latcorr - subpaths[i][0],
                                            subpaths[i][1] - loncorr)))
        else:
            interpvalues = np.hstack(
                (interpvalues, np.zeros_like(subpaths[i][0])))

    return interpvalues
Code example #21
def put_psf_on_subarray(psf, y, frame_height=256):
    """Make a 2D SOSS trace from a sequence of psfs and trace center locations

    Parameters
    ----------
    psf: sequence
        The 2D psf
    y: float
        The grid y value to place the center of the psf
    frame_height: int
        The number of rows of the output frame

    Returns
    -------
    np.ndarray
        The 2D frame with the interpolated psf
    """
    # Create spline generator
    dim = psf.shape[0]
    mid = (dim - 1.0) / 2.0
    arr = np.arange(dim, dtype=float)  # np.float is removed in recent NumPy
    spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)

    # Create output frame, shifted as necessary
    yg, xg = np.indices((frame_height, dim), dtype=np.float64)
    yg += mid - y

    # Resample onto the subarray
    frame = spline.ev(xg, yg)

    # Fill resampled points with zeros
    extrapol = (((xg < -0.5) | (xg >= dim - 0.5)) | ((yg < -0.5) | (yg >= dim - 0.5)))
    frame[extrapol] = 0

    return frame
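
A hypothetical usage sketch for put_psf_on_subarray above, dropping a small synthetic Gaussian PSF onto the default 256-row frame (the PSF size and centre row are made up):

import numpy as np

dim = 33
yy, xx = np.indices((dim, dim))
psf = np.exp(-((xx - 16)**2 + (yy - 16)**2) / (2 * 3.0**2))  # synthetic Gaussian PSF
frame = put_psf_on_subarray(psf, y=100.3)
print(frame.shape)           # (256, 33)
print(frame[100].max() > 0)  # most of the flux lands near row 100
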
Code example #22
    def Transformation(self, originalImage, targetImage, Target,
                       original):  # this is image
        # print(targetimage)
        # print(xp,yp)
        #NbyTwo[:, [0, 1]] = NbyTwo[:, [1, 0]]
        NbyTwo = Target.getPoints()
        #getCoord = np.vectorize(lambda x, y, a: self.inverseMatrix[a, 0] * y + self.inverseMatrix[a, 1] * x + self.inverseMatrix[a, 2], otypes=[np.float64])
        #NbyTwo[:, [1, 0]] = NbyTwo[:, [0, 1]]
        xp, yp = np.transpose(NbyTwo)  # the points that reside in one triangle, from the morpher
        x = self.calcXPoints(xp, yp)
        y = self.calcYPoints(xp, yp)
        # xdim = np.array([a for a in range(int(min(original.vertices[:, 1])), int(max(original.vertices[:, 1])))])
        # # print(len(xdim))
        # ydim = np.array([a for a in range(int(min(original.vertices[:, 0])), int(max(original.vertices[:, 0])))])
        #
        # xyVal = originalImage[xdim[0]:xdim[len(xdim) - 1]+1, ydim[0]:ydim[len(ydim) - 1]+1]

        xdim = np.arange(np.amin(original.vertices[:, 1]),
                         np.amax(original.vertices[:, 1]), 1)
        ydim = np.arange(np.amin(original.vertices[:, 0]),
                         np.amax(original.vertices[:, 0]), 1)
        xyVal = originalImage[int(xdim[0]):int(xdim[-1] + 1),
                              int(ydim[0]):int(ydim[-1] + 1)]
        bilinear = RectBivariateSpline(xdim, ydim, xyVal, kx=1, ky=1)
        targetImage[xp, yp] = bilinear.ev(x, y)
Code example #23
    def _get_above_cloud_profiles(self, P_profile, T_profile, abundances,
                                  planet_mass, planet_radius, star_radius,
                                  above_cloud_cond, T_star=None):
        
        assert(len(P_profile) == len(T_profile))
        # First, get atmospheric weight profile
        mu_profile = np.zeros(len(P_profile))
        atm_abundances = {}
        
        for species_name in abundances:
            interpolator = RectBivariateSpline(
                np.log10(self.P_grid), self.T_grid,
                np.log10(abundances[species_name]), kx=1, ky=1)
            abund = 10**interpolator.ev(np.log10(P_profile), T_profile)
            atm_abundances[species_name] = abund
            mu_profile += abund * self.mass_data[species_name]

        radii, dr = _hydrostatic_solver._solve(
            P_profile, T_profile, self.ref_pressure, mu_profile, planet_mass,
            planet_radius, star_radius, above_cloud_cond, T_star)
        
        for key in atm_abundances:
            atm_abundances[key] = atm_abundances[key][above_cloud_cond]
            
        return radii, dr, atm_abundances, mu_profile
Code example #24
def get_subpixel(res):
    mgx, mgy = np.meshgrid(np.arange(-1, 1.01, 0.1),
                           np.arange(-1, 1.01, 0.1),
                           indexing='xy')  # sub-pixel mesh

    minval, _, minloc, _ = cv2.minMaxLoc(res)
    rbs_halfsize = 3  # size of peak area used for spline for subpixel peak loc
    rbs_order = 4  # polynomial order for subpixel rbs interpolation of peak location

    if ((np.array([n - rbs_halfsize
                   for n in minloc]) >= np.array([0, 0])).all()
            & (np.array([(n + rbs_halfsize)
                         for n in minloc]) < np.array(list(res.shape))).all()):
        rbs_p = RBS(
            range(-rbs_halfsize, rbs_halfsize + 1),
            range(-rbs_halfsize, rbs_halfsize + 1),
            res[(minloc[1] - rbs_halfsize):(minloc[1] + rbs_halfsize + 1),
                (minloc[0] - rbs_halfsize):(minloc[0] + rbs_halfsize + 1)],
            kx=rbs_order,
            ky=rbs_order)

        b = rbs_p.ev(mgx.flatten(), mgy.flatten())
        mml = cv2.minMaxLoc(b.reshape(21, 21))
        # mgx,mgy: meshgrid x,y of common area
        # sp_delx,sp_dely: subpixel delx,dely
        sp_delx = mgx[mml[3][0], mml[3][1]]
        sp_dely = mgy[mml[3][0], mml[3][1]]
    else:
        sp_delx = 0.0
        sp_dely = 0.0
    return sp_delx, sp_dely
Code example #25
def imresize2D(im0, h0, dim):

    h0 = h0[0:2]
    dim = dim[0:2]

    # Image size
    dim0 = im0.shape

    # Create a regular grid
    x0, y0, x0lin, y0lin = gengrid.centered2D(dim0, h0)

    # Require the same FOV
    fov0 = dim0 * h0

    # New voxelsize
    h = fov0 / dim

    # Create a regular grid where we want the new values
    x, y, xlin, ylin = gengrid.centered2D(dim, h)

    # Interpolation
    f = RectBivariateSpline(x0lin, y0lin, im0)
    x = x.flatten()
    y = y.flatten()

    # Must reverse the order of x and y, otherwise it's rotated
    im = f.ev(y, x).reshape(dim[0], dim[1])

    return im
Code example #26
    def _p(self, K, c):
        if self.params['use_interp']:
            data_path = os.path.join(os.path.dirname(__file__), 'data')
            data = np.load(os.path.join(data_path, "uKc_einasto.npz"))

            pk = data['pk']
            _k = data['K']
            _c = data['c']

            c = np.atleast_1d(c)
            if np.isscalar(K):
                K = np.atleast_2d(K)
            if K.ndim < 2:
                if len(K) != len(c):
                    K = np.atleast_2d(K).T  # should be len(rs) x len(k)
                else:
                    K = np.atleast_2d(K)
            pk[pk <= 0] = 1e-8

            spl = RectBivariateSpline(np.log(_k), np.log(_c), np.log(pk))
            cc = np.repeat(c, K.shape[0])
            return np.exp(
                self._reduce(
                    spl.ev(np.log(K.flatten()), np.log(cc)).reshape(K.shape)))
        else:  #Numerical version.
            return super(Einasto, self)._p(K, c)
Code example #27
    def test_spline_2d_outofbounds(self):
        x = np.array([.5, 2., 3., 4., 5.5])
        y = np.array([.5, 2., 3., 4., 5.5])
        z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
                      [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
        lut = RectBivariateSpline(x, y, z)

        xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
        actual = interpn((x, y),
                         z,
                         xi,
                         method="splinef2d",
                         bounds_error=False,
                         fill_value=999.99)
        expected = lut.ev(xi[:, 0], xi[:, 1])
        expected[2:4] = 999.99
        assert_array_almost_equal(actual, expected)

        # no extrapolation for splinef2d
        assert_raises(ValueError,
                      interpn, (x, y),
                      z,
                      xi,
                      method="splinef2d",
                      bounds_error=False,
                      fill_value=None)
Code example #28
File: __init__.py Project: woozey/pycrystem
def correlate(image,
              pattern,
              include_direct_beam=False,
              sim_threshold=1e-5,
              interpolate=False,
              **kwargs):
    """The correlation between a diffraction pattern and a simulation.
    Calculated using
        .. math::
            \frac{\sum_{j=1}^m P(x_j, y_j) T(x_j, y_j)}{\sqrt{\sum_{j=1}^m P^2(x_j, y_j)} \sqrt{\sum_{j=1}^m T^2(x_j, y_j)}}
    Parameters
    ----------
    image : :class:`np.ndarray`
        A single electron diffraction signal. Should be appropriately scaled
        and centered.
    pattern : :class:`DiffractionSimulation`
        The pattern to compare to.
    sim_threshold : float
        The threshold simulation intensity to consider for correlation
    interpolate : bool
        If True, perform sub-pixel interpolation of the image.
    **kwargs
        Arguments to pass to scipy.interpolate.RectBivariateSpline
    Returns
    -------
    float
        The correlation coefficient.
    References
    ----------
    E. F. Rauch and L. Dupuy, “Rapid Diffraction Patterns identification through
        template matching,” vol. 50, no. 1, pp. 87–99, 2005.
    """
    shape = image.shape
    half_shape = tuple(i // 2 for i in shape)

    pixel_coordinates = pattern.calibrated_coordinates.astype(
        int)[:, :2] + half_shape
    in_bounds = np.product(
        (pixel_coordinates > 0) * (pixel_coordinates < shape[0]),
        axis=1).astype(bool)
    pattern_intensities = pattern.intensities
    large_intensities = pattern_intensities > sim_threshold
    mask = np.logical_and(in_bounds, large_intensities)

    if interpolate:
        x = np.arange(shape[0], dtype='float') - half_shape[0]
        y = np.arange(shape[1], dtype='float') - half_shape[1]
        for ar, i in zip([x, y], shape):
            if not i % 2:
                ar += 0.5
        x = x * pattern.calibration[0]
        y = y * pattern.calibration[1]
        ip = RectBivariateSpline(x, y, image.T, **kwargs)
        image_intensities = ip.ev(pattern.coordinates[:, 0][mask],
                                  pattern.coordinates[:, 1][mask])
    else:
        image_intensities = image.T[pixel_coordinates[:, 0][in_bounds],
                                    pixel_coordinates[:, 1][in_bounds]]
    pattern_intensities = pattern_intensities[mask]
    return np.nan_to_num(_correlate(image_intensities, pattern_intensities))
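
The _correlate helper is not shown in this snippet; a minimal standalone sketch of the normalised correlation score written out in the docstring above (purely illustrative, not the package's implementation):

import numpy as np

def _correlate_sketch(image_intensities, pattern_intensities):
    # sum(P*T) / (sqrt(sum(P^2)) * sqrt(sum(T^2))), as in the docstring formula
    num = np.sum(image_intensities * pattern_intensities)
    den = np.sqrt(np.sum(image_intensities**2)) * np.sqrt(np.sum(pattern_intensities**2))
    return num / den
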
Code example #29
def SubtractDominantMotion(image1, image2):
    # Input:
    #	Images at time t and t+1
    # Output:
    #	mask: [nxm]
    # put your implementation here
    M = InverseCompositionAffine(image1, image2)
    tol = 0.35

    h, w = image1.shape
    x = np.arange(w)
    y = np.arange(h)
    mask = np.ones((h, w), dtype=bool)

    x_temp = np.arange(0, w)
    y_temp = np.arange(0, h)
    xi, yi = np.meshgrid(x_temp, y_temp)
    ones = np.ones(w * h)
    xy1 = np.array([yi.flatten(), xi.flatten(), ones])

    xy1_new = np.matmul(np.vstack((M, np.array([0, 0, 1]))), xy1)
    y_new = xy1_new[0, :]
    x_new = xy1_new[1, :]

    image2_spline = RectBivariateSpline(y, x, image2)
    temp = np.array(image2_spline.ev(y_new, x_new).tolist())
    image1_new = np.reshape(temp, (h, w))

    abs_diff = np.absolute(image1_new - image1)
    ind = (abs_diff >= tol)

    abs_diff[ind] = 1
    abs_diff[~ind] = 0
    mask = abs_diff
    return mask
Code example #30
def distort(image, center, angle, lengths):
    """Takes an image and distorts the image based on an elliptical distortion

    Parameters
    ---------------
    image: array-like
        The image to apply the elliptical distortion to
    center: list
        The center of the ellipse
    angle: float
        The angle of the major axis in radians
    lengths: The lengths of the major and minor axis of the ellipse

    Returns
    ------------
    distorted:array-like
        The elliptically distorted image
    """
    img_shape = np.shape(image)
    initial_y, initial_x = range(-center[1], img_shape[-2] - center[1]), range(
        -center[0], img_shape[-1] - center[0])
    spline = RectBivariateSpline(initial_x, initial_y, image, kx=1, ky=1)
    xInd, yInd = cartesian_to_ellipse(center=center,
                                      angle=angle,
                                      lengths=lengths)
    distorted = np.array(spline.ev(yInd, xInd))
    return distorted
Code example #31
 def interpolate_grid(self, in_lon, in_lat):
     """
     Interpolates MRMS data to a different grid using cubic bivariate splines
     """
     out_data = np.zeros(
         (self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
     for d in range(self.data.shape[0]):
         print("Loading ", d, self.variable, self.start_date)
         if self.data[d].max() > -999:
             step = self.data[d]
             step[step < 0] = 0
             if self.lat[-1] < self.lat[0]:
                 spline = RectBivariateSpline(self.lat[::-1],
                                              self.lon,
                                              step[::-1],
                                              kx=3,
                                              ky=3)
             else:
                 spline = RectBivariateSpline(self.lat,
                                              self.lon,
                                              step,
                                              kx=3,
                                              ky=3)
             print("Evaluating", d, self.variable, self.start_date)
             flat_data = spline.ev(in_lat.ravel(), in_lon.ravel())
             out_data[d] = flat_data.reshape(in_lon.shape)
             del spline
         else:
             print(d, " is missing")
             out_data[d] = -9999
     return out_data
Code example #32
def main():

    basis = fits.open('basis.fits')[0].data
    aperture = fits.open('aperture.fits')[0].data

    name = sys.argv[1]
    size = basis.shape[1]
    size_ = int(sys.argv[2])

    x_old_res = np.linspace(-size / 2, size / 2, size)
    y_old_res = np.linspace(-size / 2, size / 2, size)

    x_new_res = np.linspace(-size / 2, size / 2, size_)
    y_new_res = np.linspace(-size / 2, size / 2, size_)

    abscissae_lr, ordinates_lr = np.meshgrid(x_old_res, y_old_res)
    abscissae_nr, ordinates_nr = np.meshgrid(x_new_res, y_new_res)

    basis_new = np.zeros((basis.shape[0], size_, size_))
    for i in range(basis.shape[0]):
        spline_basis_functions = RectBivariateSpline(x_old_res, y_old_res,
                                                     basis[i])
        basis_new[i] = spline_basis_functions.ev(ordinates_nr,
                                                 abscissae_nr) * aperture

    writefits(basis_new, name)
Code example #33
File: util.py Project: oxford-pcs/psf-simulator
def resample2d(i_data, i_s, i_e, i_i, o_s, o_e, o_i, kx=3, ky=3, s=0, 
               gauss_sig=0, median_boxcar_size=0, clip=True):
  '''
    Resample a square 2D input grid with extents defined by [i_s] and [i_e] with 
    increment [i_i] to a new 2D grid with extents defined by [o_s] and [o_e] 
    with increment [o_i].
    
    Returns a 2D resampled array, with options for smoothing (gaussian and 
    median) and clipping.
  '''
  
  # calculate bivariate spline, G, using input grid and data
  grid_pre_rebin = np.arange(i_s, i_e, i_i)
  G = RectBivariateSpline(grid_pre_rebin, grid_pre_rebin, i_data, kx=kx, ky=ky)

  # evaluate this spline at new points on output grid
  grid_x, grid_y = np.mgrid[o_s:o_e:o_i, o_s:o_e:o_i]

  data = G.ev(grid_x, grid_y)
  
  if gauss_sig != 0:
    data = gaussian_filter(data, gauss_sig)
    
  if median_boxcar_size != 0:
    data = median_filter(data, median_boxcar_size)
    
  if clip:
    input_max = np.max(i_data)
    input_min = np.min(i_data)
    
    data[np.where(data>input_max)] = input_max
    data[np.where(data<input_min)] = input_min

  return data
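
A hypothetical usage sketch for resample2d above: resample a coarse Gaussian defined on [-5, 5) with step 0.5 onto a finer grid on [-4, 4) with step 0.1. Smoothing is left disabled, so the gaussian_filter/median_filter imports of the surrounding module are not exercised.

import numpy as np

coarse = np.arange(-5, 5, 0.5)
cx, cy = np.meshgrid(coarse, coarse, indexing='ij')
i_data = np.exp(-(cx**2 + cy**2) / 2.0)

fine = resample2d(i_data, -5, 5, 0.5, -4, 4, 0.1)
print(fine.shape)  # (80, 80)
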
Code example #34
    def _interpolate_array(self, target) -> np.ndarray:
        """
        Interpolates SLSTR data arrays based on cartesian information
        contained within the sensor product using the RectBivariateSpline
        approach.

        Args:
            target: the product to be interpolated

        Returns:
            The interpolated data
        """
        sat_zn = self.product['geometry_tn'][target][:]

        tx_x_var = self.product['cartesian_tx']['x_tx'][0, :]
        tx_y_var = self.product['cartesian_tx']['y_tx'][:, 0]

        an_x_var = self.product['cartesian_an']['x_an'][:]
        an_y_var = self.product['cartesian_an']['y_an'][:]

        spl = RectBivariateSpline(tx_y_var, tx_x_var[::-1],
                                  sat_zn[:, ::-1].filled(0))
        interpolated = spl.ev(an_y_var.compressed(), an_x_var.compressed())
        interpolated = np.ma.masked_invalid(interpolated, copy=False)
        sat = np.ma.empty(an_y_var.shape, dtype=sat_zn.dtype)
        sat[np.logical_not(np.ma.getmaskarray(an_y_var))] = interpolated
        sat.mask = an_y_var.mask
        return sat
Code example #35
File: imagetransform.py Project: neurokernel/retina
    def interpolate_individual(self, image):
        # unpacking
        ogridx, ogridy = self.ogrid
        ngridx, ngridy = self.ngrid

        f = RectBivariateSpline(ogridy, ogridx, image, kx=1, ky=1)
        return f.ev(ngridy.flatten(), ngridx.flatten()).reshape(ngridx.shape)
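
A standalone sketch of the same resampling pattern, with made-up arrays standing in for self.ogrid and self.ngrid:

import numpy as np
from scipy.interpolate import RectBivariateSpline

ogridx = np.linspace(0.0, 1.0, 64)
ogridy = np.linspace(0.0, 1.0, 48)
image = np.random.default_rng(1).random((48, 64))        # shape (len(ogridy), len(ogridx))

ngridx, ngridy = np.meshgrid(np.linspace(0.1, 0.9, 32),
                             np.linspace(0.1, 0.9, 24))  # target coordinates

f = RectBivariateSpline(ogridy, ogridx, image, kx=1, ky=1)
resampled = f.ev(ngridy.flatten(), ngridx.flatten()).reshape(ngridx.shape)
print(resampled.shape)  # (24, 32)
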
Code example #36
def LucasKanade(It, It1, rect, p0=np.zeros(2)):
    # Input:
    #	It: template image
    #	It1: Current image
    #	rect: Current position of the car
    #	(top left, bot right coordinates)
    #	p0: Initial movement vector [dp_x0, dp_y0]
    # Output:
    #	p: movement vector [dp_x, dp_y]

    # Put your implementation here
    p = p0.copy()  # copy so the mutable default p0 is not modified in place by p += dp

    x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
    threshold = 1e-3
    dp = np.array([float("inf"), float("inf")])
    interpolated_It = RectBivariateSpline(
        x=np.array([i for i in range(int(It.shape[0]))]),
        y=np.array([i for i in range(int(It.shape[1]))]),
        z=It)
    interpolated_It1 = RectBivariateSpline(
        x=np.array([i for i in range(int(It1.shape[0]))]),
        y=np.array([i for i in range(int(It1.shape[1]))]),
        z=It1)

    while np.sum(np.square(dp)) >= threshold:
        warp_x = np.arange(x1 + p[0], x2 + p[0] + .5)
        warp_y = np.arange(y1 + p[1], y2 + p[1] + .5)
        warp_X = np.array([warp_x for i in range(len(warp_y))])
        warp_Y = np.array([warp_y for i in range(len(warp_x))]).T
        warp_It1x = interpolated_It1.ev(warp_Y, warp_X)

        x = np.arange(x1, x2 + .5)
        y = np.arange(y1, y2 + .5)
        X = np.array([x for i in range(len(y))])
        Y = np.array([y for i in range(len(x))]).T
        Itx = interpolated_It.ev(Y, X)

        A = np.array([
            interpolated_It1.ev(warp_Y, warp_X, dx=0, dy=1).flatten(),
            interpolated_It1.ev(warp_Y, warp_X, dx=1, dy=0).flatten()
        ]).T
        b = (Itx - warp_It1x).flatten()

        dp = np.dot(np.linalg.inv(np.dot(A.T, A)), np.dot(A.T, b))
        p += dp
    return p
Code example #37
def InverseCompositionAffine(It, It1, threshold, num_iters):
    """
    :param It: template image
    :param It1: Current image
    :param threshold: if the length of dp is smaller than the threshold, terminate the optimization
    :param num_iters: number of iterations of the optimization
    :return: M: the Affine warp matrix [3x3 numpy array]
    """

    # put your implementation here
    M = np.eye(3)
    H0, W0 = It.shape
    H1, W1 = It1.shape

    It_interpolate = RectBivariateSpline(np.arange(0, H0, 1), np.arange(0, W0, 1), It)
    It1_interpolate = RectBivariateSpline(np.arange(0, H1, 1), np.arange(0, W1, 1), It1)

    # Gradient of warpped It
    x_warp, y_warp = np.meshgrid(np.arange(0, W0, 1), np.arange(0, H0, 1))
    delta_I = np.array([It_interpolate.ev(y_warp, x_warp, dx=0, dy=1).flatten(),
                        It_interpolate.ev(y_warp, x_warp, dx=1, dy=0).flatten()]).T
    A_origin = np.array([delta_I[:, 0] * x_warp.flatten(), delta_I[:, 0] * y_warp.flatten(), delta_I[:, 0], delta_I[:, 1] * x_warp.flatten(), delta_I[:, 1] * y_warp.flatten(), delta_I[:, 1]]).T

    for _ in range(int(num_iters)):
        # Warp It+1
        x1, y1 = np.meshgrid(np.arange(0, W1, 1), np.arange(0, H1, 1))
        x1_warp = M[0, 0] * x1 + M[0, 1] * y1 + M[0, 2]
        y1_warp = M[1, 0] * x1 + M[1, 1] * y1 + M[1, 2]

        # Only get common region for It and warpped It+1
        overlap = (0 <= x1_warp) & (x1_warp < W1) & (0 <= y1_warp) & (y1_warp < H1)
        x1_warp = x1_warp[overlap]
        y1_warp = y1_warp[overlap]
        It1_warp = It1_interpolate.ev(y1_warp, x1_warp)

        A = A_origin[overlap.flatten()]
        b = It1_warp - It[overlap]
        delta_p = np.dot(np.dot(np.linalg.inv(np.dot(A.T, A)), A.T), b.flatten())
        delta_M = np.concatenate((delta_p, np.array([0, 0, 1]))).reshape((3, 3))
        delta_M[0, 0] += 1
        delta_M[1, 1] += 1
        M = np.dot(M, np.linalg.inv(delta_M))

        if np.linalg.norm(delta_p) < threshold:
            break

    return M
Code example #38
def getStraightenWormInt(worm_img, skeleton, half_width = -1, cnt_widths  = np.zeros(0), width_resampling = 7, ang_smooth_win = 12, length_resampling = 49):
    '''
        Code to straighten worm images.
        worm_img - image containing the worm
        skeleton - worm skeleton
        half_width - half width of the worm; if it is -1 it will be estimated from cnt_widths
        cnt_widths - contour widths used in case the half width is not given
        width_resampling - number of data points used in the intensity map along the worm width
        length_resampling - number of data points used in the intensity map along the worm length
        ang_smooth_win - window used to calculate the skeleton angles. 
            A small value will introduce noise, therefore obtaining bad perpendicular segments.
            A large value will over smooth the skeleton, therefore not capturing the correct shape.
        
    '''
    #if np.all(np.isnan(skeleton)):
    #    buff = np.empty((skeleton.shape[0], width_resampling))
    #    buff.fill(np.nan)
    #    return buff
    assert half_width>0 or cnt_widths.size>0
    assert not np.any(np.isnan(skeleton))
    
    if ang_smooth_win%2 == 1:
        ang_smooth_win += 1; 
    
    if skeleton.shape[0] != length_resampling:
        skeleton, _ = curvspace(np.ascontiguousarray(skeleton), length_resampling)
    
    skelX = skeleton[:,0];
    skelY = skeleton[:,1];
    
    assert np.max(skelX) < worm_img.shape[0]
    assert np.max(skelY) < worm_img.shape[1]
    assert np.min(skelX) >= 0
    assert np.min(skelY) >= 0
    
    #calculate smoothed angles
    skel_angles = angleSmoothed(skelX, skelY, ang_smooth_win)
    
    #%get the perpendicular angles to define line scans (orientation doesn't
    #%matter here so subtracting pi/2 should always work)
    perp_angles = skel_angles - np.pi/2;
    
    #%for each skeleton point get the coordinates for two line scans: one in the
    #%positive direction along perpAngles and one in the negative direction (use
    #%two that both start on skeleton so that the intensities are the same in
    #%the line scan)
    
    #resample the points along the worm width
    if half_width <= 0:
        half_width = (np.median(cnt_widths[10:-10])/2.) #add half a pixel to get part of the contour
    r_ind = np.linspace(-half_width, half_width, width_resampling)
    
    #create the grid of points to be interpolated (make use of numpy implicit broadcasting Nx1 + 1xM = NxM)
    grid_x = skelX + r_ind[:, np.newaxis]*np.cos(perp_angles);
    grid_y = skelY + r_ind[:, np.newaxis]*np.sin(perp_angles);
    
    
    f = RectBivariateSpline(np.arange(worm_img.shape[0]), np.arange(worm_img.shape[1]), worm_img)
    return f.ev(grid_y, grid_x) #return interpolated intensity map
Code example #39
class SplineEstimator(object):

    def fit(self, x, y):
        self.lut = RectBivariateSpline(x[1], x[0], y)
        return self

    def predict(self, X):
        return self.lut.ev(X[:, 1], X[:, 0])
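
A hypothetical usage sketch for SplineEstimator above. Note that fit expects x = (x_axis, y_axis) with y shaped (len(x[1]), len(x[0])), and predict expects query points whose columns are (x, y):

import numpy as np

gx = np.linspace(0.0, 1.0, 20)                    # x[0]
gy = np.linspace(0.0, 2.0, 30)                    # x[1]
vals = np.sin(gy)[:, None] * np.cos(gx)[None, :]  # shape (30, 20)

est = SplineEstimator().fit((gx, gy), vals)
queries = np.array([[0.25, 1.0], [0.5, 0.3]])     # columns: (x, y)
print(est.predict(queries))                       # approximately sin(y) * cos(x)
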
Code example #40
    def getEroDep(self, xo = None, yo = None, xm = None, ym = None,
                    pts = 100, gfilter = 5):
        """
        Extract a slice from the 3D data set and compute its deposition thicknesses.
        Parameters
        ----------
        variable: xo, yo
            Lower X,Y coordinates of the cross-section
        variable: xm, ym
            Upper X,Y coordinates of the cross-section
        variable: pts
            Number of points to discretise the cross-section
        variable: gfilter
            Gaussian smoothing filter
        """

        if xm > self.x.max():
            xm = self.x.max()

        if ym > self.y.max():
            ym = self.y.max()

        if xo < self.x.min():
            xo = self.x.min()

        if yo < self.y.min():
            yo = self.y.min()


        xsec, ysec = self._cross_section(xo, yo, xm, ym, pts)
        self.dist = np.sqrt(( xsec - xo )**2 + ( ysec - yo )**2)

        # Surface
        rect_B_spline = RectBivariateSpline(self.y[:,0], self.x[0,:], self.z)
        datatop = rect_B_spline.ev(ysec, xsec)
        self.top = filters.gaussian_filter1d(datatop,sigma=gfilter)

        # Cumchange
        rect_B_spline = RectBivariateSpline(self.y[:,0], self.x[0,:], self.cumchange)
        cumdat = rect_B_spline.ev(ysec, xsec)
        gcum = filters.gaussian_filter1d(cumdat,sigma=gfilter)
        self.depo = gcum.clip(min=0)

        return
Code example #41
File: psi_map.py Project: pyIPP/trgui
    def __init__(self, R_in, z_in, Raxis, zaxis, psi_in, R_out, z_out, psi_sep=0):

        print('2d interp')

        self.error = 0

# Check input dimensions, R_out z_out must be flat

        if len(R_out) != len(z_out):
            print('R and z must have the same dimensions')
            self.error = 1
            return
        if np.array(R_out).ndim > 1:
            print('R_out must be flat')
            self.error = 2
            return
        if np.array(z_out).ndim > 1:
            print('z_out must be flat')
            self.error = 3
            return
            
        nz_psi, nR_psi = psi_in.shape
        if len(R_in) != nR_psi:
            print('Inconsistent R axis for psi_in')
            self.error = 5
            return
        if len(z_in) != nz_psi:
            print('Inconsistent z axis for psi_in')
            self.error = 6
            return

        nRz = len(R_out)
        self.psi_red = np.zeros(nRz)

# Bilinear interpolation

        bisp = RectBivariateSpline(z_in, R_in, psi_in)
        self.psi_axis = bisp.ev(zaxis, Raxis)
        for jRz, R in enumerate(R_out):
            z = z_out[jRz]
            self.psi_red[jRz] = bisp.ev(z, R)

        self.psi_norm = (self.psi_red - self.psi_axis)/(psi_sep - self.psi_axis)
        self.rho_pol = np.sqrt(self.psi_norm)
Code example #42
File: simulations.py Project: wmorning/EvilLens
def assign_phases_to_antennas(ant1,ant2,antX,antY,PhaseGrid,phase_x,phase_y,velocity,time):
    '''
    Given antenna IDs, coordinates, and the phase grid and its coordinates:  Translate
    the antennas across the grid and record the phase for each antenna.  Returns a 1D array
    of antenna phases (uncalibrated) for the 1st and 2nd antenna in each observation
    '''
    
    f_interp = RectBivariateSpline(phase_y,phase_x,PhaseGrid,kx=1,ky=1)
    antenna1_phase = f_interp.ev(antY[ant1],antX[ant1]+velocity*time)
    antenna2_phase = f_interp.ev(antY[ant2],antX[ant2]+velocity*time)
    
    # Also want to get a 2d array of the phase of each antenna with time, to ease with calibration
    Ntsteps = len(time) // (len(antX) * (len(antX) - 1) // 2)
    antennaphases = np.zeros([len(antX), Ntsteps], float)
    tstepsize = np.unique(time)[1] - np.unique(time)[0]
    for i in range(len(antX)):
        antennaphases[i,:] = f_interp.ev(antY[i]*np.ones(Ntsteps),antX[i]+tstepsize*velocity*np.arange(Ntsteps))
    
    return antenna1_phase,antenna2_phase,antennaphases
Code example #43
File: brane.py Project: achael/scatterbrane
  def scatter(self,move_pix=0,scale=1):
    '''
    Generate the scattered image which is stored in the ``iss`` member.

    :param move_pix: (optional) int 
      Number of pixels to roll the screen (for time evolution).
    :param scale: (optional) scalar
      Scale factor for gradient.  To simulate the scattering effect at another 
      wavelength this is (lambda_new/lambda_old)**2
    '''

    M = self.model.shape[-1]       # size of original image array
    N = self.nx                    # size of output image array

    #if not self.live_dangerously: self._checkSanity()

    # calculate phase gradient
    dphi_x,dphi_y = self._calculate_dphi(move_pix=move_pix)

    if scale != 1:
        dphi_x *= scale/sqrt(2.)
        dphi_y *= scale/sqrt(2.)

    xx_,yy = np.meshgrid((np.arange(N) - 0.5*(N-1)),\
                         (np.arange(N) - 0.5*(N-1)),indexing='xy')

    # check whether we care about PA of scattering kernel
    if self.pa is not None:
      f_model = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                    self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                    self.model)

      # apply rotation
      theta = -(90 * pi / 180) + np.radians(self.pa)     # rotate CW 90 deg, then CCW by PA
      xx_ += dphi_x
      yy  += dphi_y
      xx = cos(theta)*xx_ - sin(theta)*yy
      yy = sin(theta)*xx_ + cos(theta)*yy
      self.iss  = f_model.ev(yy.flatten(),xx.flatten()).reshape((self.nx,self.nx))

      # rotate back and clip for positive values for I
      if self.think_positive:
          self.iss  = clip(rotate(self.iss,-1*theta/np.pi*180,reshape=False),a_min=0,a_max=1e30) * (self.dx/self.model_dx)**2
      else:
          self.iss  = rotate(self.iss,-1*theta/np.pi*180,reshape=False) * (self.dx/self.model_dx)**2

    # otherwise do a faster lookup rather than the expensive interpolation.
    else:
      yyi = np.rint((yy+dphi_y+self.nx/2)).astype(int) % self.nx
      xxi = np.rint((xx_+dphi_x+self.nx/2)).astype(int) % self.nx
      if self.think_positive:
        self.iss = clip(self.isrc[yyi,xxi],a_min=0,a_max=1e30)
      else:
        self.iss = self.isrc[yyi,xxi]
Code example #44
 def x_sig( self, x, sigma ):
     eps_list, mu_q = self.spirrid_response
     eps_sig = InterpolatedUnivariateSpline( mu_q[0, :], eps_list[1] )
     if mu_q.max() <= sigma:
         raise ValueError( 'applied stress higher than the maximum in micromechanical evaluation of a CB' )
     eps = eps_sig( sigma )
     spline = RectBivariateSpline( eps_list[0], eps_list[1], mu_q )
     sigma_f = spline.ev( x, ones( len( x ) ) * eps ) / self.V_f
     sigma_m = ( sigma - sigma_f * self.V_f ) / self.V_m
     return sigma_m
Code example #45
def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
    """ Histogram-based Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed.
        If set to none, input coordinates are used.
    bins: tuple (binsx, binsy)
        The number of bins to use for the histogram.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `numpy.histogram2d`
    `scipy.interpolate.RectBivariateSpline`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None)
                   )
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    if bins is None:
        bins = (max(5, bin_num_doane(events_x)),
                max(5, bin_num_doane(events_y)))

    # Compute the histogram
    hist2d, xedges, yedges = np.histogram2d(x=events_x,
                                            y=events_y,
                                            bins=bins,
                                            density=True)
    xip = xedges[1:]-(xedges[1]-xedges[0])/2
    yip = yedges[1:]-(yedges[1]-yedges[0])/2

    estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
    density = estimator.ev(xout, yout)
    density[density < 0] = 0

    return density.reshape(xout.shape)
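A short usage sketch, assuming numpy is imported as np and the function above is in scope; the sample data and the bin counts are made up (passing bins explicitly avoids the bin_num_doane helper):

rng = np.random.default_rng(0)
events_x = rng.normal(size=1000)
events_y = 0.5 * events_x + rng.normal(scale=0.5, size=1000)

# Density estimated at the input points themselves
density = kde_histogram(events_x, events_y, bins=(40, 40))
print(density.shape)        # (1000,)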
Code example #46
    def add_dem_2D(self, x, dem, y0=0., y1=np.inf, yref=None, kx=3, ky=1,
                   s=None):
        '''
        Add topography by vertically stretching the domain in the region [y0,
        y1] - points below y0 are kept fixed, points above y1 are moved as
        the DEM, points in between are interpolated.

        Usage: first call add_dem_2D for each boundary that is to be perturbed
            and finally call apply_dem to add the perturbation to the mesh
            coordinates.

        :param x: x coordinates of the DEM
        :type x: numpy array
        :param dem: the DEM
        :type dem: numpy array
        :param y0: vertical coordinate, at which the stretching begins
        :type y0: float
        :param y1: vertical coordinate, at which the stretching ends, can be
            infinity
        :type y1: float
        :param yref: vertical reference coordinate at which the full DEM
            displacement is applied; defaults to the top of the mesh
        :type yref: float
        :param kx: horizontal degree of the spline interpolation
        :type kx: integer
        :param ky: vertical degree of the spline interpolation
        :type ky: integer
        :param s: smoothing factor
        :type s: float
        '''

        if not self.ndim == 2:  # pragma: no cover
            raise ValueError('add_dem_2D works on 2D meshes only')

        if yref is None:
            yref = self.points[:, 1].max()

        if y1 < np.inf:
            y = np.array([y0, yref, y1])
            d = np.c_[np.zeros(len(dem)), dem, np.zeros(len(dem))]
        else:
            y = np.array([y0, yref])
            d = np.c_[np.zeros(len(dem)), dem]

        xx, yy = np.meshgrid(x, y, indexing='ij')
        rbs = RectBivariateSpline(x, y, d, kx=kx, ky=ky, s=s)

        # add to topography
        if self.topography is None:
            self.topography = np.zeros_like(self.points[:, -1])

        self.points[:, 1] += rbs.ev(self.points[:, 0], self.points[:, 1])
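The stretching itself reduces to a spline that is zero at y0 and y1 and equal to the DEM at yref; a standalone sketch with made-up coordinates (x, dem and the evaluation points below are assumptions, not values from the mesh class):

import numpy as np
from scipy.interpolate import RectBivariateSpline

x = np.linspace(0.0, 10.0, 50)
dem = 0.3 * np.sin(2 * np.pi * x / 10.0)        # topography along x
y0, yref, y1 = -5.0, 0.0, 2.0

y = np.array([y0, yref, y1])
d = np.c_[np.zeros_like(dem), dem, np.zeros_like(dem)]
rbs = RectBivariateSpline(x, y, d, kx=3, ky=1)  # cubic in x, linear in y

px = np.array([1.0, 5.0, 9.0])                  # points to be displaced
py = np.array([-1.0, 0.0, 1.0])
dz = rbs.ev(px, py)                              # vertical perturbation to add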
Code example #47
File: calc_likelihood.py Project: jspilker/visilens
def fft_interpolate(visdata,immap,xmap,ymap,ug=None,scaleamp=1.,shiftphase=[0.,0.]):
      """
      Take a dataset and a map of a field, fft the image,
      and interpolate onto the uv-coordinates of the dataset.

      Returns:
      interpdata: Visdata object
            A dataset which samples the given immap
      """
      
      # Correct for PB attenuation            
      if visdata.PBfwhm is not None: 
            PBs = visdata.PBfwhm / (2.*np.sqrt(2.*np.log(2)))
            immap *= np.exp(-(xmap**2./(2.*PBs**2.)) - (ymap**2./(2.*PBs**2.)))
      
      #immap = immap[::-1,:] # Fixes issue of origin in tlc vs blc to match sky coords  
      imfft = fftshift(fft2(fftshift(immap)))
      
      # Calculate the uv points we need, if we don't already have them
      if ug is None:
            kmax = 0.5/((xmap[0,1]-xmap[0,0])*arcsec2rad)
            ug = np.linspace(-kmax,kmax,xmap.shape[0])

      # Interpolate the FFT'd image onto the data's uv points
      # Using RBS, much faster since ug is gridded
      spliner = RectBivariateSpline(ug,ug,imfft.real,kx=1,ky=1)
      splinei = RectBivariateSpline(ug,ug,imfft.imag,kx=1,ky=1)
      interpr = spliner.ev(visdata.v,visdata.u)
      interpi = splinei.ev(visdata.v,visdata.u)
      interpdata = Visdata(visdata.u,visdata.v,interpr,interpi,visdata.sigma,\
            visdata.ant1,visdata.ant2,visdata.PBfwhm,'interpolated_data')

      # Apply scaling, phase shifts; wrap phases to +/- pi.
      interpdata.amp *= scaleamp
      interpdata.phase += 2.*np.pi*arcsec2rad*(shiftphase[0]*interpdata.u + shiftphase[1]*interpdata.v)
      interpdata.phase = (interpdata.phase + np.pi) % (2*np.pi) - np.pi

      return interpdata
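A stripped-down sketch of the central idea, without the Visdata class or primary-beam correction (the image size, pixel scale and uv points are invented values): FFT the gridded image, then evaluate its real and imaginary parts at scattered (u, v) samples with bilinear splines.

import numpy as np
from numpy.fft import fft2, fftshift
from scipy.interpolate import RectBivariateSpline

arcsec2rad = np.pi / (180.0 * 3600.0)
npix, dx_arcsec = 256, 0.05                    # image size, pixel scale [arcsec]
immap = np.zeros((npix, npix))
immap[npix // 2, npix // 2] = 1.0              # point source near the centre

imfft = fftshift(fft2(fftshift(immap)))
kmax = 0.5 / (dx_arcsec * arcsec2rad)
ug = np.linspace(-kmax, kmax, npix)            # gridded uv coordinates

u = np.array([1.0e5, -3.0e5])                  # sample uv points [lambda]
v = np.array([2.0e5, 4.0e5])
spliner = RectBivariateSpline(ug, ug, imfft.real, kx=1, ky=1)
splinei = RectBivariateSpline(ug, ug, imfft.imag, kx=1, ky=1)
vis = spliner.ev(v, u) + 1j * splinei.ev(v, u) # model visibilities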
Code example #48
    def _interp(self):
        """Interpolate the cartesian coordinates.
        """
        if np.all(self.hrow_indices == self.row_indices):
            return self._interp1d()
        
        xpoints, ypoints = np.meshgrid(self.hrow_indices,
                                       self.hcol_indices)
        spl = RectBivariateSpline(self.row_indices,
                                  self.col_indices,
                                  self.x__,
                                  s=0,
                                  kx=self.kx_,
                                  ky=self.ky_)

        self.newx = spl.ev(xpoints.ravel(), ypoints.ravel())
        self.newx = self.newx.reshape(xpoints.shape).T

        spl = RectBivariateSpline(self.row_indices,
                                  self.col_indices,
                                  self.y__,
                                  s=0,
                                  kx=self.kx_,
                                  ky=self.ky_)

        self.newy = spl.ev(xpoints.ravel(), ypoints.ravel())
        self.newy = self.newy.reshape(xpoints.shape).T

        spl = RectBivariateSpline(self.row_indices,
                                  self.col_indices,
                                  self.z__,
                                  s=0,
                                  kx=self.kx_,
                                  ky=self.ky_)

        self.newz = spl.ev(xpoints.ravel(), ypoints.ravel())
        self.newz = self.newz.reshape(xpoints.shape).T
Code example #49
File: slimscat.py Project: krosenfeld/slimscat
def run_slimscat(isrc,idx,screenfile='screen.bin'):
  '''
  Scatter source image.

  :param isrc: source image
  :param idx: source pixel scale
  :param screenfile: screen file

  '''

  # read in screen parameters
  f = open(screenfile,'rb')
  hdrsize = struct.unpack('i',f.read(4))[0]
  nphi = int(np.sqrt(struct.unpack('i',f.read(4))[0]))
  dx   = struct.unpack('d',f.read(8))[0]
  f.seek(hdrsize)

  # filter image

  # check fov
  iny,inx = isrc.shape
  ny = int(np.floor(iny*idx/dx))
  nx = int(np.floor(inx*idx/dx))
  assert idx*max([iny,inx]) < nphi*dx

  # read in screen
  dphi_x = np.empty((ny,nx),dtype=np.float64)
  dphi_y = np.empty((ny,nx),dtype=np.float64)
  for i in range(ny):
    f.seek(hdrsize + i*8*1*nphi,0)
    dphi_x[i,:] = struct.unpack('{0:d}d'.format(nx),f.read(nx*8))
  for i in range(ny):
    f.seek(hdrsize + 8*nphi*nphi + i*8*1*nphi,0)
    dphi_y[i,:] = struct.unpack('{0:d}d'.format(nx),f.read(nx*8))

  f.close()

  # construct spline fit to source image
  f_isrc = RectBivariateSpline(idx/dx*(np.arange(iny) - 0.5*(iny-1)),\
                               idx/dx*(np.arange(inx) - 0.5*(inx-1)),\
                               isrc)

  # scatter pixel coordinates
  yy,_xx = np.meshgrid((np.arange(ny) - 0.5*(ny-1)),\
                       (np.arange(nx) - 0.5*(nx-1)),indexing='ij')
  _xx += dphi_x
  yy += dphi_y

  return f_isrc.ev(yy.flatten(),_xx.flatten()).reshape((ny,nx))
Code example #50
def get_interpolated_pixel_color_rbspline(pts, s_im, size):
    """given pts in floats, linear interpolate pixel values nearby to get a good colour"""
    pts = clamp(pts, size)

    s_im = np.atleast_3d(s_im)
    ys,xs = size
    ycoords, xcoords = np.arange(ys), np.arange(xs)
    out = np.empty(pts.shape[1:] + (s_im.shape[-1],),dtype=s_im.dtype)
    
    pts_vec = pts.reshape((2,-1))
    out_vec = out.reshape((-1,s_im.shape[-1])) #flatten for easier vectorization
    for i in range(s_im.shape[-1]): #loop over color channels
        rbspline = RectBivariateSpline(ycoords, xcoords, s_im[...,i])
        out_vec[:,i] = rbspline.ev(pts_vec[0],pts_vec[1])
    return out
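A self-contained sketch of the per-channel sampling (the image and the fractional coordinates are made up, and the clamp helper is bypassed by choosing in-range points):

import numpy as np
from scipy.interpolate import RectBivariateSpline

rng = np.random.default_rng(2)
s_im = rng.random((64, 48, 3))                 # (rows, cols, channels)
ys, xs = s_im.shape[:2]
pts = np.array([[10.3, 20.7, 5.5],             # row (y) coordinates
                [ 7.9, 30.1, 2.2]])            # column (x) coordinates

out = np.empty((pts.shape[1], s_im.shape[-1]))
for i in range(s_im.shape[-1]):                # one spline per colour channel
    spl = RectBivariateSpline(np.arange(ys), np.arange(xs), s_im[..., i])
    out[:, i] = spl.ev(pts[0], pts[1])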
Code example #51
File: rebound_twoD.py Project: dpostal/giapy
def interp3d_emergence(uplift, data, out_times, verbose=False):
    """Interpolate uplift surfaces (xyz data at a specific t) to data locations 
    (non-grid) and data times (between times calculated). 
    
    Uses progressive linear interpolations: first the uplift at each outputted 
    time is interpolated to the data locations in data.locs, then they are
    interpolated to the data times in each location.
    
    Parameters
    ----------
    uplift (array-like) - size (times, lon, lat) array of uplift surfaces
    data - data whose data.locs are the locations to interpolate to.
    out_times - the times for the first index of the uplift array (should be
            of uplift eventually, yes?)
    """
    time_start = time.perf_counter()
    ##########################################
    # STUFF TO FIX HEEEEEEERE!!!!!!!!!!!!!  
    N = uplift[0].shape
    # TODO These should be gotten from somewhere, right? uplift.grid?? 
    X = np.linspace(0, 4900000, num=N[1], endpoint=True)
    Y = np.linspace(0, 4700000, num=N[0], endpoint=True)
    ##########################################
    
    # interp_data will be an array of size (N_output_times, N_locations)
    # for use in interpolating the calculated emergence to the locations and
    # times at which there are data in data
    interp_data = []
    # Interpolate the calculated uplift at each time on the Lat-Lon grid
    # to the data locations.
    for uplift_at_a_time in uplift:
        interp_func = RectBivariateSpline(X, Y, uplift_at_a_time.T)
        interp_data.append(interp_func.ev(data.locs[:,0], data.locs[:,1]))
    interp_data = np.array(interp_data).T
    
    calc_vector = []
    # Interpolate the calculated uplifted at each time and data location
    # to the times of the data location.
    for interp, loc in zip(interp_data, data):
        calc_vector.append(np.interp(loc['data_dict']['times'],
        out_times[::-1], interp[::-1]))
    
    # flatten the array    
    calc_vector = np.array([item for l in calc_vector for item in l])
    
    if verbose: print('Interpolation time: {0}s'.format(time.perf_counter()-time_start))

    return calc_vector
Code example #52
File: projectimage.py Project: zouhu/astrophot
	def project_interp(self,to_file=None,sys_a8=False,bok=True,clobber=False,**kwd):
		# projected image x,y
		to_nx,to_ny=self.to_size
		print('get XY in image for pixels in new WCS ...')
		xx,yy=np.meshgrid(np.arange(to_nx),np.arange(to_ny))
		# projected image ra dec
		ra,dec=self.to_wcs.all_pix2world(xx,yy,0)
		# image XY
		if sys_a8:
			imxx,imyy=a8.a8_ad2xy(self.head,ra,dec,bok=bok)
			imxx-=1.0
			imyy-=1.0
		else:
			imxx,imyy=self.wcs.all_world2pix(ra,dec,0)
		data=self.data.copy()
		ny,nx=self.data.shape
		inmask=(imyy > -0.5) & (imyy < ny-0.5) & (imxx > -0.5) & (imxx < nx-0.5)
		if not inmask.any(): return None,None

		int_data=np.zeros((to_ny,to_nx))
		if self.mask is not None:
			int_mask=np.zeros((to_ny,to_nx),dtype='bool')
			indr,indc=np.where(inmask)
			tmpyy=np.round(imyy[inmask]).astype('int')
			tmpxx=np.round(imxx[inmask]).astype('int')
			tmpmask=self.mask[tmpyy,tmpxx]
			int_mask[indr,indc]=tmpmask
			int_mask[np.logical_not(inmask)]=True
		
		print('project image to new WCS by interpolating ...')	
		ff=RectBivariateSpline(np.arange(ny),np.arange(nx),data,**kwd)
		inmaskval=ff.ev(imyy[inmask],imxx[inmask])
		#int_data[np.logical_not(inmask)]=np.nan
		int_data[inmask]=inmaskval
		if to_file is not None:
			hdu=self.to_wcs.to_fits()[0]
			hdu.data=int_data
			hdu.writeto(to_file,clobber=clobber)
			if self.mask is not None:
				f,ext=os.path.splitext(to_file)
				hdu.data=int_mask.astype('uint8')
				hdu.writeto(f+'-mask.fits',clobber=clobber)
			
		if self.mask is not None:
			return int_data,int_mask
		else:
			return int_data,None
Code example #53
    def generate_score_map(structure):

        if structure == 'BackG':
            return None

        score_matrix = np.zeros((n_sample_x, n_sample_y))
        score_matrix[sample_location_indices[:,0], sample_location_indices[:,1]] = probs_allClasses[structure]

        spline = RectBivariateSpline(sample_locations_unique_xs/shrink_factor,
                                     sample_locations_unique_ys/shrink_factor,
                                     score_matrix,
                                     bbox=[interpolation_xmin/shrink_factor,
                                           interpolation_xmax/shrink_factor,
                                           interpolation_ymin/shrink_factor,
                                           interpolation_ymax/shrink_factor])

        t1 = time.time()
        dense_score_map = spline.ev(sample_locations_interpolatedArea_xs_matrix,
                                    sample_locations_interpolatedArea_ys_matrix)
        sys.stderr.write('evaluate spline: %.2f seconds\n' % (time.time() - t1)) # 5s for shrink_factor=4; doubling results in quadratic time reduction

        t1 = time.time()
        dense_score_map = resize(dense_score_map, (interpolation_h, interpolation_w)) # similar speed as rescale
#             dense_score_map = rescale(dense_score_map, shrink_factor)
        sys.stderr.write('scale up: %.2f seconds\n' % (time.time() - t1)) # 10s, very high penalty when multiprocessing

#             t = time.time()
        dense_score_map[dense_score_map < 1e-1] = 0
        dense_score_map[dense_score_map > 1.] = 1.
#             sys.stderr.write('threshold: %.2f seconds\n' % (time.time() - t))

        if np.count_nonzero(dense_score_map) < 1e5:
            sys.stderr.write('No %s is detected on section %d\n' % (structure, sec))
            return None

        t1 = time.time()

        scoremap_bp_filepath, scoremap_interpBox_filepath = \
        DataManager.get_scoremap_filepath(stack=stack, fn=fn, anchor_fn=anchor_fn, structure=structure,
                                          return_bbox_fp=True, setting=actual_setting)

        save_hdf(dense_score_map.astype(np.float16), scoremap_bp_filepath, complevel=5)
        np.savetxt(scoremap_interpBox_filepath,
               np.array((interpolation_xmin, interpolation_xmax, interpolation_ymin, interpolation_ymax))[None],
               fmt='%d')

        sys.stderr.write('save: %.2f seconds\n' % (time.time() - t1)) # 4s, very high penalty when multiprocessing
Code example #54
def getStraightenWormIntT(worm_img, skeleton, half_width = -1, cnt_widths  = np.zeros(0), width_resampling = 7, ang_smooth_win = 12, length_resampling = 49):
    
    #if np.all(np.isnan(skeleton)):
    #    buff = np.empty((skeleton.shape[0], width_resampling))
    #    buff.fill(np.nan)
    #    return buff
    assert half_width>0 or cnt_widths.size>0
    assert not np.any(np.isnan(skeleton))
    
    if ang_smooth_win%2 == 1:
        ang_smooth_win += 1; 
    
    if skeleton.shape[0] != length_resampling:
        skeleton, _ = curvspace(np.ascontiguousarray(skeleton), length_resampling)
    
    skelX = skeleton[:,0];
    skelY = skeleton[:,1];
    
    assert np.max(skelX) < worm_img.shape[0]
    assert np.max(skelY) < worm_img.shape[1]
    assert np.min(skelX) >= 0
    assert np.min(skelY) >= 0
    
    #calculate smoothed angles
    skel_angles = angleSmoothed(skelX, skelY, ang_smooth_win)
    
    #%get the perpendicular angles to define line scans (orientation doesn't
    #%matter here so subtracting pi/2 should always work)
    perp_angles = skel_angles - np.pi/2;
    
    #%for each skeleton point get the coordinates for two line scans: one in the
    #%positive direction along perpAngles and one in the negative direction (use
    #%two that both start on skeleton so that the intensities are the same in
    #%the line scan)
    
    #resample the points along the worm width
    if half_width <= 0:
        half_width = (np.median(cnt_widths[10:-10])/2.) #half the median contour width
    r_ind = np.linspace(-half_width, half_width, width_resampling)
    
    #create the grid of points to be interpolated (make use of numpy implicit broadcasting Nx1 + 1xM = NxM)
    grid_x = skelX + r_ind[:, np.newaxis]*np.cos(perp_angles);
    grid_y = skelY + r_ind[:, np.newaxis]*np.sin(perp_angles);
    
    
    f = RectBivariateSpline(np.arange(worm_img.shape[0]), np.arange(worm_img.shape[1]), worm_img)
    return f.ev(grid_y, grid_x), grid_x, grid_y #return interpolated intensity map
Code example #55
File: utilities.py Project: Jothy/electronfactors
def find_stop_power(**kwargs):
    data = TRS398_table7()
    stop_power_interp = RectBivariateSpline(
        np.array(data["depth/R50"]), np.array(data["R50"]), np.array(data["contents"])
    )

    energy = kwargs["energy"]
    depth_mm = np.array(kwargs["depth"])
    depth_cm = depth_mm / 10

    R50_mm = energy_to_R50(energy)
    R50_cm = R50_mm / 10

    depth_over_R50 = depth_cm / R50_cm
    stop_power = np.ravel(stop_power_interp.ev(depth_over_R50, R50_cm))

    return stop_power
Code example #56
def getStraightenWormInt(worm_img, skeleton, half_width, width_resampling):
    '''
        Straighten the worm image along its skeleton.
        worm_img - image containing the worm
        skeleton - worm skeleton (Nx2 array of x, y coordinates)
        half_width - half width of the worm
        width_resampling - number of data points used in the intensity map along the worm width
    '''

    assert half_width > 0
    assert not np.any(np.isnan(skeleton))
    
    
    dX = np.diff(skeleton[:,0])
    dY = np.diff(skeleton[:,1])
    
    skel_angles = np.arctan2(dY, dX)
    skel_angles = np.hstack((skel_angles[0], skel_angles))
    
    #%get the perpendicular angles to define line scans (orientation doesn't
    #%matter here so subtracting pi/2 should always work)
    perp_angles = skel_angles - np.pi/2;
    
    #%for each skeleton point get the coordinates for two line scans: one in the
    #%positive direction along perpAngles and one in the negative direction (use
    #%two that both start on skeleton so that the intensities are the same in
    #%the line scan)
    
    r_ind = np.linspace(-half_width, half_width, width_resampling)
    
    #create the grid of points to be interpolated (make use of numpy implicit broadcasting Nx1 + 1xM = NxM)
    grid_x = skeleton[:,0] + r_ind[:, np.newaxis]*np.cos(perp_angles);
    grid_y = skeleton[:,1] + r_ind[:, np.newaxis]*np.sin(perp_angles);
    
    #interpolated the intensity map
    f = RectBivariateSpline(np.arange(worm_img.shape[0]), np.arange(worm_img.shape[1]), worm_img)
    straighten_worm =  f.ev(grid_y, grid_x) 
    
    return straighten_worm, grid_x, grid_y 
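A minimal usage sketch, assuming numpy as np and RectBivariateSpline are imported as in the snippet; the synthetic image and the straight skeleton are invented test data:

rng = np.random.default_rng(3)
worm_img = rng.random((128, 128))
skeleton = np.column_stack([np.linspace(10.0, 90.0, 50),   # x along the worm
                            np.full(50, 50.0)])            # constant y
straight, grid_x, grid_y = getStraightenWormInt(worm_img, skeleton,
                                                half_width=5, width_resampling=7)
print(straight.shape)      # (7, 50)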
Code example #57
    def _interp(self):
        """Interpolate the cartesian coordinates.
        """
        if np.all(self.hrow_indices == self.row_indices):
            return self._interp1d()

        xpoints, ypoints = np.meshgrid(self.hrow_indices,
                                       self.hcol_indices)

        for num, data in enumerate(self.tie_data):
            spl = RectBivariateSpline(self.row_indices,
                                      self.col_indices,
                                      data,
                                      s=0,
                                      kx=self.kx_,
                                      ky=self.ky_)

            self.new_data[num] = spl.ev(xpoints.ravel(), ypoints.ravel())
            self.new_data[num] = self.new_data[num].reshape(xpoints.shape).T
Code example #58
File: rebound_twoD.py Project: dpostal/giapy
def interp_tilt(uplift, data, out_times, basemap, verbose=False):
    time_start = time.perf_counter()

    ##########################################
    # STUFF TO FIX HEEEEEEERE!!!!!!!!!!!!!  
    N = uplift[0].shape
    # TODO These should be gotten from somewhere, right? uplift.grid?? 
    X = np.linspace(0, 4900000, num=N[1], endpoint=True)
    Y = np.linspace(0, 4700000, num=N[0], endpoint=True)
    ##########################################

    Lon, Lat = basemap(*np.meshgrid(X, Y), inverse=True)

    tilt = calc_tilts(uplift[np.where(out_times==13)[0][0]], Lon, Lat)

    interp_func = RectBivariateSpline(X, Y, tilt.T)
    calc_vector = interp_func.ev(data.locs[:,0], data.locs[:,1])

    return calc_vector
Code example #59
File: OptPath.py Project: wmoebius/OptPath
def optpath_scipyode(xs, ys, tt, phi, startpoint):

    # setting up interpolation
    tt_interp=RectBivariateSpline(xs,ys,tt.T)
    phi_interp=RectBivariateSpline(xs,ys,phi.T)

    # initial condition
    t0 = 0.0
    y0 = startpoint 
    signorigphi = float(phi_interp.ev(y0[0], y0[1]))

    # trajectory
    ts = []
    ys = []

    # for trajectory and aborting condition
    def solout(t, y):
        # time of the solver is not time in the problem...
        ts.append(tt_interp.ev(y[0],y[1]))
        ys.append(y.copy())
        if not signorigphi*float(phi_interp.ev(y[0], y[1])) > 0:
            return -1
        else:
            return 0

    # rhs of ODE
    def rhs(t, y):
        gradx=tt_interp.ev(y[0],y[1],dx=1,dy=0)
        grady=tt_interp.ev(y[0],y[1],dx=0,dy=1)
        auxgrad=math.sqrt(gradx*gradx+grady*grady)
        return [-gradx/auxgrad, -grady/auxgrad]

    # the actual integration
    ig = ode(rhs).set_integrator('dopri5')
    ig.set_initial_value(y0, t0)
    ig.set_solout(solout)
    # throws a warning at the moment...
    ret = ig.integrate(1.0e8)
    # what an ugly hack to make a proper np array...
    npts=np.asarray(ts)
    npts.resize((len(ts),1))
    npys=np.asarray(ys)
    return np.concatenate((npts,npys),axis=1)