import numpy
from scipy.ndimage import map_coordinates


def elastic_transform2(image, gx, gy, s):
    """Elastic deformation that pulls the image towards the point (gx, gy).

    The displacement of each pixel grows quadratically with its distance
    from the pull point and is scaled by the strength factor ``s``.
    """
    shape = image.shape
    x, y = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1]))

    # Signed displacement towards (gx, gy), growing quadratically with distance
    dx = (gx - x)
    dy = (gy - y)
    dx = numpy.sign(dx) * dx**2 * s
    dy = numpy.sign(dy) * dy**2 * s

    indices = numpy.reshape(y + dy, (-1, 1)), numpy.reshape(x + dx, (-1, 1))
    if len(shape) == 3:
        # Interpolate each colour channel separately
        ch1 = map_coordinates(image[:, :, 0], indices, order=1).reshape(shape[:2])
        ch2 = map_coordinates(image[:, :, 1], indices, order=1).reshape(shape[:2])
        ch3 = map_coordinates(image[:, :, 2], indices, order=1).reshape(shape[:2])
        return numpy.dstack((ch1, ch2, ch3))

    return map_coordinates(image, indices, order=1).reshape(shape)
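A minimal usage sketch (hypothetical values): pull a random square grayscale image towards its centre pixel with a small strength factor.

import numpy

image = numpy.random.rand(64, 64)
warped = elastic_transform2(image, gx=32.0, gy=32.0, s=0.001)
print(warped.shape)  # (64, 64)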
Example 2
 def ijll(self):
     """Add vectors with lat-lon positions of particles."""
     import numpy as np
     from scipy.ndimage.interpolation import map_coordinates
     self.lon = map_coordinates(self.llon, [self.y-0.5, self.x-0.5])
     self.lat = map_coordinates(self.llat, [self.y-0.5, self.x-0.5])
     self.lon[self.lon==0] = np.nan
     self.lat[self.lat==0] = np.nan
Example 3
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates
from sklearn import preprocessing


def elastic_transform(image, gt, alpha, sigma, random_state=None):
    """
    :param image: image
    :param gt: ground truth
    :param alpha: deformation coefficient (high alpha -> strong deformation)
    :param sigma: std of the gaussian filter. (high sigma -> smooth deformation)
    :param random_state:
    :return: deformation of the pair [image,mask]
    """

    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape

    d = 4
    sub_shape = (shape[0] // d, shape[1] // d)

    deformations_x = random_state.rand(*sub_shape) * 2 - 1
    deformations_y = random_state.rand(*sub_shape) * 2 - 1

    deformations_x = np.repeat(np.repeat(deformations_x, d, axis=1), d, axis = 0)
    deformations_y = np.repeat(np.repeat(deformations_y, d, axis=1), d, axis = 0)

    dx = gaussian_filter(deformations_x, sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter(deformations_y, sigma, mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))

    elastic_image = map_coordinates(image, indices, order=1).reshape(shape)
    elastic_gt = map_coordinates(gt, indices, order=1).reshape(shape)
    elastic_gt = preprocessing.binarize(np.array(elastic_gt), threshold=0.5)

    return [elastic_image, elastic_gt]
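A minimal usage sketch (hypothetical data): deform a random square image together with a toy binary mask; the side length must be divisible by d=4.

import numpy as np

image = np.random.rand(64, 64)
gt = (image > 0.5).astype(float)  # toy ground-truth mask
image_def, gt_def = elastic_transform(image, gt, alpha=34, sigma=4)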
Example 4
def errfun_warp_deriv(x, im1, im2, mask, reg_param=0.0):
	mask = gen_mask(im1.shape)
	x = np.array(x)
	ncoeffs = len(x)
	poly_deg = int((ncoeffs // 2)**0.5)
	coeffs_x = x[:ncoeffs // 2].reshape((poly_deg, poly_deg))
	coeffs_y = x[ncoeffs // 2:].reshape((poly_deg, poly_deg))
	coords = coords_c(im1.shape, coeffs_x, coeffs_y)
	gx, gy = np.gradient(im1)
	im3 = map_coordinates(im1, coords, order=1)
	gx = map_coordinates(gx, coords, order=1)
	gy = map_coordinates(gy, coords, order=1)
	diffim = (im2 - im3)

	cd = chebderivs(im1.shape, poly_deg)

	derivs = np.zeros(ncoeffs)
	for i in range(ncoeffs):
		gradim = gy*cd[i,1] + gx*cd[i,0]
		derivs[i] = np.sum(-2.0*mask*diffim*gradim)/np.prod(im1.shape)

	nx, ny = np.meshgrid(range(poly_deg), range(poly_deg))
	total_deg = nx+ny
	weights = np.tile(total_deg.flatten(), 2)
	penalty = weights * np.sign(x)


	return derivs + reg_param*penalty
Example 5
import numpy as np
import matplotlib.image as mpimg
from itertools import product
from scipy.interpolate import interp1d
from scipy import ndimage as interpolation


def marsfilter(imfile):
    img = mpimg.imread(imfile)
    mask = np.ones_like(img)
    mask[:,:,1]*= 0.465
    mask[:,:,2]*= 0.25
    marsimg = img*mask 
    x = np.arange(marsimg.shape[0]).reshape(-1,1)
    y = np.arange(marsimg.shape[1]).reshape(-1,1)   
    xy = np.array(list(product(x,y))).reshape(-1,2)
    center = np.mean(xy, axis=0)
    xc, yc = (xy - center).T
    # Polar coordinates
    r = np.sqrt(xc**2 + yc**2)
    theta = np.arctan2(yc, xc)
    a = 0.00022219029946345079
    b = 422.33975030901962
    k = a/(b - (np.max([img.shape[0], img.shape[1]])))
    d = np.linspace(np.min(r) +k*np.min(r)**3, np.max(r)+k*np.max(r)**3, 10000)
    u = d + k*(d**3)
    f = interp1d(u, d, bounds_error=False)
    s = f(r)
    newx = (np.round(s*np.cos(theta))).astype(int).reshape(len(x), len(y)) + int(center[0])
    newy = (np.round(s*np.sin(theta))).astype(int).reshape(len(x), len(y)) + int(center[1])
    one_image = interpolation.map_coordinates(marsimg[:,:,0], coordinates=[newx.flatten(), newy.flatten()]).reshape(marsimg.shape[0],marsimg.shape[1], 1)
    two_image = interpolation.map_coordinates(marsimg[:,:,1], coordinates=[newx.flatten(), newy.flatten()]).reshape(marsimg.shape[0],marsimg.shape[1], 1)
    three_image = interpolation.map_coordinates(marsimg[:,:,2], coordinates=[newx.flatten(), newy.flatten()]).reshape(marsimg.shape[0],marsimg.shape[1], 1)
    new_image = np.append(one_image, np.append(two_image, three_image, axis=2), axis=2)
    imsavefile = 'marsfilter_'+imfile
    return mpimg.imsave(imsavefile, new_image, dpi=100)
Example 6
 def get_uv(self, lat, lon, t):
     # map_coordinates wants columns, so we have to transpose and make the
     # coords array two-dimensional
     coords = (((lat, lon, t) - self.p0) * self.inv_dp)[:,np.newaxis]
     u = map_coordinates(self.u_coeffs, coords, prefilter=False, mode='nearest')
     v = map_coordinates(self.v_coeffs, coords, prefilter=False, mode='nearest')
     return u, v
Example 7
 def evaluate(self, y, x):
     
     #Calculates the maximum length from point to the edges.
     
     maximum_length = np.min([self.shape[1]-x,np.min([y+1,self.shape[0]-y])])
     
     # Creates an indexing array from point to the edge with spacing of 1 pixel.
     stepsize = 1.0/(np.sqrt(2)*self.step)
     indexarray = np.r_[0:maximum_length:stepsize]
     
     # Coordinate arrays for both the positive and the negative e.
     coords1 = np.asarray([float(y)/self.step-indexarray,float(x)/self.step + indexarray])
     coords2 = np.asarray([float(y)/self.step+indexarray,float(x)/self.step + indexarray])
 
     # Values of the array at these locations.
     e1 = map_coordinates(self.data, coords1, output=float, order=3)
     e2 = map_coordinates(self.data, coords2, output=float, order=3)
     
     # Logarithmic strain
     e1log = np.log(e1)
     e2log = np.log(e2)
     
      index = np.log(np.r_[0:e1.shape[0]])
     k11, k12 = np.polyfit(index, e1log, 1)
     k21, k22 = np.polyfit(index, e2log, 1)
     
     norm = 
             
     return -norm
Example 8
def interpolate_geolocation_cartesian(lon_array, lat_array):
    """Interpolate MODIS navigation from 1000m resolution to 250m.

    Python rewrite of the IDL function ``MODIS_GEO_INTERP_250`` but converts to cartesian (X, Y, Z) coordinates
    first to avoid problems with the anti-meridian/poles.

    :param lon_array: MODIS 1km longitude array
    :param lat_array: MODIS 1km latitude array

    :returns: MODIS 250m longitude and latitude arrays
    """
    num_rows,num_cols = lon_array.shape
    num_scans = num_rows // ROWS_PER_SCAN

    lons_rad = np.radians(lon_array)
    lats_rad = np.radians(lat_array)
    x_in = EARTH_RADIUS * np.cos(lats_rad) * np.cos(lons_rad)
    y_in = EARTH_RADIUS * np.cos(lats_rad) * np.sin(lons_rad)
    z_in = EARTH_RADIUS * np.sin(lats_rad)

    # Create an array of indexes that we want our result to have
    x = np.arange(RES_FACTOR * num_cols, dtype=np.float32) * 0.25
    y = np.arange(RES_FACTOR * ROWS_PER_SCAN, dtype=np.float32) * 0.25 - 0.375
    x,y = np.meshgrid(x,y)
    coordinates = np.array([y,x]) # Used by map_coordinates, major optimization

    new_x = np.empty( (num_rows * RES_FACTOR, num_cols * RES_FACTOR), dtype=np.float32 )
    new_y = new_x.copy()
    new_z = new_x.copy()
    nav_arrays = [(x_in, new_x), (y_in, new_y), (z_in, new_z)]

    # Interpolate each scan, one at a time, otherwise the math doesn't work well
    for scan_idx in range(num_scans):
        # Calculate indexes
        j0 = ROWS_PER_SCAN              * scan_idx
        j1 = j0 + ROWS_PER_SCAN
        k0 = ROWS_PER_SCAN * RES_FACTOR * scan_idx
        k1 = k0 + ROWS_PER_SCAN * RES_FACTOR

        for nav_array, result_array in nav_arrays:
            # Use bilinear interpolation for all 250 meter pixels
            map_coordinates(nav_array[ j0:j1, : ], coordinates, output=result_array[ k0:k1, : ], order=1, mode='nearest')

            # Use linear extrapolation for the first two 250 meter pixels along track
            m = (result_array[ k0 + 5, : ] - result_array[ k0 + 2, : ]) / (y[5,0] - y[2,0])
            b = result_array[ k0 + 5, : ] - m * y[5,0]
            result_array[ k0 + 0, : ] = m * y[0,0] + b
            result_array[ k0 + 1, : ] = m * y[1,0] + b

            # Use linear extrapolation for the last  two 250 meter pixels along track
            m = (result_array[ k0 + 37, : ] - result_array[ k0 + 34, : ]) / (y[37,0] - y[34,0])
            b = result_array[ k0 + 37, : ] - m * y[37,0]
            result_array[ k0 + 38, : ] = m * y[38,0] + b
            result_array[ k0 + 39, : ] = m * y[39,0] + b

    # Convert from cartesian to lat/lon space
    new_lons = get_lons_from_cartesian(new_x, new_y)
    new_lats = get_lats_from_cartesian(new_x, new_y, new_z)

    return new_lons, new_lats
Example 9
 def ijll(self,ps=None):
     import numpy as np
     from scipy.ndimage.interpolation import map_coordinates
     self.lon = map_coordinates(self.llon, [self.y,self.x])
     self.lat = map_coordinates(self.llat, [self.y,self.x])
     self.lon[self.lon<-180] = self.lon[self.lon<-180] + 360
     self.lon[self.lon> 180] = self.lon[self.lon> 180] - 360
     self.lon[self.lon==0] = np.nan
     self.lat[self.lat==0] = np.nan
Example 10
import numpy as np
from scipy import linalg
from scipy.ndimage import map_coordinates


def warplk(src, target, warp, warp_jac, p0, mask=None):
	ny, nx = src.shape
	nparam = len(p0)
	grad_im = np.gradient(src)[::-1]
	grad_warped = np.zeros((2,) + src.shape)
	interp_order=1
	p = np.copy(p0)
	p_list = []
	sd_im = np.zeros((nparam,) + src.shape)

	eps = 1e-2
	iter_count = 0
	while iter_count < 20:
		p_list.append(p)
		iter_count += 1
		#print "p: " + str(p)
		coords = warp(src.shape, p)
		src_warped = map_coordinates(src, coords, order=interp_order)
		for i in range(2):
			grad_warped[i] = map_coordinates(grad_im[i], coords, order=interp_order)
		err_im = target - src_warped
		print(iter_count, np.sum(np.abs(err_im)))
		jac = warp_jac(src.shape, p)

		#compute steepest descent images
		for i in range(nparam):
			sd_im[i] = grad_warped[0]*jac[0,i] + grad_warped[1]*jac[1,i]

		#form hessian
		hess = np.zeros((nparam, nparam))
		if mask is not None:
			for iy in range(ny):
				for ix in range(nx):
					hess += np.outer(sd_im[:,iy,ix],sd_im[:,iy,ix])*mask[iy,ix]
		else:	
			for iy in range(ny):
				for ix in range(nx):
					hess += np.outer(sd_im[:,iy,ix],sd_im[:,iy,ix])

		if mask is not None:
			errgrad = np.sum(sd_im*err_im*mask, axis=(1,2))
		else:
			errgrad = np.sum(sd_im*err_im, axis=(1,2))
		hess_inv = linalg.inv(hess)

		delta_p = np.dot(hess_inv, errgrad)
		delta_norm = np.linalg.norm(delta_p)
		p = p + delta_p



		#print "delta_p: " + str(delta_p)
		if delta_norm < eps:
			break
	return p
Example 11
from numpy import arange, dstack, meshgrid, nan, reshape, size
from scipy.ndimage import map_coordinates


def interpolate(I1, I2, VF, n, VB=None):
  """Interpolate n frames between two images by using forward and backward 
  motion fields computed from two images. The time range between the images is 
  equally divided into n subintervals.
  
  Parameters
  ----------
  I1 : array-like
    The first input image.
  I2 : array-like
    The second input image (must have the same shape as I1).
  VF : array-like
    The forward motion field (must have the same shape as I1 and I2).
  n : int
    Number of frames to interpolate between the input images.
  VB : array-like
    The backward motion field (must have the same shape as I1 and I2). If not 
    given, VB is assumed to be -VF.
  
  Returns
  -------
  out : list
    List of length n containing the interpolated frames between the images. 
    The input images are not included in the list.
  """
  if len(I1.shape) != 2:
    raise ValueError("I1 and I2 must be two-dimensional arrays")
  if I1.shape != I2.shape:
    raise ValueError("I1 and I2 must have the same shape")
  if VF.shape[0:2] != I1.shape or VF.shape[0:2] != I2.shape or \
     (VB is not None and (VB.shape[0:2] != I1.shape or VB.shape[0:2] != I2.shape)):
    raise ValueError("V must have the same shape as I1 and I2")
  
  if VB is None:
    VB = -VF
  
  X,Y = meshgrid(arange(size(I1, 1)), arange(size(I1, 0)))
  XY = dstack([X, Y])
  
  tws = 1.0*arange(1, n+1) / (n + 1)
  
  I_result = []
  for tw in tws:
    XYW = XY + tw*VB[:,:,0:2]
    XYW = [XYW[:, :, 1], XYW[:, :, 0]]
    I1_warped = reshape(map_coordinates(I1, XYW, order=1, mode="constant", 
                        cval=nan), I1.shape)
    XYW = XY + (1.0-tw)*VF[:,:,0:2]
    XYW = [XYW[:, :, 1], XYW[:, :, 0]]
    I2_warped = reshape(map_coordinates(I2, XYW, order=1, mode="constant", 
                        cval=nan), I2.shape)
    
    I_result.append((1.0-tw)*I1_warped + tw*I2_warped)
  
  return I_result
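A minimal usage sketch with a hypothetical constant motion field: the second frame is the first shifted one pixel in x, and a single in-between frame is interpolated.

import numpy as np

I1 = np.random.rand(32, 32)
I2 = np.roll(I1, 1, axis=1)
VF = np.zeros((32, 32, 2))
VF[:, :, 0] = 1.0  # forward motion: +1 pixel in x everywhere
frames = interpolate(I1, I2, VF, n=1)  # list with one interpolated frame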
Example 12
def rotational_expand(vals,N,D,interp_order=1):
    interp_coords = n.sqrt(n.sum(gencoords(N,D).reshape((N**D,D))**2,axis=1)).reshape((1,) + D*(N,))
    if n.iscomplexobj(vals):
        rotexp = 1.0j*spinterp.map_coordinates(vals.imag, interp_coords, 
                                               order=interp_order, mode='nearest')
        rotexp += spinterp.map_coordinates(vals.real, interp_coords, 
                                           order=interp_order, mode='nearest')
    else:
        rotexp = spinterp.map_coordinates(vals, interp_coords, 
                                          order=interp_order, mode='nearest')
    return rotexp
Example 13
 def interpolate_3d (self,l,m,thetaphi=False,rotate=None,time=None,freq=None,freqaxis=None,output=None,mask=None,extra_axes=0):
   """Interpolates beam into the given l/m (default) or theta/phi (thetaphi=True) coordinates,
   and optionally freq/time, etc.
   If extra_axes>0, then l/m arrays will have that many extra axes (at the front), i.e. the real freq
   axis is actually freqaxis+extra_axes.
   See transformCoordinates() above for a description of the parameters. An output array
   may be specified (it will be resized to the correct shape just in case).
   Returns array of interpolated coordinates.
   """;
   # transform coordinates
   dprint(3,"transforming coordinates");
   coords,output_shape,mask = self.transformCoordinates(l,m,thetaphi=thetaphi,
                              rotate=rotate,time=time,freq=freq,freqaxis=freqaxis,extra_axes=extra_axes,mask=mask);
   # prepare output array
   if output is None:
     output = numpy.zeros(output_shape,complex);
   elif output.shape != output_shape:
     output.resize(output_shape);
   global _print_first;
   verbose = _print_first and _verbosity.get_verbose()>1;
   if verbose:
     l0,m0,freq0 = coords[:,0];
     dprint(1,"First interpolation point is at",l0,m0,freq0);
     dprint(1,"Frequencies are",self._freq_grid);
     for l in int(l0),int(l0)+1:
       for m in int(m0),int(m0)+1:
         dprint(1,l,m,"amplitudes",abs(self._beam[l,m,:]));
         dprint(1,l,m,"phases",numpy.angle(self._beam[l,m,:])/DEG);
   
   dprint(3,"interpolating %s coordinate points to output shape %s"%(coords.shape,output_shape));
   # interpolate real and imag parts separately
   output.real = interpolation.map_coordinates(self._beam_real,coords,order=self._spline_order,mode='nearest',
                   prefilter=(self._spline_order==1)).reshape(output_shape);
   output.imag = interpolation.map_coordinates(self._beam_imag,coords,order=self._spline_order,mode='nearest',
                   prefilter=(self._spline_order==1)).reshape(output_shape);
   output[~(numpy.isfinite(output))] = 0;
   if mask is not None:
     output[mask] = 0;
   output_ampl = interpolation.map_coordinates(self._beam_ampl,coords,order=self._spline_order,mode='nearest',
                   prefilter=(self._spline_order==1)).reshape(output_shape);
   output_ampl[~(numpy.isfinite(output_ampl))] = 0;
   phase_array = numpy.angle(output);
   output.real = output_ampl * numpy.cos(phase_array);
   output.imag = output_ampl * numpy.sin(phase_array);
   
   dprint(3,"interpolated value [0] is",output.ravel()[0]);
   # dprint(4,"interpolated value is",output);
   if verbose:
     _print_first = None;
     for i in range(coords.shape[1]):
       if coords[0,i] == l0 and coords[1,i] == m0:
         dprint(0,"Interpolated amplitude at frequency %f is %f"%(coords[2,i],abs(output.ravel()[i])));
   
   return output;
Example 14
import numpy as np
from scipy.ndimage import map_coordinates


def cmap_file2d(data, cmap, roll_x=0.):
    cmap[:, -1] = cmap[:, 0]
    data_dim, nrows, ncols = data.shape
    data2 = np.copy(data)
    #data2[1] = (data2[1] - roll_x) % 1.0
    data2[0] *= cmap.shape[0]
    data2[1] *= cmap.shape[1]
    data2 = data2.reshape(data_dim, nrows, ncols)
    r = map_coordinates(cmap[:, :, 0], data2, order=1, mode='nearest')
    g = map_coordinates(cmap[:, :, 1], data2, order=1, mode='nearest')
    b = map_coordinates(cmap[:, :, 2], data2, order=1, mode='nearest')
    rgb = np.array([r, g, b])
    rgb = rgb.reshape(3, nrows, ncols).transpose(1, 2, 0)

    return rgb
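A minimal usage sketch (hypothetical data): map a 2-channel field with values in [0, 1) through a random 2D colour lookup table.

import numpy as np

cmap = np.random.rand(16, 16, 3)  # 2D colormap: rows x cols x RGB
data = np.random.rand(2, 8, 8)    # channel 0 indexes cmap rows, channel 1 columns
rgb = cmap_file2d(data, cmap)     # (8, 8, 3) RGB image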
Example 15
def cart_to_irregular_spline(cartgrid, values, newgrid, **kwargs):
    """
    Map array ``values`` defined by cartesian coordinate array ``cartgrid``
    to new coordinates defined by ``newgrid`` using spline interpolation.

    Keyword arguments are fed through to
    :func:`scipy:scipy.ndimage.map_coordinates`

    Parameters
    ----------
    cartgrid : numpy ndarray
        3 dimensional array (nx, ny, lon/lat) of floats
    values : numpy 2d-array
        2 dimensional array (nx, ny) of data values
    newgrid : numpy ndarray
        Nx2 dimensional array (..., lon/lat) of floats
    kwargs : :func:`scipy:scipy.ndimage.map_coordinates`

    Returns
    -------
    interp : numpy ndarray
        array with interpolated values of size N

    Examples
    --------
    See :ref:`/notebooks/beamblockage/wradlib_beamblock.ipynb#\
Preprocessing-the-digitial-elevation-model`.
    """

    # TODO: dimension checking
    newshape = newgrid.shape[:-1]

    xi = newgrid[..., 0].ravel()
    yi = newgrid[..., 1].ravel()

    nx = cartgrid.shape[1]
    ny = cartgrid.shape[0]

    cxmin = np.min(cartgrid[..., 0])
    cxmax = np.max(cartgrid[..., 0])
    cymin = np.min(cartgrid[..., 1])
    cymax = np.max(cartgrid[..., 1])

    # this functionality finds the floating point
    # indices into the value array (0:nx-1)
    # can be transferred into separate function
    # if necessary
    xi = (nx - 1) * (xi - cxmin) / (cxmax - cxmin)

    # check origin to calculate y index
    if util.get_raster_origin(cartgrid) == 'lower':
        yi = (ny - 1) * (yi - cymin) / (cymax - cymin)
    else:
        yi = ny - (ny - 1) * (yi - cymin) / (cymax - cymin)

    # interpolation by map_coordinates
    interp = map_coordinates(values, [yi, xi], **kwargs)
    interp = interp.reshape(newshape)

    return interp
Example 16
import numpy as np
import scipy.ndimage as sn

def mapping_on_plane(img_in,xi1,xi2,dsi):
	nsx, nsy = np.shape(img_in)
	xt1 = (xi1)/dsi+nsx/2.0
	xt2 = (xi2)/dsi+nsy/2.0
	points = np.array([np.array(xt1.flat),np.array(xt2.flat)])
	img_out = sn.map_coordinates(img_in,points,order=1,mode='constant')
	return img_out.reshape((nsx,nsy))
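A minimal usage sketch (hypothetical data): resample a test image onto a regular grid of plane coordinates with pixel scale dsi.

import numpy as np

img = np.random.rand(32, 32)
xi1, xi2 = np.meshgrid(np.linspace(-1, 1, 32), np.linspace(-1, 1, 32))
out = mapping_on_plane(img, xi1, xi2, dsi=2.0 / 32)  # (32, 32)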
Example 17
import numpy as np
from scipy import ndimage as interp


def apply_shift(img_data, coords, shift_data, axis=1, sign=1):
    """
    Interpolates image along phase encoding direction

    Parameters
    ----------
    img_data: (x, y, z) array
        Image to interpolate
    coords: (3, p) list
        Coordinates to sample img_data at
    shift_data: (p, ) array
        Shifts to apply to coordinates in coords
    axis: int
        Dimension to apply shift_data to. Must be 0, 1, or 2
    sign: int
        Direction to apply shift_data to. Must be 1 or -1.

    Returns
    -------
    img_interp: (p,) array
        Interpolated img_data
    """

    #Apply shift to coordinates
    shift_coords = np.float32(coords)
    shift_coords[axis] += shift_data * sign

    #Interpolate image at shifted coordinates
    return interp.map_coordinates(img_data, shift_coords, order=1)
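A minimal usage sketch (hypothetical data): sample a random volume at its own voxel centres while shifting the y coordinate (axis=1) by half a voxel.

import numpy as np

vol = np.random.rand(8, 8, 8)
grid = np.array(np.meshgrid(*map(np.arange, vol.shape), indexing='ij'))
coords = grid.reshape(3, -1)            # (3, p) voxel coordinates
shifts = np.full(coords.shape[1], 0.5)  # half-voxel shift for every point
vals = apply_shift(vol, coords, shifts, axis=1, sign=1)  # (512,) samples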
Example 18
import cv2
import numpy as np
from scipy.ndimage import map_coordinates


def elastic_transform(image, alpha=2000, sigma=40, alpha_affine=40, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    for i in range(shape[2]):
        image[:,:,i] = cv2.warpAffine(image[:,:,i], M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    image = image.reshape(shape)

    blur_size = int(4*sigma) | 1

    dx = cv2.GaussianBlur((random_state.rand(*shape_size) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
    dy = cv2.GaussianBlur((random_state.rand(*shape_size) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma) * alpha

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))

    def_img = np.zeros_like(image)
    for i in range(shape[2]):
        def_img[:,:,i] = map_coordinates(image[:,:,i], indices, order=1).reshape(shape_size)

    return def_img
Example 19
import numpy as np
from scipy.ndimage import map_coordinates


def get_rgb(im, y, x, order=1):
    im = np.asarray(im)
    ni, nj, nc = im.shape
    points = np.zeros((len(x), nc), dtype=im.dtype)
    for c in range(nc):
        points[:, c] = map_coordinates(im[:, :, c], [y, x], order=order)
    return points
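A minimal usage sketch: bilinearly sample an RGB image at two sub-pixel locations (hypothetical data).

import numpy as np

im = np.random.rand(16, 16, 3)
samples = get_rgb(im, y=[2.5, 7.25], x=[3.0, 9.75])  # shape (2, 3)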
Example 20
import numpy as np
from scipy import ndimage as imginterp


def get_image_values(img,x,y,normal,dist,npts):
    '''
    Get the image values along a normal line from a point x.
    Traverse along this line dist pixels with npts
    '''

    # normal is [dx, dy] but may have unknown length
    normal = normal/np.sqrt(normal[0]**2 + normal[1]**2)
    
    # now normal is of length 1, and is in the form [dx,dy]
    xMin = x
    xMax = x + dist*normal[0]
    yMin = y
    yMax = y + dist*normal[1]
    
    #print "original", [xMin, yMin]
    #print "ending", [xMax,yMax]
    #print "distance", np.sqrt((xMax - xMin)**2 + (yMax - yMin)**2)
    
    # make a new array
    xcoords = np.linspace(xMin, xMax, npts)
    ycoords = np.linspace(yMin, yMax, npts)
    
    #print xcoords
    #print ycoords
    
    # travel along points, get image values put them in array
    return imginterp.map_coordinates(img,[xcoords,ycoords])
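A minimal usage sketch (hypothetical data): sample 10 values over a distance of 5 pixels along the normal [1, 0] starting from the point (12, 12).

import numpy as np

img = np.random.rand(32, 32)
vals = get_image_values(img, 12, 12, np.array([1.0, 0.0]), dist=5, npts=10)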
Example 21
    def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):
        """Perform interpolation on a healpix cube.  If egy is None
        then interpolation will be performed on the existing energy
        planes.

        """

        shape = np.broadcast(lon, lat, egy).shape
        lon = lon * np.ones(shape)
        lat = lat * np.ones(shape)
        theta = np.pi / 2. - np.radians(lat)
        phi = np.radians(lon)
        vals = []
        for i, _ in enumerate(self.hpx.evals):
            v = hp.pixelfunc.get_interp_val(self.counts[i], theta,
                                            phi, nest=self.hpx.nest)
            vals += [np.expand_dims(np.array(v, ndmin=1), -1)]

        vals = np.concatenate(vals, axis=-1)

        if egy is None:
            return vals.T

        egy = egy * np.ones(shape)

        if interp_log:
            xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))
        else:
            xvals = utils.val_to_pix(self.hpx.evals, egy)

        vals = vals.reshape((-1, vals.shape[-1]))
        xvals = np.ravel(xvals)
        v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals],
                            order=1)
        return v.reshape(shape)
Example 22
def errfun(x):
	# im1, im2, gen_mask and fill_coords2 are globals defined elsewhere
	mask = gen_mask(im1.shape)
	coords = fill_coords2(im1.shape, x[0],x[1])
	im3 = map_coordinates(im1, coords)
	diffim = (im2 - im3)
	err = np.sum(np.abs(diffim)*mask)
	return err
Example 23
import numpy as np
from scipy.ndimage import map_coordinates


def warp_grid(grid, mapping_function, order=3, mode="nearest"):
    """
    warp grid with a mapping function
    phi_1 = grid
    phi_2 = mapping_function
    the result is
    phi_1(phi_2)

    Parameters
    ----------
    grid : ndarray
        a grid which is going to be warped
    mapping_function : ndarray
        the grid will be deformed by this mapping function

    Returns
    -------
    warped_grid : ndarray
        grid deformed by the mapping function
    """
    if len(grid) != len(mapping_function):
        raise ValueError("the dimensions of the two inputs must be the same")

    warped_grid = np.zeros_like(grid)
    for i, lines in enumerate(grid):
        warped_grid[i] = map_coordinates(lines, mapping_function, order=order, mode=mode)

    return warped_grid
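A minimal usage sketch: warp an identity grid by a mapping that shifts everything half a pixel along the first axis (hypothetical data).

import numpy as np

grid = np.array(np.meshgrid(np.arange(5.), np.arange(5.), indexing='ij'))
mapping = grid.copy()
mapping[0] += 0.5                  # phi_2: shift rows by half a pixel
warped = warp_grid(grid, mapping)  # phi_1(phi_2), shape (2, 5, 5)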
Example 24
import cv2
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates


def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]
    
    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
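A minimal usage sketch (hypothetical data), with parameter values in the range often used with this recipe: alpha about twice the image width, sigma and alpha_affine about 8% of it.

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)
warped = elastic_transform(img, alpha=img.shape[1] * 2,
                           sigma=img.shape[1] * 0.08,
                           alpha_affine=img.shape[1] * 0.08)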
Example 25
	def getE(self, axis, xcoord, ycoord, angle):
	
		derarray = np.diff(self.diffs[axis], 1, axis)
		
		xcoord = xcoord/self.step
		ycoord = ycoord/self.step
	
		angle = radians(angle)
	
		xcoords = np.array([], dtype=float)
		ycoords = np.array([], dtype=float)
		
		xcoordplus	= cos(angle)/self.step
		ycoordplus	= - sin(angle)/self.step
		maxX		= derarray.shape[1] - 1
		maxY		= derarray.shape[0] - 1
		
		while True:
			if xcoord < 0 or xcoord >= maxX or ycoord <= 0 or ycoord >= maxY:
				break
			xcoords = np.append(xcoords, xcoord)
			ycoords = np.append(ycoords, ycoord)
			
			xcoord += xcoordplus
			ycoord += ycoordplus
		
		return map_coordinates(derarray, [ycoords, xcoords], order=3)
Example 26
import numpy as np
from scipy import ndimage as ndimage_interp


def grid_interpolation_linear(known_coords, known_values, interp_coords):
	"""
	Interpolate between known values on a regular grid of coordinates
	*known_coords is array of shape \product(ith grid length) x (# of dimensions)
	*known_values is array of shape \product(ith grid length)
	*interp_coords is array of shape (# of events) x (# of dimensions)
	"""
	#known_coords is array of shape \product(ith grid length) x (# of dimensions), use to find mapping between coords and grid indices
	ndim = np.shape(known_coords)[-1]
	grid_lens = np.array(np.shape(known_coords)[:-1])
	
	grid_starts = np.zeros(ndim)
	grid_factors = np.zeros(ndim)
	for dim in range(ndim):
		#Find min and max of grid coordinates for each dimension and use these to construct mapping to grid indices
		ind_min = np.zeros(ndim+1,dtype='int')
		ind_min[-1] = dim
		
		ind_max = np.zeros(ndim+1,dtype='int')
		ind_max[dim] = -1
		ind_max[-1] = dim
		
		grid_starts[dim] = known_coords[tuple(ind_min)]
		grid_factors[dim] = (known_coords[tuple(ind_max)] - known_coords[tuple(ind_min)]) / float(grid_lens[dim] - 1.)
	
	#known_values is array of shape \product(ith grid length), values map fine to grid indices as is
	grid_known_values = known_values
	
	#interp_coords is array of shape (# of events) x (# of dimensions), need to map into grid indices and then take transpose
	grid_interp_coords = np.transpose( (interp_coords - grid_starts) / grid_factors )
	
	#With everything mapped to grid indices, we can now interpolate between "pixels"
	return ndimage_interp.map_coordinates(input=grid_known_values, coordinates=grid_interp_coords, output=float, order=1, mode='nearest')	
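A minimal usage sketch: linear interpolation of f(x, y) = x + y sampled on a regular 3x3 grid (hypothetical data); the interpolated value at (0.5, 1.5) should be 2.0.

import numpy as np

xs, ys = np.linspace(0, 2, 3), np.linspace(0, 2, 3)
known_coords = np.stack(np.meshgrid(xs, ys, indexing='ij'), axis=-1)  # (3, 3, 2)
known_values = known_coords.sum(axis=-1)                              # f(x, y) = x + y
print(grid_interpolation_linear(known_coords, known_values,
                                np.array([[0.5, 1.5]])))              # [2.0]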
Example 27
def get_image_values_2(img, markerPos, DeltaMarker, fname, step=y_resampling):
    img = np.flipud(img)
    ds = np.sqrt(DeltaMarker[:,0]*DeltaMarker[:,0] + DeltaMarker[:,1]*DeltaMarker[:,1])
    nSteps = int(img.shape[0] / step)
    stepSize = step / ds
    n = np.linspace(0., nSteps, nSteps+1)
    sampleOffset = np.einsum('i,ik,j->ijk', stepSize, DeltaMarker, n)
    sampleStart = np.empty(markerPos.shape, dtype='f8')
    sampleStart[:,:] = markerPos[:,:]
    nMarkers, tmp = markerPos.shape
    sampleStart.shape = (nMarkers, 1, 2)
    sampleStart = np.repeat(sampleStart, nSteps+1, axis=1)
    samplePos = sampleStart + sampleOffset

    samplePos.shape = (nMarkers*(nSteps+1),2)
    resampImg = imginterp.map_coordinates(img.T, samplePos.T, order=1)
    resampImg.shape = (nMarkers, nSteps+1)
    resampImg = resampImg.T
    scan = str(fname[-5])

    # CONVERSION
    # Convert from pixel value to HAp density
    # In this case, HAp density is calculated with mu values, keV(1)=119
    if scan == 'g':
        resampImg *= 2.**16
        resampImg *= 0.0000689219599491
        resampImg -= 1.54118269436172
    else:
        resampImg *= 2.**16
        resampImg *= 0.00028045707501
        resampImg -= 1.48671229207043
    
    return resampImg[:,:]
Example 28
def apply_affine_itk_transform_on_image(input_image, transform, center, reference_image=None, order=None):
  """
  Apply an affine ITK transform to an image and resample it into the space of
  reference_image (or of the input image itself) using map_coordinates.
  """
  input_data = np.float32(input_image.get_data())
      
  if reference_image is None:
    reference_image = input_image

  #set the interpolation order to 1 if not specified
  if order is None:
    order = 1
    
  ref_data = np.float32(reference_image.get_data())    
  
  #create index for the reference space
  i = np.arange(0,ref_data.shape[0])
  j = np.arange(0,ref_data.shape[1])
  k = np.arange(0,ref_data.shape[2])
  iv,jv,kv = np.meshgrid(i,j,k,indexing='ij')
  
  iv = np.reshape(iv,(-1))
  jv = np.reshape(jv,(-1))
  kv = np.reshape(kv,(-1))
  
  #convert the transform from itk (LPS) to nibabel (RAS) 
  nb_transform, nb_center = convert_itk_transform_to_affine_transform(transform,center)


  #compute the coordinates in the input image
  pointset = np.zeros((4,iv.shape[0]))
  pointset[0,:] = iv
  pointset[1,:] = jv
  pointset[2,:] = kv
  pointset[3,:] = np.ones((iv.shape[0]))

  #no need to specify the center here because it is included in itk-based nb_transform  
  #pointset = transform_a_set_of_points(pointset,nb_transform,reference_image.affine,np.linalg.inv(input_image.affine))
  pointset = transform_a_set_of_points(pointset,nb_transform,reference_image.affine,np.linalg.inv(input_image.affine),nb_center)

  #compute the interpolation
  val = np.zeros(iv.shape)            
  map_coordinates(input_data,[pointset[0,:],pointset[1,:],pointset[2,:]],output=val,order=order)
  
  output_data = np.reshape(val,ref_data.shape)
 
  return nibabel.Nifti1Image(output_data, reference_image.affine)
Example 29
    def warp_image(self, img, px, py, mode='nearest'):
        sh = img.shape
        dx = self.wcoords_from_params1d(px, sh)
        dy = self.wcoords_from_params1d(py, sh)
        xi, yi = np.meshgrid(np.arange(sh[1]), np.arange(sh[0]))
        return map_coordinates(img, [yi + dy, xi + dx], mode=mode)
Example 30
import numpy as np
from scipy.ndimage import map_coordinates


def rescale(img,
            scale,
            shape=None,
            mask=None,
            order=3,
            mode='nearest',
            unitary=True):
    """Rescale an image by interpolation.

    Parameters
    ----------
    img : array_like
        Image to rescale

    scale : float
        Scaling factor. Scale factors less than 1 will shrink the image. Scale
        factors greater than 1 will grow the image.

    shape : array_like or int, optional
        Output shape. If None (default), the output shape will be the input img
        shape multiplied by the scale factor.

    mask : array_like, optional
        Binary mask applied after rescaling. If None (default), a mask is
        created from the nonzero portions of img. To skip masking operation,
        set ``mask = np.ones_like(img)``

    order : int, optional
        Order of spline interpolation used for rescaling operation. Default is
        3. Order must be in the range 0-5.

    mode : {'constant', 'nearest', 'reflect', 'wrap'}, optional
        Points outside the boundaries of the input are filled according to the
        given mode. Default is 'nearest'.

    unitary : bool, optional
        Normalization flag. If True (default), a normalization is performed on
        the output such that the rescaling operation is unitary and image power
        (if complex) or intensity (if real) is conserved.

    Returns
    -------
    img : ndarray
        Rescaled image.

    Note
    ----
    The post-rescale masking operation should have no real effect on the
    resulting image but is included to eliminate interpolation artifacts that
    sometimes appear in large clusters of zeros in rescaled images.

    See Also
    --------
    :func:`rebin`

    """

    img = np.asarray(img)

    if mask is None:
        # take the real portion to ensure that even if img is complex, mask will
        # be real
        mask = np.zeros_like(img).real
        mask[img != 0] = 1

    if shape is None:
        shape = np.ceil(
            (img.shape[0] * scale, img.shape[1] * scale)).astype(int)
    else:
        if np.isscalar(shape):
            shape = np.ceil((shape * scale, shape * scale)).astype(int)
        else:
            shape = np.ceil(
                (shape[0] * scale, shape[1] * scale)).astype(int)

    x = (np.arange(shape[1], dtype=np.float64) -
         shape[1] / 2.) / scale + img.shape[1] / 2.
    y = (np.arange(shape[0], dtype=np.float64) -
         shape[0] / 2.) / scale + img.shape[0] / 2.

    xx, yy = np.meshgrid(x, y)

    mask = map_coordinates(mask, [yy, xx], order=1, mode='nearest')
    mask[mask < np.finfo(mask.dtype).eps] = 0

    if np.iscomplexobj(img):
        out = np.zeros(shape, dtype=np.complex128)
        out.real = map_coordinates(img.real, [yy, xx], order=order, mode=mode)
        out.imag = map_coordinates(img.imag, [yy, xx], order=order, mode=mode)
    else:
        out = map_coordinates(img, [yy, xx], order=order, mode=mode)

    if unitary:
        out *= np.sum(img) / np.sum(out)

    out *= mask

    return out
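A minimal usage sketch (hypothetical data): grow a small test image by a factor of two with bilinear interpolation; the unitary flag preserves the total intensity.

import numpy as np

img = np.zeros((32, 32))
img[8:24, 8:24] = 1.0
big = rescale(img, scale=2, order=1)  # shape (64, 64)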
Example 31
def propcustom_dm(wf, dm_z0, dm_xc, dm_yc, spacing=0., **kwargs):
    """
    Generate a deformable mirror surface almost exactly like PROPER.

    Simulate a deformable mirror of specified actuator spacing, including the
    effects of the DM influence function. Has two more optional keywords
    compared to  proper.prop_dm

    Parameters
    ----------
    wf : obj
        WaveFront class object
    dm_z0 : str or numpy ndarray
        Either a 2D numpy array containing the surface piston of each DM
        actuator in meters or the name of a 2D FITS image file containing the
        above
    dm_xc, dm_yc : list or numpy ndarray
        The location of the optical axis (center of the wavefront) on the DM in
        actuator units (0 to num_actuator-1). The center of the first actuator
        is (0.0, 0.0)
    spacing : float
        Defines the spacing in meters between actuators; must not be used when
        n_act_across_pupil is specified.

    Returns
    -------
    dmap : numpy ndarray
        Returns DM surface (not wavefront) map in meters

    Other Parameters
    ----------------
    FIT : bool
        Switch that tells routine that the values in "dm_z" are the desired
        surface heights rather than commanded actuator heights, and so the
        routine should fit this map, accounting for actuator influence functions,
        to determine the necessary actuator heights. An iterative error-minimizing
        loop is used for the fit.
    NO_APPLY : bool
        If set, the DM pattern is not added to the wavefront. Useful if the DM
        surface map is needed but should not be applied to the wavefront
    N_ACT_ACROSS_PUPIL : int
        Specifies the number of actuators that span the X-axis beam diameter. If
        it is a whole number, the left edge of the left pixel is aligned with
        the left edge of the beam, and the right edge of the right pixel with
        the right edge of the beam. This determines the spacing and size of the
        actuators. Should not be used when "spacing" value is specified.
    XTILT, YTILT, ZTILT : float
        Specify the rotation of the DM surface with respect to the wavefront plane
        in degrees about the X, Y, Z axes, respectively, with the origin at the
        center of the wavefront. The DM surface is interpolated and orthographically
        projected onto the wavefront grid. The coordinate system assumes that
        the wavefront and initial DM surface are in the X,Y plane with a lower
        left origin with Z towards the observer. The rotations are left handed.
        The default rotation order is X, Y, then Z unless the /ZYX switch is set.
    XYZ or ZYX : bool
        Specifies the rotation order if two or more of XTILT, YTILT, or ZTILT
        are specified. The default is /XYZ for X, Y, then Z rotations.
    inf_fn : string
        specify a new influence function as a FITS file with the same header keywords as 
        PROPER's default influence function. Needs these values in info.PrimaryData.Keywords:
            'P2PDX_M' % pixel width x (m)
            'P2PDY_M' % pixel width y (m)
            'C2CDX_M' % actuator pitch x (m)
            'C2CDY_M' % actuator pitch y (m)
    inf_sign : {+,-}
        specifies the sign (+/-) of the influence function. Given as an option because 
        the default influence function file is positive, but positive DM actuator 
        commands make a negative deformation for Xinetics and BMC DMs.

    Raises
    ------
    ValueError:
        User cannot specify both actuator spacing and N_ACT_ACROSS_PUPIL
    ValueError:
        User must specify either actuator spacing or N_ACT_ACROSS_PUPIL
    """
    if "ZYX" in kwargs and "XYZ" in kwargs:
        raise ValueError('PROP_DM: Error: Cannot specify both XYZ and ZYX ' +
                         'rotation orders. Stopping')
    elif "ZYX" not in kwargs and 'XYZ' not in kwargs:
        XYZ = 1  # default is rotation around X, then Y, then Z
        # ZYX = 0
    elif "ZYX" in kwargs:
        # ZYX = 1
        XYZ = 0
    elif "XYZ" in kwargs:
        XYZ = 1
        # ZYX = 0

    if "XTILT" in kwargs:
        xtilt = kwargs["XTILT"]
    else:
        xtilt = 0.

    if "YTILT" in kwargs:
        ytilt = kwargs["YTILT"]
    else:
        ytilt = 0.

    if "ZTILT" in kwargs:
        ztilt = kwargs["ZTILT"]
    else:
        ztilt = 0.

    if type(dm_z0) == str:
        dm_z = proper.prop_fits_read(dm_z0)  # Read DM setting from FITS file
    else:
        dm_z = dm_z0
    
    if "inf_fn" in kwargs:
        inf_fn = kwargs["inf_fn"]
    else:
        inf_fn = "influence_dm5v2.fits"
        
    if "inf_sign" in kwargs:
        if(kwargs["inf_sign"] == '+'):
            sign_factor = 1.
        elif(kwargs["inf_sign"] == '-'):
            sign_factor = -1.
    else:
        sign_factor = 1.

    n = proper.prop_get_gridsize(wf)
    dx_surf = proper.prop_get_sampling(wf)  # sampling of surface in meters
    beamradius = proper.prop_get_beamradius(wf)

    # Default influence function sampling is 0.1 mm, peak at (x,y)=(45,45)
    # Default influence function has shape = 1x91x91. Saving it as a 2D array
    # before continuing with processing
    dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "data")
    inf = proper.prop_fits_read(os.path.join(dir_path, inf_fn))
    inf = sign_factor*np.squeeze(inf)
    
    s = inf.shape
    nx_inf = s[1]
    ny_inf = s[0]
    xc_inf = nx_inf // 2
    yc_inf = ny_inf // 2
    dx_inf = 0.1e-3  # influence function spacing in meters
    dx_dm_inf = 1.0e-3  # nominal spacing between DM actuators in meters
    inf_mag = 10

    if spacing != 0 and "N_ACT_ACROSS_PUPIL" in kwargs:
        raise ValueError("PROP_DM: User cannot specify both actuator spacing" +
                         "and N_ACT_ACROSS_PUPIL. Stopping.")

    if spacing == 0 and "N_ACT_ACROSS_PUPIL" not in kwargs:
        raise ValueError("PROP_DM: User must specify either actuator spacing" +
                         " or N_ACT_ACROSS_PUPIL. Stopping.")

    if "N_ACT_ACROSS_PUPIL" in kwargs:
        dx_dm = 2. * beamradius / int(kwargs["N_ACT_ACROSS_PUPIL"])
    else:
        dx_dm = spacing

    dx_inf = dx_inf * dx_dm / dx_dm_inf  # Influence function sampling scaled
                                         # to specified DM actuator spacing

    if "FIT" in kwargs:
        x = (np.arange(5, dtype=np.float64) - 2) * dx_dm

        if proper.use_cubic_conv:
            inf_kernel = proper.prop_cubic_conv(inf.T, x/dx_inf+xc_inf,
                                                x/dx_inf+yc_inf, GRID=True)
        else:
            xygrid = np.meshgrid(x/dx_inf+xc_inf, x/dx_inf+yc_inf)
            inf_kernel = map_coordinates(inf.T, xygrid, order=3,
                                         mode="nearest")

        (dm_z_commanded, dms) = proper.prop_fit_dm(dm_z, inf_kernel)
    else:
        dm_z_commanded = dm_z

    s = dm_z.shape
    nx_dm = s[1]
    ny_dm = s[0]

    # Create subsampled DM grid
    margin = 9 * inf_mag
    nx_grid = nx_dm * inf_mag + 2 * margin
    ny_grid = ny_dm * inf_mag + 2 * margin
    xoff_grid = margin + inf_mag/2           # pixel location of 1st actuator center in subsampled grid
    yoff_grid = xoff_grid
    dm_grid = np.zeros([ny_grid, nx_grid], dtype = np.float64)

    x = np.arange(nx_dm, dtype=int) * int(inf_mag) + int(xoff_grid)
    y = np.arange(ny_dm, dtype=int) * int(inf_mag) + int(yoff_grid)
    dm_grid[np.tile(np.vstack(y), (nx_dm,)),
            np.tile(x, (ny_dm, 1))] = dm_z_commanded
    dm_grid = ss.fftconvolve(dm_grid, inf, mode='same')
    
    # 3D rotate DM grid and project orthogonally onto wavefront
    xdim = int(np.round(np.sqrt(2) * nx_grid * dx_inf / dx_surf)) # grid dimensions (pix) projected onto wavefront
    ydim = int(np.round(np.sqrt(2) * ny_grid * dx_inf / dx_surf))

    if xdim > n: xdim = n

    if ydim > n: ydim = n

    x = np.ones((ydim, 1), dtype=int) * ((np.arange(xdim) - xdim // 2) * dx_surf)
    y = (np.ones((xdim, 1), dtype=int) * ((np.arange(ydim) - ydim // 2) * dx_surf)).T

    a = xtilt * np.pi / 180
    b = ytilt * np.pi / 180
    g = ztilt * np.pi /180

    if XYZ:
        m = np.array([[cos(b)*cos(g), -cos(b)*sin(g), sin(b), 0],
            [cos(a)*sin(g) + sin(a)*sin(b)*cos(g), cos(a)*cos(g)-sin(a)*sin(b)*sin(g), -sin(a)*cos(b), 0],
            [sin(a)*sin(g)-cos(a)*sin(b)*cos(g), sin(a)*cos(g)+cos(a)*sin(b)*sin(g), cos(a)*cos(b), 0],
            [0, 0, 0, 1] ])
    else:
        m = np.array([	[cos(b)*cos(g), cos(g)*sin(a)*sin(b)-cos(a)*sin(g), cos(a)*cos(g)*sin(b)+sin(a)*sin(g), 0],
            [cos(b)*sin(g), cos(a)*cos(g)+sin(a)*sin(b)*sin(g), -cos(g)*sin(a)+cos(a)*sin(b)*sin(g), 0],
            [-sin(b), cos(b)*sin(a), cos(a)*cos(b), 0],
            [0, 0, 0, 1] ])

    # Forward project a square
    edge = np.array([[-1.0, -1.0, 0.0, 0.0],
                     [1.0, -1.0, 0.0, 0.0],
                     [1.0, 1.0, 0.0, 0.0],
                     [-1.0, 1.0, 0.0, 0.0]])
    new_xyz = np.dot(edge, m)

    # determine backward projection for screen-raster-to-DM-surface computation
    dx_dxs = (new_xyz[0, 0] - new_xyz[1, 0]) / (edge[0, 0] - edge[1, 0])
    dx_dys = (new_xyz[1, 0] - new_xyz[2, 0]) / (edge[1, 1] - edge[2, 1])
    dy_dxs = (new_xyz[0, 1] - new_xyz[1, 1]) / (edge[0, 0] - edge[1, 0])
    dy_dys = (new_xyz[1, 1] - new_xyz[2, 1]) / (edge[1, 1] - edge[2, 1])

    xs = (x/dx_dxs - y*dx_dys/(dx_dxs*dy_dys)) / \
        (1 - dy_dxs*dx_dys/(dx_dxs*dy_dys))
    ys = (y/dy_dys - x*dy_dxs/(dx_dxs*dy_dys)) / \
        (1 - dx_dys*dy_dxs/(dx_dxs*dy_dys))

    xdm = (xs + dm_xc * dx_dm) / dx_inf + xoff_grid
    ydm = (ys + dm_yc * dx_dm) / dx_inf + yoff_grid

    if proper.use_cubic_conv:
        grid = proper.prop_cubic_conv(dm_grid.T, xdm, ydm, GRID = False)
        grid = grid.reshape([xdm.shape[1], xdm.shape[0]])
    else:
        grid = map_coordinates(dm_grid.T, [xdm, ydm], order=3,
                               mode="nearest", prefilter = True)

    dmap = np.zeros([n, n], dtype=np.float64)
    nx_grid, ny_grid = grid.shape
    xmin, xmax = n // 2 - xdim // 2, n // 2 - xdim // 2 + nx_grid
    ymin, ymax = n // 2 - ydim // 2, n // 2 - ydim // 2 + ny_grid
    dmap[ymin:ymax, xmin:xmax] = grid

    if "NO_APPLY" not in kwargs:
        proper.prop_add_phase(wf, 2 * dmap)  # convert surface to WFE

    return dmap
Example 32
    def rz2rho(self,
               r_in,
               z_in,
               t_in=None,
               coord_out='rho_pol',
               extrapolate=True):
        """Equilibrium mapping routine, map from R,Z -> rho (pol,tor,r_V,...)
           Fast for a large number of points

        Input
        ----------
        t_in : float or 1darray
            time
        r_in : ndarray
            R coordinates
            1D (time constant) or 2D+ (time variable) of size (nt,nx,...)
        z_in : ndarray
            Z coordinates
            1D (time constant) or 2D+ (time variable) of size (nt,nx,...)
        coord_out: str
            mapped coordinates - rho_pol,  rho_tor, r_V, rho_V, Psi
        extrapolate: bool
            extrapolate coordinates (like rho_tor) for values larger than 1

        Output
        -------
        rho : 2D+ array (nt,nx,...)
            Magnetic flux coordinates of the points

        """

        if not self.eq_open:
            return

        if self.debug: print(('Remapping from {R, z} to %s' % coord_out))

        if t_in is None:
            t_in = self.t_eq
        tarr = np.atleast_1d(t_in)
        r_in = np.atleast_2d(r_in)
        z_in = np.atleast_2d(z_in)

        dr = (self.Rmesh[-1] - self.Rmesh[0]) / (len(self.Rmesh) - 1)
        dz = (self.Zmesh[-1] - self.Zmesh[0]) / (len(self.Zmesh) - 1)

        nt_in = np.size(tarr)
        if r_in.shape != z_in.shape:
            raise Exception( 'Not equal shape of z_in and r_in %s,%s'\
                            %(str(z_in.shape), str(r_in.shape)) )

        if np.size(r_in, 0) != nt_in and np.size(r_in, 0) != 1:
            r_in = r_in[None]
            z_in = z_in[None]

        if np.size(r_in, 0) == 1:
            r_in = np.broadcast_to(r_in, (nt_in, ) + r_in.shape[1:])
            z_in = np.broadcast_to(z_in, (nt_in, ) + z_in.shape[1:])

        self._read_pfm()
        Psi = np.empty((nt_in, ) + r_in.shape[1:], dtype=np.single)

        scaling = np.array([dr, dz])
        offset = np.array([self.Rmesh[0], self.Zmesh[0]])

        unique_idx, idx = self._get_nearest_index(tarr)

        for i in unique_idx:
            jt = idx == i
            coords = np.array((r_in[jt], z_in[jt]))
            index = ((coords.T - offset) / scaling).T

            Psi[jt] = map_coordinates(self.pfm[i],
                                      index,
                                      mode='nearest',
                                      order=2,
                                      prefilter=True)

        rho_out = self.rho2rho(Psi,
                               t_in=t_in,
                               extrapolate=extrapolate,
                               coord_in='Psi',
                               coord_out=coord_out)

        return rho_out
Example 33
 def interpolate_tau(self, j, w, t, r, s):
     """Interpolate the optical depth grid."""
     idxs = self.indices(j, w, t, r, s)
     return map_coordinates(self.grid[j, 1], idxs, order=1, mode='nearest')
Example 34
def resample_data_or_seg(data,
                         new_shape,
                         is_seg,
                         axis=None,
                         order=3,
                         do_separate_z=False,
                         order_z=0):
    """
    separate_z=True will resample with order 0 along z
    :param data:
    :param new_shape:
    :param is_seg:
    :param axis:
    :param order:
    :param do_separate_z:
    :param order_z: only applies if do_separate_z is True
    :return:
    """
    assert len(data.shape) == 4, "data must be (c, x, y, z)"
    if is_seg:
        resize_fn = resize_segmentation
        kwargs = OrderedDict()
    else:
        resize_fn = resize
        kwargs = {'mode': 'edge', 'anti_aliasing': False}
    dtype_data = data.dtype
    shape = np.array(data[0].shape)
    new_shape = np.array(new_shape)
    if np.any(shape != new_shape):
        data = data.astype(float)
        if do_separate_z:
            print("separate z, order in z is", order_z, "order inplane is",
                  order)
            assert len(axis) == 1, "only one anisotropic axis supported"
            axis = axis[0]
            if axis == 0:
                new_shape_2d = new_shape[1:]
            elif axis == 1:
                new_shape_2d = new_shape[[0, 2]]
            else:
                new_shape_2d = new_shape[:-1]

            reshaped_final_data = []
            for c in range(data.shape[0]):
                reshaped_data = []
                for slice_id in range(shape[axis]):
                    if axis == 0:
                        reshaped_data.append(
                            resize_fn(data[c, slice_id], new_shape_2d, order,
                                      **kwargs))
                    elif axis == 1:
                        reshaped_data.append(
                            resize_fn(data[c, :, slice_id], new_shape_2d,
                                      order, **kwargs))
                    else:
                        reshaped_data.append(
                            resize_fn(data[c, :, :, slice_id], new_shape_2d,
                                      order, **kwargs))
                reshaped_data = np.stack(reshaped_data, axis)
                if shape[axis] != new_shape[axis]:

                    # The following few lines are blatantly copied and modified from sklearn's resize()
                    rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
                    orig_rows, orig_cols, orig_dim = reshaped_data.shape

                    row_scale = float(orig_rows) / rows
                    col_scale = float(orig_cols) / cols
                    dim_scale = float(orig_dim) / dim

                    map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
                    map_rows = row_scale * (map_rows + 0.5) - 0.5
                    map_cols = col_scale * (map_cols + 0.5) - 0.5
                    map_dims = dim_scale * (map_dims + 0.5) - 0.5

                    coord_map = np.array([map_rows, map_cols, map_dims])
                    if not is_seg or order_z == 0:
                        reshaped_final_data.append(
                            map_coordinates(reshaped_data,
                                            coord_map,
                                            order=order_z,
                                            mode='nearest')[None])
                    else:
                        unique_labels = np.unique(reshaped_data)
                        reshaped = np.zeros(new_shape, dtype=dtype_data)

                        for i, cl in enumerate(unique_labels):
                            reshaped_multihot = np.round(
                                map_coordinates(
                                    (reshaped_data == cl).astype(float),
                                    coord_map,
                                    order=order_z,
                                    mode='nearest'))
                            reshaped[reshaped_multihot > 0.5] = cl
                        reshaped_final_data.append(reshaped[None])
                else:
                    reshaped_final_data.append(reshaped_data[None])
            reshaped_final_data = np.vstack(reshaped_final_data)
        else:
            print("no separate z, order", order)
            reshaped = []
            for c in range(data.shape[0]):
                reshaped.append(
                    resize_fn(data[c], new_shape, order, **kwargs)[None])
            reshaped_final_data = np.vstack(reshaped)
        return reshaped_final_data.astype(dtype_data)
    else:
        print("no resampling necessary")
        return data
Example 35
 def _map(input, indices, shape):
     return np.expand_dims(map_coordinates(input, indices,
                                           order=1).reshape(shape),
                           axis=0)
Example 36
def Find_Boundaries(distmap,
                    S_features,
                    gaussian_size=0.25,
                    lower_ind_thres=-5,
                    make_plot=True):
    """Primary algorithm to find domain boundaries
    Inputs:
        distmap: distance map for a chromosome, 2d-array
        S_features: tuple or list of features, list or tuple of 2d-array
        gaussian_size: sigma for gaussian filter applied to features to better call local maximum, float
        lower_ind_thres: lower boundary for accepted indices along off-diagonal lines, int
        make_plot: whether make plots, bool
    Outputs:
        selected_pk_coords: selected peaks in feature maps, which corresponds to domain boundaries, 1d-array
    """
    from scipy.ndimage.interpolation import map_coordinates
    from scipy.signal import find_peaks
    from astropy.convolution import Gaussian2DKernel, convolve
    import matplotlib.pyplot as plt

    dim = np.shape(distmap)[0]
    # generate coordinates for lines i+x, i+x/2 along which arrow edges align:
    start_ind = np.arange(-int(dim / 2), dim)
    coord_list = [
        np.stack([
            np.arange(np.abs(i), dim),
            max(0, i) / 2 + np.arange(max(0, i), dim + min(0, i)) / 2
        ]) for i in start_ind
    ]
    # set gaussian kernel
    kernel = Gaussian2DKernel(x_stddev=gaussian_size)
    # initialize feature ids
    feature_list = []
    for feature_id in range(2):
        # gaussian filter this map
        if gaussian_size:
            feature_map = convolve(S_features[feature_id], kernel)
        else:
            feature_map = S_features[feature_id]
        # extract arrow lines
        arrow_lines = [
            map_coordinates(feature_map, _coords) for _coords in coord_list
        ]
        # calculate mean to find local maximum
        arrow_line_means = np.array(
            [np.mean(arrline) for arrline in arrow_lines])
        # calculate peaks for the mean behavior line
        feature_line_ids = find_peaks(
            arrow_line_means, distance=3,
            width=2)[0]  # this step could be made more rigorous
        feature_line_ids = feature_line_ids[
            start_ind[feature_line_ids] > lower_ind_thres]
        feature_list.append(feature_line_ids)
        # plot selected lines
        #plt.figure()
        #plt.plot(start_ind, arrow_line_means)
        #plt.plot(start_ind[feature_line_ids], arrow_line_means[feature_line_ids], 'ro')
        #plt.show()
    # select shared feature_ids
    selected_ids = []
    for _id in feature_list[0]:
        if sum(np.abs(feature_list[1] - _id) <= 1) > 0:
            _local_ids = feature_list[1][np.abs(feature_list[1] - _id) <= 1]
            _local_ids = np.concatenate([[_id], _local_ids])
            selected_ids.append(np.min(_local_ids))
    selected_ids = np.array(selected_ids)
    if len(selected_ids) == 0:
        return np.array([])
    # selected ids plus +-1 lines
    feature_map = convolve(S_features[1], kernel)
    selected_coords = [
        coord_list[_i]
        for _i in np.unique([selected_ids, selected_ids - 1, selected_ids + 1])
    ]
    selected_lines = [
        map_coordinates(feature_map, _coords) for _coords in selected_coords
    ]
    # call peaks
    pks = [
        find_peaks(_line, distance=2, width=2)[0] for _line in selected_lines
    ]
    pk_coords = np.sort(
        np.concatenate(
            [_coord[0, _pk] for _coord, _pk in zip(selected_coords, pks)]))
    # select into connected groups
    selected_groups = []
    _group = []
    for _c in pk_coords:
        if len(_group) == 0:
            _group.append(_c)
        elif sum(np.abs(np.array(_group) - _c) <= 1) >= 1:
            _group.append(_c)
        else:
            if len(_group) > 1:
                selected_groups.append(_group)
            # start a new group from the coordinate that broke the old one
            _group = [_c]
    # flush the final group
    if len(_group) > 1:
        selected_groups.append(_group)
    # pick from connected groups
    group_size_th = 2
    selected_pk_coords = np.sort([
        int(np.round(np.mean(_group))) for _group in selected_groups
        if len(_group) >= group_size_th
    ])
    if make_plot:
        plt.figure()
        plt.imshow(distmap, cmap='seismic_r', vmin=0, vmax=1000)
        plt.colorbar()
        plt.title("input distance map")
        edges = [0] + list(selected_pk_coords) + [dim]
        for _i, _c in enumerate(edges[:-1]):
            plt.plot(np.arange(_c, edges[_i + 1]),
                     np.ones(edges[_i + 1] - _c) * _c,
                     color='y',
                     linewidth=3.0)
            plt.plot(np.ones(edges[_i + 1] - _c) * edges[_i + 1],
                     np.arange(_c, edges[_i + 1]),
                     color='y',
                     linewidth=3.0)
        plt.xlim([0, dim])
        plt.show()

    return selected_pk_coords
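
A minimal usage sketch for the function above; the toy distance map and feature maps are synthetic placeholders (real inputs come from chromosome imaging data), so the returned boundary set may well be empty:

import numpy as np

dim = 100
distmap = np.random.uniform(0, 1000, size=(dim, dim))
S_features = [np.random.rand(dim, dim), np.random.rand(dim, dim)]
boundaries = Find_Boundaries(distmap, S_features,
                             gaussian_size=0.25, make_plot=False)
print(boundaries)  # 1d-array of boundary coordinates (possibly empty)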
Example 37
def extrapolate(
    precip,
    velocity,
    timesteps,
    outval=np.nan,
    xy_coords=None,
    allow_nonfinite_values=False,
    vel_timestep=1,
    **kwargs,
):
    """Apply semi-Lagrangian backward extrapolation to a two-dimensional
    precipitation field.

    Parameters
    ----------
    precip: array-like or None
        Array of shape (m,n) containing the input precipitation field. All
        values are required to be finite by default. If set to None, only the
        displacement field is returned without interpolating the inputs. This
        requires that return_displacement is set to True.
    velocity: array-like
        Array of shape (2,m,n) containing the x- and y-components of the m*n
        advection field. All values are required to be finite by default.
    timesteps: int or list of floats
        If timesteps is integer, it specifies the number of time steps to
        extrapolate. If a list is given, each element is the desired
        extrapolation time step from the current time. The elements of the list
        are required to be in ascending order.
    outval: float, optional
        Optional argument for specifying the value for pixels advected from
        outside the domain. If outval is set to 'min', the value is taken as
        the minimum value of precip.
        Default: np.nan
    xy_coords: ndarray, optional
        Array with the coordinates of the grid dimension (2, m, n ).

        * xy_coords[0]: x coordinates
        * xy_coords[1]: y coordinates

        By default, the *xy_coords* are computed for each extrapolation.
    allow_nonfinite_values: bool, optional
        If True, allow non-finite values in the precipitation and advection
        fields. This option is useful if the input fields contain a radar mask
        (i.e. pixels with no observations are set to nan).

    Other Parameters
    ----------------

    displacement_prev: array-like
        Optional initial displacement vector field of shape (2,m,n) for the
        extrapolation.
        Default: None
    n_iter: int
        Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
        the integration is done using the midpoint rule. Otherwise, the advection
        vectors are taken from the starting point of each interval.
        Default: 1
    return_displacement: bool
        If True, return the displacement between the initial input field and
        the one obtained by integrating along the advection field.
        Default: False
    vel_timestep: float
        The time step of the velocity field. It is assumed to have the same
        unit as the timesteps argument. Applicable if timesteps is a list.
        Default: 1.
    interp_order: int
        The order of interpolation to use. Default: 1 (linear). Setting this
        to 0 (nearest neighbor) gives the best computational performance but
        may produce visible artefacts. Setting this to 3 (cubic) gives the best
        ability to reproduce small-scale variability but may significantly
        increase the computation time.

    Returns
    -------
    out: array or tuple
        If return_displacement=False, return a time series of extrapolated
        fields of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
        extrapolated fields and the integrated trajectory (displacement) along
        the advection field.

    References
    ----------
    :cite:`GZ2002`

    """
    if precip is not None and precip.ndim != 2:
        raise ValueError("precip must be a two-dimensional array")

    if velocity.ndim != 3:
        raise ValueError("velocity must be a three-dimensional array")

    if precip is not None and not allow_nonfinite_values:
        if np.any(~np.isfinite(precip)):
            raise ValueError("precip contains non-finite values")

        if np.any(~np.isfinite(velocity)):
            raise ValueError("velocity contains non-finite values")

    if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
        raise ValueError("timesteps is not in ascending order")

    # defaults
    verbose = kwargs.get("verbose", False)
    displacement_prev = kwargs.get("displacement_prev", None)
    n_iter = kwargs.get("n_iter", 1)
    return_displacement = kwargs.get("return_displacement", False)
    interp_order = kwargs.get("interp_order", 1)

    if precip is None and not return_displacement:
        raise ValueError("precip is None but return_displacement is False")

    if "D_prev" in kwargs.keys():
        warnings.warn(
            "deprecated argument D_prev is ignored, use displacement_prev instead",
        )

    # if interp_order > 1, apply separate masking to preserve nan and
    # non-precipitation values
    if precip is not None and interp_order > 1:
        minval = np.nanmin(precip)
        mask_min = (precip > minval).astype(float)
        if allow_nonfinite_values:
            mask_finite = np.isfinite(precip)
            precip = precip.copy()
            precip[~mask_finite] = 0.0
            mask_finite = mask_finite.astype(float)

    prefilter = interp_order > 1

    if isinstance(timesteps, int):
        timesteps = np.arange(1, timesteps + 1)
        vel_timestep = 1.0
    elif np.any(np.diff(timesteps) <= 0.0):
        raise ValueError(
            "the given timestep sequence is not monotonously increasing")

    timestep_diff = np.hstack([[timesteps[0]], np.diff(timesteps)])

    if verbose:
        print("Computing the advection with the semi-lagrangian scheme.")
        t0 = time.time()

    if precip is not None and outval == "min":
        outval = np.nanmin(precip)

    if xy_coords is None:
        x_values, y_values = np.meshgrid(np.arange(velocity.shape[2]),
                                         np.arange(velocity.shape[1]))

        xy_coords = np.stack([x_values, y_values])

    def interpolate_motion(displacement, velocity_inc, td):
        coords_warped = xy_coords + displacement
        coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]

        velocity_inc_x = ip.map_coordinates(
            velocity[0, :, :],
            coords_warped,
            mode="nearest",
            order=1,
            prefilter=False,
        )
        velocity_inc_y = ip.map_coordinates(
            velocity[1, :, :],
            coords_warped,
            mode="nearest",
            order=1,
            prefilter=False,
        )

        velocity_inc[0, :, :] = velocity_inc_x
        velocity_inc[1, :, :] = velocity_inc_y

        if n_iter > 1:
            velocity_inc /= n_iter

        velocity_inc *= td / vel_timestep

    precip_extrap = []
    if displacement_prev is None:
        displacement = np.zeros((2, velocity.shape[1], velocity.shape[2]))
        velocity_inc = velocity.copy() * timestep_diff[0] / vel_timestep
    else:
        displacement = displacement_prev.copy()
        velocity_inc = np.empty(velocity.shape)
        interpolate_motion(displacement, velocity_inc, timestep_diff[0])

    for ti, td in enumerate(timestep_diff):
        if n_iter > 0:
            for k in range(n_iter):
                interpolate_motion(displacement - velocity_inc / 2.0,
                                   velocity_inc, td)
                displacement -= velocity_inc
                interpolate_motion(displacement, velocity_inc, td)
        else:
            if ti > 0 or displacement_prev is not None:
                interpolate_motion(displacement, velocity_inc, td)

            displacement -= velocity_inc

        coords_warped = xy_coords + displacement
        coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]

        if precip is not None:
            precip_warped = ip.map_coordinates(
                precip,
                coords_warped,
                mode="constant",
                cval=outval,
                order=interp_order,
                prefilter=prefilter,
            )

            if interp_order > 1:
                mask_warped = ip.map_coordinates(
                    mask_min,
                    coords_warped,
                    mode="constant",
                    cval=0,
                    order=1,
                    prefilter=False,
                )
                precip_warped[mask_warped < 0.5] = minval

                if allow_nonfinite_values:
                    mask_warped = ip.map_coordinates(
                        mask_finite,
                        coords_warped,
                        mode="constant",
                        cval=0,
                        order=1,
                        prefilter=False,
                    )
                    precip_warped[mask_warped < 0.5] = np.nan

            precip_extrap.append(np.reshape(precip_warped, precip.shape))

    if verbose:
        print("--- %s seconds ---" % (time.time() - t0))

    if precip is not None:
        if not return_displacement:
            return np.stack(precip_extrap)
        else:
            return np.stack(precip_extrap), displacement
    else:
        return None, displacement
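
A hedged usage sketch for the extrapolator above (toy uniform advection field; assumes the module-level numpy/scipy imports of the original source):

import numpy as np

precip = np.random.rand(50, 50)   # toy precipitation field
velocity = np.ones((2, 50, 50))   # uniform advection of one pixel per step
fields, displacement = extrapolate(precip, velocity, 3,
                                   return_displacement=True)
print(fields.shape)        # (3, 50, 50); border pixels are filled with outval
print(displacement.shape)  # (2, 50, 50)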
Example 38
    def interpolate(self,
                    l,
                    m,
                    time=None,
                    freq=None,
                    freqaxis=None,
                    output=None):
        """Interpolates l/m coordinates in the beam.
    l,m may be arrays (both must be the same shape, or will be promoted to the same shape)

    If beam has a freq dependence, then an array of frequency coordinates (freq) must be given,
    and freqaxis must be set to the number of the frequency axis. Then the following
    possibilities apply:

    (A) len(freq)==1  (freqaxis need not be set)
        l/m is interpolated at the same frequency point.
        Output array is same shape as l/m.

    (B) len(freq)>1 and l.shape[freqaxis] == 1:
        The same l/m is interpolated at every point in freq.
        Output array is same shape as l/m, plus an extra frequency axis (number freqaxis)

    (C) len(freq)>1 and l.shape[freqaxis] == len(freq)
        l/m has its own freq dependence, so a different l/m/freq is interpolated at every point.
        Output array is same shape as l/m.

    And finally (D):

    (D) No dependence on frequency in the beam.
        We simply interpolate every l/m value as is. Output array is same shape as l/m.

    'time' is currently ignored -- provided for later compatibility (i.e. beams with time planes)
    """
        # make sure inputs are arrays
        l = numpy.array(l) + self.l0
        m = numpy.array(m) + self.m0
        # (freq is validated and converted to an array below)

        # promote l,m to the same shape
        l, m = unite_shapes(l, m)
        dprint(3, "input l/m is", l, m)
        l, m = self.beam.lmToBeam(l, m)
        dprint(3, "in beam coordinates this is", l, m)
        # now we make a 2xN coordinate array for map_coordinates
        # lm[0,:] will be flattened L array, lm[1,:] will be flattened M array
        # lm[2,:] will be flattened freq array (if we have a freq dependence)
        # Do we have a frequency axis in the beam? (case A,B,C):
        if self.beam.hasFrequencyAxis():
            if freq is None:
                raise ValueError, "frequencies not specified, but beam has a frequency dependence"
            freq = numpy.array(freq)
            if not freq.ndim:
                freq = freq.reshape(1)
                print(freq)
            freq = self.beam.freqToBeam(freq)
            # case (A): reuse same frequency for every l/m point
            if len(freq) == 1:
                lm = numpy.vstack((l.ravel(), m.ravel(), [freq[0]] * l.size))
            # case B/C:
            else:
                # first turn freq vector into an array of the proper shape
                if freqaxis is None:
                    raise ValueError, "frequency axis not specified, but beam has a frequency dependence"
                freqshape = [1] * (freqaxis + 1)
                freqshape[freqaxis] = len(freq)
                freq = freq.reshape(freqshape)
                # now promote freq to same shape as l,m. This takes care of cases B and C.
                # (the freq axis of l/m will be expanded, and other axes of freq will be expanded)
                l, freq = unite_shapes(l, freq)
                m, freq = unite_shapes(m, freq)
                lm = numpy.vstack((l.ravel(), m.ravel(), freq.ravel()))
        # case (D): no frequency dependence in the beam
        else:
            lm = numpy.vstack((l.ravel(), m.ravel()))
        # interpolate and reshape back to shape of L
        if output is None:
            output = numpy.zeros(l.shape, complex)
        elif output.shape != l.shape:
            output.resize(l.shape)

        self.beam.interpolate(output, lm)

        return output

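        # NOTE: everything below this return is unreachable in this snippet;
        # it appears to be retained from an earlier implementation that
        # called map_coordinates directly on the beam planes.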
        output.real = interpolation.map_coordinates(
            self._beam_real,
            lm,
            order=self._spline_order,
            prefilter=(self._spline_order == 1)).reshape(l.shape)
        output.imag = interpolation.map_coordinates(
            self._beam_imag,
            lm,
            order=self._spline_order,
            prefilter=(self._spline_order == 1)).reshape(l.shape)
        if self._beam_ampl is not None:
            output_ampl = interpolation.map_coordinates(
                self._beam_ampl,
                lm,
                order=self._spline_order,
                prefilter=(self._spline_order == 1)).reshape(l.shape)
            phase_array = numpy.arctan2(output.imag, output.real)
            output.real = output_ampl * numpy.cos(phase_array)
            output.imag = output_ampl * numpy.sin(phase_array)
        dprint(3, "interpolated value is", output)
        return output
Example 39
def get_unknown_uncertainty(cloud_distances, transmission_values):
    """Calculates the unknown uncertainty term, which is part of the surface
       temperature uncertainty estimation.

    Args:
        cloud_distances <numpy.2darray>: distances to nearest cloud
        transmission_values <numpy.2darray>: transmission

    Returns:
        unknown_uncertainty <numpy.2darray>: interpolated unknown uncertainty
    """

    # Flatten the inputs.
    flat_cloud_distances = cloud_distances.flatten()
    flat_transmission_values = transmission_values.flatten()

    # Matrix of "unknown errors," which was calculated from observed and
    # predicted ST errors from the L7 global validation study.
    unknown_error_matrix = np.array([[2.3749, 2.6962, 2.5620, 2.1131],
                                     [1.9912, 1.6789, 1.4471, 1.2739],
                                     [1.7925, 1.0067, 0.9143, 0.6366],
                                     [1.9416, 1.3558, 0.7604, 0.6682],
                                     [1.3861, 0.8269, 0.7404, 0.3125]])

    # tau bins are 0.3 - 0.55, 0.55 - 0.7, 0.7 - 0.85, 0.85 - 1.0
    # cloud bins are 0 - 1 km, 1 - 5 km, 5 - 10 km, 10 - 40 km, 40 - inf
    #
    # tau_interp and cloud_interp should be a vector of the center values
    # for each bin, but we also want anything outside the entire range to be
    # equal to the nearest value from the unknown matrix.
    tau_interp = np.array([0.0, 0.425, 0.625, 0.775, 0.925, 1.0])
    cld_interp = np.array([0, 0.5, 3.0, 7.5, 25.0, 82.5, 200.0])

    # Define the highest values in the vectors. These are the last values.
    tau_highest = tau_interp[-1]
    cld_highest = cld_interp[-1]

    # From input transmission values, find closest indices from tau_interp
    # vector, calculate step in vector, then calculate fractional tau index.
    tau_close_index = np.searchsorted(tau_interp,
                                      flat_transmission_values,
                                      side='right')
    # clip so that the +1 lookup below stays in bounds
    tau_close_index = np.clip(tau_close_index - 1, 0, len(tau_interp) - 2)
    tau_step = tau_interp[tau_close_index + 1] - tau_interp[tau_close_index]
    tau_frac_index = tau_close_index + (
        (flat_transmission_values - tau_interp[tau_close_index]) / tau_step)
    # values at or beyond the top of the range map to the last index
    one_locations = np.where(flat_transmission_values >= tau_highest)
    tau_frac_index[one_locations] = len(tau_interp) - 1

    # Memory cleanup
    del tau_interp
    del tau_close_index
    del tau_step
    del one_locations

    # From input cloud distance values, find closest indices from cld_interp
    # vector, calculate step in vector, then calculate fractional cloud index.
    cld_close_index = np.searchsorted(cld_interp,
                                      flat_cloud_distances,
                                      side='right')
    # clip so that the +1 lookup below stays in bounds
    cld_close_index = np.clip(cld_close_index - 1, 0, len(cld_interp) - 2)
    cld_step = cld_interp[cld_close_index + 1] - cld_interp[cld_close_index]
    cld_frac_index = cld_close_index + (
        (flat_cloud_distances - cld_interp[cld_close_index]) / cld_step)
    # values at or beyond the top of the range map to the last index
    two_hundred_locations = np.where(flat_cloud_distances >= cld_highest)
    cld_frac_index[two_hundred_locations] = len(cld_interp) - 1

    # Memory cleanup
    del cld_interp
    del cld_close_index
    del cld_step
    del two_hundred_locations

    # Merge the arrays so they represent coordinates in the unknown_error_matrix
    # grid to map_coordinates.
    coordinates = np.row_stack((cld_frac_index, tau_frac_index))

    # Memory cleanup
    del tau_frac_index
    del cld_frac_index

    # Interpolate the results at the specified coordinates.
    unknown_uncertainty = map_coordinates(unknown_error_matrix,
                                          coordinates,
                                          order=1,
                                          mode='nearest')

    # Memory cleanup
    del unknown_error_matrix
    del coordinates

    return unknown_uncertainty
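
A hedged usage sketch of the bilinear lookup above; the toy inputs below are placeholders (real inputs come from the surface temperature uncertainty pipeline):

import numpy as np

cloud_distances = np.array([[0.2, 12.0], [55.0, 3.0]])     # km to nearest cloud
transmission_values = np.array([[0.5, 0.8], [0.95, 0.6]])  # atmospheric tau
u = get_unknown_uncertainty(cloud_distances, transmission_values)
print(u.reshape(cloud_distances.shape))  # bilinear samples of the error matrix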
Example 40
def elastic_transform_nd(image,
                         alpha,
                         sigma,
                         random_state=None,
                         order=1,
                         lazy=False):
    """Expects data to be (nx, ny, n1 ,..., nm)
    params:
    ------

    alpha:
    the scaling parameter.
    E.g.: alpha=2 => distorts images up to 2x scaling

    sigma:
    standard deviation of gaussian filter.
    E.g.
         low (sig~=1e-3) => no smoothing, pixelated.
         high (1/5 * imsize) => smooth, more like affine.
         very high (1/2*im_size) => translation
    """

    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    imsize = shape[:2]
    dim = shape[2:]

    # Random smoothed displacement fields
    blur_size = int(4 * sigma) | 1
    dx = cv2.GaussianBlur(random_state.rand(*imsize) * 2 - 1,
                          ksize=(blur_size, blur_size),
                          sigmaX=sigma) * alpha
    dy = cv2.GaussianBlur(random_state.rand(*imsize) * 2 - 1,
                          ksize=(blur_size, blur_size),
                          sigmaX=sigma) * alpha

    # use as_strided to copy things over across n1...nn channels
    dx = as_strided(dx.astype(np.float32),
                    strides=(0, ) * len(dim) + (4 * shape[1], 4),
                    shape=dim + (shape[0], shape[1]))
    dx = np.transpose(dx, axes=(-2, -1) + tuple(range(len(dim))))

    dy = as_strided(dy.astype(np.float32),
                    strides=(0, ) * len(dim) + (4 * shape[1], 4),
                    shape=dim + (shape[0], shape[1]))
    dy = np.transpose(dy, axes=(-2, -1) + tuple(range(len(dim))))

    coord = np.meshgrid(
        *[np.arange(shape_i) for shape_i in (shape[1], shape[0]) + dim])
    indices = [
        np.reshape(e + de, (-1, 1))
        for e, de in zip([coord[1], coord[0]] + coord[2:], [dy, dx] +
                         [0] * len(dim))
    ]

    if lazy:
        return indices

    return map_coordinates(image, indices, order=order,
                           mode='reflect').reshape(shape)
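
A hedged usage sketch (assumes numpy, cv2, numpy.lib.stride_tricks.as_strided and scipy's map_coordinates are imported at module level, as the function body requires):

import numpy as np

image = np.random.rand(64, 64, 3).astype(np.float32)  # (nx, ny, channels)
warped = elastic_transform_nd(image, alpha=10, sigma=4)
print(warped.shape)  # (64, 64, 3); the same warp is applied to every channel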
Example 41
def applyDeformation(image,
                     field,
                     ref=None,
                     order=1,
                     mode=None,
                     cval=None,
                     premat=None):
    """Applies a :class:`DeformationField` to an :class:`.Image`.

    The image is transformed into the space of the field's reference image
    space. See the ``scipy.ndimage.interpolation.map_coordinates`` function
    for details on the ``order``, ``mode`` and ``cval`` options.

    If an alternate reference image is provided via the ``ref`` argument,
    the deformation field is resampled into its space, and then applied to
    the input image. It is therefore assumed that an alternate ``ref`` is
    aligned in world coordinates with the field's actual reference image.

    :arg image:  :class:`.Image` to be transformed

    :arg field:  :class:`DeformationField` to use

    :arg ref:    Alternate reference image - if not provided, ``field.ref``
                 is used

    :arg order:  Spline interpolation order, passed through to the
                 ``scipy.ndimage.interpolation.map_coordinates`` function -
                 ``0`` corresponds to nearest neighbour interpolation, ``1``
                 (the default) to linear interpolation, and ``3`` to
                 cubic interpolation.

    :arg mode:   How to handle regions which are outside of the image FOV.
                 Defaults to ``'nearest'``.

    :arg cval:   Constant value to use when ``mode='constant'``.

    :arg premat: Optional affine transform which can be used if ``image`` is
                 not in the same space as ``field.src``. Assumed to transform
                 from ``image`` **voxel** coordinates into ``field.src``
                 **voxel** coordinates.

    :return:     ``numpy.array`` containing the transformed image data.
    """

    if order is None: order = 1
    if mode is None: mode = 'nearest'
    if cval is None: cval = 0
    if ref is None: ref = field.ref

    # We need the field to contain
    # absolute source image voxel
    # coordinates
    field = convertDeformationSpace(field, 'voxel', 'voxel')
    if field.deformationType != 'absolute':
        field = DeformationField(convertDeformationType(field, 'absolute'),
                                 header=field.header,
                                 src=field.src,
                                 ref=field.ref,
                                 srcSpace='voxel',
                                 refSpace='voxel',
                                 defType='absolute')

    # If the field is not voxel-aligned
    # to the reference, we need to
    # resample the field itself into the
    # reference image space (assumed to
    # be world-aligned). If field and ref
    # are not world-aligned, regions
    # of the field outside of the
    # reference image space will contain
    # -1s, so will be detected as out of
    # bounds by map_coordinates below.
    #
    # This will potentially result in
    # truncation at the field boundaries,
    # but there's nothing we can do about
    # that.
    src = field.src

    if not field.sameSpace(ref):
        field = resample.resampleToReference(field,
                                             ref,
                                             order=order,
                                             mode='constant',
                                             cval=-1)[0]
    else:
        field = field.data

    # If the input image is in a
    # different space to the field
    # source space, we need to
    # adjust the resampling matrix.
    # We assume world-world alignment
    # between the original source
    # and the image to be resampled
    if (premat is not None) or (not image.sameSpace(src)):
        if premat is None:
            premat = affine.concat(image.getAffine('world', 'voxel'),
                                   src.getAffine('voxel', 'world'))
        else:
            premat = affine.invert(premat)
        shape = field.shape
        field = field.reshape((-1, 3))
        field = affine.transform(field, premat)
        field = field.reshape(shape)

    field = field.transpose((3, 0, 1, 2))
    return ndinterp.map_coordinates(image.data,
                                    field,
                                    order=order,
                                    mode=mode,
                                    cval=cval)
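
Stripped of the fslpy Image/space bookkeeping, the core operation above is a single map_coordinates call over a field of absolute voxel coordinates. A minimal self-contained sketch (identity deformation plus a constant shift; this is not the fslpy API):

import numpy as np
from scipy.ndimage import map_coordinates

image = np.random.rand(16, 16, 16)
# absolute voxel coordinates, shape (x, y, z, 3)
field = np.stack(np.meshgrid(*[np.arange(16)] * 3, indexing='ij'),
                 axis=-1).astype(float)
field[..., 0] += 2.5  # sample 2.5 voxels along the first axis

warped = map_coordinates(image,
                         field.transpose((3, 0, 1, 2)),  # -> (3, x, y, z)
                         order=1, mode='nearest')
print(warped.shape)  # (16, 16, 16)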
Example 42
def sample_cartesian2hexa(f_cartesian, fill_value=0.):

    # A square integer meshgrid converted to hexagonal coordinates results in a parallelogram-shaped section
    # from the hexagonal lattice. In our hexagonal grid, the shape looks like /__/ (skewed in the x / n2 direction)
    # In order to make sure no information in the image is lost, we make sure the parallelogram encloses the square.
    # The height of the parallelogram (size along n1 dimension) is therefore equal to the height of the square,
    # while the width of the parallelogram (at any height) is equal to the height times (1 + 1. / sqrt(3)).
    # This factor can easily be seen by making a drawing and noting that the difference y between the length of the
    # horizontal side of the parallelogram and the side x of the inscribed square is
    # y = x / tan(pi / 3) = x / sqrt(3)

    alpha = 1 + 1. / np.sqrt(3)

    # Map the corners of the image (in Cartesian coordinates) to hexagonal coordinates
    n1_00, n2_00 = cartesian2hexa(x=0, y=0)
    n1_10, n2_10 = cartesian2hexa(x=f_cartesian.shape[-1] - 1, y=0)
    n1_01, n2_01 = cartesian2hexa(x=0, y=f_cartesian.shape[-2] - 1)
    n1_11, n2_11 = cartesian2hexa(x=f_cartesian.shape[-1] - 1,
                                  y=f_cartesian.shape[-2] - 1)

    min_n1 = np.floor(np.min([n1_00, n1_10, n1_01, n1_11]))
    max_n1 = np.ceil(np.max([n1_00, n1_10, n1_01, n1_11]))
    min_n2 = np.floor(np.min([n2_00, n2_10, n2_01, n2_11]))
    max_n2 = np.ceil(np.max([n2_00, n2_10, n2_01, n2_11]))
    n1 = np.arange(min_n1, max_n1 + 1)
    n2 = np.arange(min_n2, max_n2 + 1)
    n1, n2 = np.meshgrid(n1, n2)
    """min_n1 = np.min([n1_00, n1_10, n1_01, n1_11])
    max_n1 = np.max([n1_00, n1_10, n1_01, n1_11])
    min_n2 = np.min([n2_00, n2_10, n2_01, n2_11])
    max_n2 = np.max([n2_00, n2_10, n2_01, n2_11])

    import matplotlib.pyplot as plt
    fig = plt.figure()
    plt.scatter([min_n1, min_n1, max_n1, max_n1], [min_n2, max_n2, min_n2, max_n2])

    n1_sz = np.ceil(max_n1 - min_n1) + 1
    n1 = np.linspace(start=min_n1, stop=min_n1 + n1_sz - 1, num=n1_sz, endpoint=True)
    n2_sz = np.ceil(max_n2 - min_n2)
    n2 = np.linspace(start=min_n2, stop=min_n2 + n2_sz - 1, num=n2_sz, endpoint=True)
    n1, n2 = np.meshgrid(n1, n2)"""

    # The lower-right corner of the image (highest x and y coordinate), in hexagonal coordinates
    # sz1, sz2 = cartesian2hexa(x=f_cartesian.shape[-1], y=f_cartesian.shape[-2])

    # sz1 = np.ceil(alpha * f_cartesian.shape[-2])
    # sz2 = f_cartesian.shape[-1]

    # # n1, n2 = np.meshgrid(np.arange(0, f_cartesian.shape[-2]), np.arange(0, f_cartesian.shape[-1]))
    # n1, n2 = np.meshgrid(np.arange(sz1), np.arange(sz2))
    # # n1, n2 = centered_meshgrid(sz1, sz2)
    x, y = hexa2cartesian(n1, n2)
    # x -= sz1 - f_cartesian.shape[-2]

    xi = np.c_[y[..., None], x[..., None]].astype(f_cartesian.dtype)
    # f_hex = interpn(
    #         (np.arange(0, f_cartesian.shape[-2]), np.arange(0, f_cartesian.shape[-1])),
    #         f_cartesian,
    #         xi,
    #         method='nearest', # 'splinef2d',
    #         bounds_error=False,
    #         fill_value=fill_value
    # )
    from scipy.ndimage.interpolation import map_coordinates
    f_hex = map_coordinates(input=f_cartesian,
                            coordinates=xi.T,
                            order=5,
                            mode='constant',
                            cval=fill_value).T

    # return f_hex, n1, n2, x, y
    return f_hex
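
A hedged usage sketch; cartesian2hexa/hexa2cartesian are assumed to be the module's own lattice transforms used inside the function:

import numpy as np

f_cart = np.random.rand(32, 32)
f_hex = sample_cartesian2hexa(f_cart, fill_value=0.)
print(f_hex.shape)  # parallelogram-shaped section of the hexagonal lattice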
Example 43
def correct_fov_image(dax_filename, sel_channels, 
                      load_file_lock=None,
                      single_im_size=_image_size, all_channels=_allowed_colors,
                      num_buffer_frames=10, num_empty_frames=0, 
                      drift=None, calculate_drift=False, 
                      drift_channel='488', ref_filename=None,  
                      use_autocorr=True, drift_args={},
                      corr_channels=_corr_channels, correction_folder=_correction_folder,
                      warp_image=True, 
                      hot_pixel_corr=True, hot_pixel_th=4, z_shift_corr=False,
                      illumination_corr=True, illumination_profile=None, 
                      bleed_corr=True, bleed_profile=None, 
                      chromatic_ref_channel='647', chromatic_corr=True, chromatic_profile=None, 
                      gaussian_highpass=False, gauss_sigma=5, gauss_truncate=2,
                      normalization=False, output_dtype=np.uint16,
                      return_drift=False, verbose=True):
    """Function to correct one whole field-of-view image in proper manner
    Inputs:
        
    Outputs:
        """
    ## check inputs
    # dax_filename
    if not os.path.isfile(dax_filename):
        raise IOError(f"Dax file: {dax_filename} is not a file, exit!")
    if not isinstance(dax_filename, str) or dax_filename[-4:] != '.dax':
        raise IOError(f"Dax file: {dax_filename} has wrong data type, exit!")
    if verbose:
        print(f"- correct the whole fov for image: {dax_filename}")
        _total_start = time.time()
    # selected channels
    if isinstance(sel_channels, str) or isinstance(sel_channels, int):
        sel_channels = [str(sel_channels)]
    else:
        sel_channels = [str(ch) for ch in sel_channels]
    # shared parameters
    single_im_size = np.array(single_im_size, dtype=int)  # np.int is removed in numpy >= 1.24
    all_channels = [str(ch) for ch in all_channels]
    num_buffer_frames = int(num_buffer_frames)
    num_empty_frames = int(num_empty_frames)
    # drift
    if drift is None:
        drift = np.zeros(len(single_im_size), dtype=np.float32)
    else:
        drift = np.array(drift, dtype=np.float32)
    if len(drift) != len(single_im_size):
        raise IndexError(f"drift should have the same dimension as single_im_size.")
    
    ## correction channels and profiles
    corr_channels = [str(ch) for ch in sorted(corr_channels, key=lambda v:-int(v)) if str(ch) in all_channels]    
    for _ch in corr_channels:
        if _ch not in all_channels:
            raise ValueError(f"Wrong correction channel:{_ch}, should be within {all_channels}")
    
    ## determine loaded channels
    _overlap_channels = [_ch for _ch in corr_channels if _ch in sel_channels] # channels needs be corrected by bleedthrough/chromatic
    if len(_overlap_channels) > 0 and bleed_corr:
        _load_channels = [_ch for _ch in corr_channels]
    else:
        _load_channels = []
    # append sel_channels
    for _ch in sel_channels:
        if _ch not in _load_channels:
            _load_channels.append(_ch)
    # append bead_image if going to do drift corr
    _drift_channel = str(drift_channel)
    if _drift_channel not in all_channels:
        raise ValueError(f"Wrong input of drift_channel:{_drift_channel}, should be among {all_channels}")
    if calculate_drift and drift_channel not in _load_channels:
        _load_channels.append(drift_channel)
    
    ## check profiles
    # load illumination profiles for selected channels (should exist for all include beads)
    if illumination_corr:
        if illumination_profile is None:
            illumination_profile = load_correction_profile('illumination', 
                                    corr_channels=_load_channels, 
                                    correction_folder=correction_folder, all_channels=all_channels,
                                    ref_channel=chromatic_ref_channel, 
                                    im_size=single_im_size, 
                                    verbose=verbose)
        else:
            if not isinstance(illumination_profile, dict):
                raise TypeError(f"Wrong input type of illumination_profile, should be dict!")
            for _ch in _load_channels:
                if _ch not in illumination_profile:
                    raise KeyError(f"channel:{_ch} not given in illumination_profile")
    # load bleedthrough profiles if required
    if bleed_corr and len(_overlap_channels) > 0:
        if bleed_profile is None:
            bleed_profile = load_correction_profile('bleedthrough', 
                                corr_channels=corr_channels, 
                                correction_folder=correction_folder, all_channels=all_channels,
                                ref_channel=chromatic_ref_channel, im_size=single_im_size, verbose=verbose)
        else:
            bleed_profile = np.array(bleed_profile, dtype=np.float32)
            if bleed_profile.shape != (len(corr_channels),len(corr_channels),single_im_size[-2], single_im_size[-1]) and bleed_profile.shape != tuple([len(corr_channels),len(corr_channels)]+list(single_im_size)):
                raise IndexError(f"Wrong input shape for bleed_profile: {bleed_profile.shape}, should be {(len(corr_channels),len(corr_channels),single_im_size[-2], single_im_size[-1])}")
    # load chromatic or chromatic_constants depends on whether do warpping
    if chromatic_corr and len(_overlap_channels) > 0:
        if chromatic_profile is None:
            if warp_image:
                chromatic_profile = load_correction_profile('chromatic', 
                                        corr_channels=corr_channels, 
                                        correction_folder=correction_folder, all_channels=all_channels,
                                        ref_channel=chromatic_ref_channel, 
                                        im_size=single_im_size, 
                                        verbose=verbose)
            else:
                chromatic_profile = load_correction_profile('chromatic_constants',      
                                        corr_channels=corr_channels, 
                                        correction_folder=correction_folder, all_channels=all_channels,
                                        ref_channel=chromatic_ref_channel, 
                                        im_size=single_im_size, 
                                        verbose=verbose)
        else:
            if not isinstance(chromatic_profile, dict):
                raise TypeError(f"Wrong input type of chromatic_profile, should be dict!")
            for _ch in _load_channels:
                if _ch in corr_channels and _ch not in chromatic_profile:
                    raise KeyError(f"channel:{_ch} not given in chromatic_profile")

    ## check output data-type
    # if normalization, output should be float
    if normalization and output_dtype==np.uint16:
        output_dtype = np.float32 
    # otherwise keep original dtype
    else:
        pass
    ## Load image
    if verbose:
        print(f"-- loading image from file:{dax_filename}", end=' ')
        _load_time = time.time()
    if 'DaxReader' not in locals():
        from ..visual_tools import DaxReader
    if load_file_lock is not None:
        load_file_lock.acquire()
    _reader = DaxReader(dax_filename, verbose=verbose)
    _raw_im = _reader.loadAll()
    _reader.close()
    if load_file_lock is not None:
        load_file_lock.release()
    # get number of colors and frames
    #from ..get_img_info import get_num_frame, split_channels
    _full_im_shape, _num_color = get_num_frame(dax_filename,
                                               frame_per_color=single_im_size[0],
                                               buffer_frame=num_buffer_frames, 
                                               empty_frame=num_empty_frames)
    _ims = split_im_by_channels(_raw_im, _load_channels, all_channels[:_num_color], 
                                single_im_size=single_im_size, 
                                num_buffer_frames=num_buffer_frames,
                                num_empty_frames=num_empty_frames, skip_frame0=False)
    # clear memory
    del(_raw_im)
    if verbose:
        print(f" in {time.time()-_load_time:.3f}s")

    ## hot-pixel removal
    if hot_pixel_corr:
        if verbose:
            print(f"-- removing hot pixels for channels:{_load_channels}", end=' ')
            _hot_time = time.time()
        # loop through and correct
        for _i, (_ch, _im) in enumerate(zip(_load_channels, _ims)):
            _nim = corrections.Remove_Hot_Pixels(_im.astype(np.float32),
                dtype=output_dtype, hot_th=hot_pixel_th)
            _ims[_i] = _nim
        if verbose:
            print(f"in {time.time()-_hot_time:.3f}s")

    ## Z-shift correction
    if z_shift_corr:
        if verbose:
            print(f"-- correct Z-shifts for channels:{_load_channels}", end=' ')
            _z_time = time.time()
        for _i, (_ch, _im) in enumerate(zip(_load_channels, _ims)):
            _ims[_i] = corrections.Z_Shift_Correction(_im.astype(np.float32),
                dtype=output_dtype, normalization=False)
        if verbose:
            print(f"in {time.time()-_z_time:.3f}s")
    ## bleedthrough correction
    # do bleedthrough correction if there's any final required image within corr_channels
    if len(_overlap_channels) > 0 and bleed_corr:
        if verbose:
            print(f"-- bleedthrough correction for channels: {corr_channels}", end=' ')
            _bleed_time = time.time()
        # extract all images within corr_channels
        _bld_ims = [_ims[_load_channels.index(_ch)] for _ch in corr_channels]
        # initialize list to store corrected images
        _bld_corr_ims = []
        for _i, _ch in enumerate(corr_channels):
            # new image is the sum of all intensity contribution from images in corr_channels
            _nim = np.sum([_im * bleed_profile[_i, _j] 
                            for _j,_im in enumerate(_bld_ims)],axis=0)
            _bld_corr_ims.append(_nim)
        # update images
        for _nim, _ch in zip(_bld_corr_ims, corr_channels):
            # restore output_type
            _nim[_nim > np.iinfo(output_dtype).max] = np.iinfo(output_dtype).max
            _nim[_nim < np.iinfo(output_dtype).min] = np.iinfo(output_dtype).min
            _ims[_load_channels.index(_ch)] = _nim.astype(output_dtype)
        # clear
        del(_bld_ims, _bld_corr_ims, bleed_profile)
        if verbose:
            print(f"in {time.time()-_bleed_time:.3f}s")

    ## illumination correction
    if illumination_corr:
        if verbose:
            print(f"-- illumination correction for channels:", end=' ')
            _illumination_time = time.time()
        for _i, (_im,_ch) in enumerate(zip(_ims, _load_channels)):
            if verbose:
                print(f"{_ch}", end=', ')
            _ims[_i] = (_im.astype(np.float32) / illumination_profile[_ch][np.newaxis,:]).astype(output_dtype)
        # clear
        del(illumination_profile)
        if verbose:
            print(f"in {time.time()-_illumination_time:.3f}s")

    ## calculate bead drift if required
    if calculate_drift:
        if verbose:
            print(f"-- apply bead_drift calculate for channel: {_drift_channel}")
            _drift_time = time.time()
        if 'align_image' not in locals():
            from ..correction_tools.alignment import align_image
        # update drift_args
        _updated_drift_args = {_k:_v for _k,_v in drift_args.items()}
        _updated_drift_args.update({
            'all_channels': all_channels,
            'ref_all_channels': all_channels,
            'drift_channel': drift_channel,
        })
        _drift_corr_args = {
            'single_im_size': single_im_size,
            'num_buffer_frames':num_buffer_frames,
            'num_empty_frames':num_empty_frames,
            'correction_folder':correction_folder,
        }
        _drift, _drift_flag = align_image(
            _ims[_load_channels.index(_drift_channel)],
            ref_filename, 
            use_autocorr=use_autocorr, 
            correction_args=_drift_corr_args,
            verbose=verbose,
            **_updated_drift_args,
        )
        
        if verbose:
            print(f"--- finish drift: {np.around(_drift,2)} in {time.time()-_drift_time:.3f}s")
    else:
        _drift = drift.copy()
        _drift_flag = 0
        
    ## chromatic aberration
    _chromatic_channels = [_ch for _ch in corr_channels 
                            if _ch in sel_channels and _ch != chromatic_ref_channel]
    if warp_image:
        if verbose:
            if chromatic_corr:
                print(f"-- warp image with chromatic correction for channels: {_chromatic_channels} and drift:{np.round(_drift, 2)}", end=' ')
            else:
                print(f"-- warp image with drift:{np.round(_drift, 2)}", end=' ')
            _warp_time = time.time()
        for _i, _ch in enumerate(sel_channels):
            if (chromatic_corr and _ch in _chromatic_channels) or _drift.any():
                if verbose:
                    print(f"{_ch}", end=', ')
                # 0. get old image
                _im = _ims[_load_channels.index(_ch)]
                # 1. get coordinates to be mapped
                _coords = np.meshgrid( np.arange(single_im_size[0]), 
                        np.arange(single_im_size[1]), 
                        np.arange(single_im_size[2]), )
                # transpose is necessary  
                _coords = np.stack(_coords).transpose((0, 2, 1, 3)) 
                # 2. calculate corrected coordinates for chromatic aberration
                if chromatic_corr and _ch in _chromatic_channels:
                    _coords = _coords + chromatic_profile[_ch]#[:,np.newaxis,:,:] # only needed for old correction profiles
                # 3. apply drift if necessary
                if _drift.any():
                    _coords = _coords - _drift[:, np.newaxis,np.newaxis,np.newaxis]
                # 4. map coordinates (the warp must run regardless of verbose)
                _corr_im = map_coordinates(_im, 
                                           _coords.reshape(_coords.shape[0], -1),
                                           mode='nearest').astype(output_dtype)
                _corr_im = _corr_im.reshape(np.shape(_im))
                # store corrected image
                _ims[_load_channels.index(_ch)] = _corr_im.copy()
                # local clear
                del(_coords, _im, _corr_im)
        # clear
        if verbose:
            print(f"in {time.time()-_warp_time:.3f}s")
    else:
        if verbose:
            if chromatic_corr:
                print(f"-- generate translation function for chromatic correction for channels: {_chromatic_channels} and drift:{np.round(_drift, 2)}", end=' ')
            else:
                print(f"-- -- generate translation function with drift:{np.round(_drift, 2)}", end=' ')
            _warp_time = time.time()
        # generate mapping function for spot coordinates
        from ..correction_tools.chromatic import generate_chromatic_function
        # init corr functions
        _warp_functions = []
        for _i, _ch in enumerate(sel_channels):
            # with drift in consideration
            if (chromatic_corr and _ch in _chromatic_channels) or _drift.any():
                # with chromatic aberration
                if chromatic_corr and _ch in _chromatic_channels:
                    _func = generate_chromatic_function(chromatic_profile[_ch], _drift)
                # without chromatic
                else:
                    _func = generate_chromatic_function(None, _drift)
            # no translating
            else:
                def _func(_spots):
                    return _spots
            _warp_functions.append(_func)
        # clear
        if verbose:
            print(f"in {time.time()-_warp_time:.3f}s")
    ## apply gaussian
    if gaussian_highpass:
        if verbose:
            print(f"-- applying gaussian highpass filte, sigma={gauss_sigma}", end=' ')
            _highpass_time = time.time()
        from ..correction_tools.filter import gaussian_high_pass_filter
        for _i, _im in enumerate(_ims):
            _ims[_i] = gaussian_high_pass_filter(_im, gauss_sigma, gauss_truncate)
        # clear
        if verbose:
            print(f"in {time.time()-_highpass_time:.3f}s")

    ## normalization
    if normalization:
        for _i, _im in enumerate(_ims):
            _ims[_i] = _im.astype(np.float32) / np.median(_im)
    
    ## summarize and report selected_ims
    _sel_ims = []
    for _ch in sel_channels:
        _sel_ims.append(_ims[_load_channels.index(_ch)].astype(output_dtype).copy())
    # clear
    del(_ims)
    if verbose:
        print(f"-- finish correction in {time.time()-_total_start:.3f}s")
    # return
    _return_args = [_sel_ims]
    if not warp_image:
        _return_args.append(_warp_functions)
    if return_drift:
        _return_args.extend([_drift, _drift_flag])
    
    return tuple(_return_args)
Example 44
## We pre-allocate the memory for the 15*15 image patches extracted
## around each corner point from both images
patch_size = 15
npts1 = x1.shape[0]
npts2 = x2.shape[0]
patches1 = np.zeros((patch_size, patch_size, npts1))
patches2 = np.zeros((patch_size, patch_size, npts2))

## The following part extracts the patches using bilinear interpolation
k = (patch_size - 1) / 2.
xv, yv = np.meshgrid(np.arange(-k, k + 1), np.arange(-k, k + 1))

for i in range(npts1):
    patch = map_coordinates(I1, (yv + y1[i], xv + x1[i]))
    patches1[:, :, i] = patch
for i in range(npts2):
    patch = map_coordinates(I2, (yv + y2[i], xv + x2[i]))
    patches2[:, :, i] = patch

############################ SSD MEASURE ######################################
## We compute the sum of squared differences (SSD) of pixels' intensities
## for all pairs of patches extracted from the two images
distmat = np.zeros((npts1, npts2))
for i1 in range(npts1):
    for i2 in range(npts2):
        # Sum of Squared Differences
        distmat[i1, i2] = np.sum((patches1[:, :, i1] - patches2[:, :, i2])**2)

## Next we compute pairs of patches that are mutually nearest neighbors
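
The mutual-nearest-neighbor step itself is not shown in this excerpt; a hedged sketch of one standard way to compute it from distmat:

idx12 = np.argmin(distmat, axis=1)  # best match in image 2 for each patch of image 1
idx21 = np.argmin(distmat, axis=0)  # best match in image 1 for each patch of image 2
mutual = [(i1, idx12[i1]) for i1 in range(npts1)
          if idx21[idx12[i1]] == i1]  # keep only pairs that choose each other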
Example 45
def extrapolate(precip,
                velocity,
                num_timesteps,
                outval=np.nan,
                xy_coords=None,
                allow_nonfinite_values=False,
                **kwargs):
    """Apply semi-Lagrangian extrapolation to a two-dimensional precipitation
    field.

    Parameters
    ----------
    precip : array-like
        Array of shape (m,n) containing the input precipitation field. All
        values are required to be finite by default.
    velocity : array-like
        Array of shape (2,m,n) containing the x- and y-components of the m*n
        advection field. All values are required to be finite by default.
    num_timesteps : int
        Number of time steps to extrapolate.
    outval : float, optional
        Optional argument for specifying the value for pixels advected from
        outside the domain. If outval is set to 'min', the value is taken as
        the minimum value of precip.
        Default : np.nan
    xy_coords : ndarray, optional
        Array with the coordinates of the grid dimension (2, m, n ).

        * xy_coords[0] : x coordinates
        * xy_coords[1] : y coordinates

        By default, the *xy_coords* are computed for each extrapolation.
    allow_nonfinite_values : bool, optional
        If True, allow non-finite values in the precipitation and advection
        fields. This option is useful if the input fields contain a radar mask
        (i.e. pixels with no observations are set to nan).

    Other Parameters
    ----------------

    D_prev : array-like
        Optional initial displacement vector field of shape (2,m,n) for the
        extrapolation.
        Default : None
    n_iter : int
        Number of inner iterations in the semi-Lagrangian scheme.
        Default : 3
    inverse : bool
        If True, the extrapolation trajectory is computed backward along the
        flow (default), forward otherwise.
        Default : True
    return_displacement : bool
        If True, return the total advection velocity (displacement) between the
        initial input field and the advected one integrated along
        the trajectory. Default : False

    Returns
    -------
    out : array or tuple
        If return_displacement=False, return a time series of extrapolated
        fields of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
        extrapolated fields and the total displacement along the advection
        trajectory.

    References
    ----------
    :cite:`GZ2002` Germann et al (2002)

    """
    if len(precip.shape) != 2:
        raise ValueError("precip must be a two-dimensional array")

    if len(velocity.shape) != 3:
        raise ValueError("velocity must be a three-dimensional array")

    if not allow_nonfinite_values:
        if np.any(~np.isfinite(precip)):
            raise ValueError("precip contains non-finite values")

        if np.any(~np.isfinite(velocity)):
            raise ValueError("velocity contains non-finite values")

    # defaults
    verbose = kwargs.get("verbose", False)
    D_prev = kwargs.get("D_prev", None)
    n_iter = kwargs.get("n_iter", 3)
    inverse = kwargs.get("inverse", True)
    return_displacement = kwargs.get("return_displacement", False)

    if verbose:
        print("Computing the advection with the semi-lagrangian scheme.")
        t0 = time.time()

    if outval == "min":
        outval = np.nanmin(precip)

    coeff = 1.0 if not inverse else -1.0

    if xy_coords is None:
        x_values, y_values = np.meshgrid(np.arange(precip.shape[1]),
                                         np.arange(precip.shape[0]))

        xy_coords = np.stack([x_values, y_values])

    R_e = []
    if D_prev is None:
        D = np.zeros((2, velocity.shape[1], velocity.shape[2]))
    else:
        D = D_prev.copy()

    for t in range(num_timesteps):
        V_inc = np.zeros(D.shape)

        for k in range(n_iter):
            if t > 0 or k > 0 or D_prev is not None:
                XYW = xy_coords + D - V_inc / 2.0
                XYW = [XYW[1, :, :], XYW[0, :, :]]

                VWX = ip.map_coordinates(velocity[0, :, :],
                                         XYW,
                                         mode="nearest",
                                         order=0,
                                         prefilter=False)
                VWY = ip.map_coordinates(velocity[1, :, :],
                                         XYW,
                                         mode="nearest",
                                         order=0,
                                         prefilter=False)
            else:
                VWX = velocity[0, :, :]
                VWY = velocity[1, :, :]

            V_inc[0, :, :] = VWX / n_iter
            V_inc[1, :, :] = VWY / n_iter

            D += coeff * V_inc

        XYW = xy_coords + D
        XYW = [XYW[1, :, :], XYW[0, :, :]]

        IW = ip.map_coordinates(precip,
                                XYW,
                                mode="constant",
                                cval=outval,
                                order=0,
                                prefilter=False)
        R_e.append(np.reshape(IW, precip.shape))

    if verbose:
        print("--- %s seconds ---" % (time.time() - t0))

    if not return_displacement:
        return np.stack(R_e)
    else:
        return np.stack(R_e), D
Example 46
def extrapolate(precip,
                velocity,
                timesteps,
                outval=np.nan,
                xy_coords=None,
                allow_nonfinite_values=False,
                vel_timestep=None,
                **kwargs):
    """Apply semi-Lagrangian backward extrapolation to a two-dimensional
    precipitation field.

    Parameters
    ----------
    precip : array-like
        Array of shape (m,n) containing the input precipitation field. All
        values are required to be finite by default.
    velocity : array-like
        Array of shape (2,m,n) containing the x- and y-components of the m*n
        advection field. All values are required to be finite by default.
    timesteps : int or list
        If timesteps is integer, it specifies the number of time steps to
        extrapolate. If a list is given, each element is the desired
        extrapolation time step from the current time. In this case, the
        vel_timestep argument must be specified.
    outval : float, optional
        Optional argument for specifying the value for pixels advected from
        outside the domain. If outval is set to 'min', the value is taken as
        the minimum value of precip.
        Default : np.nan
    xy_coords : ndarray, optional
        Array with the coordinates of the grid dimension (2, m, n ).

        * xy_coords[0] : x coordinates
        * xy_coords[1] : y coordinates

        By default, the *xy_coords* are computed for each extrapolation.
    allow_nonfinite_values : bool, optional
        If True, allow non-finite values in the precipitation and advection
        fields. This option is useful if the input fields contain a radar mask
        (i.e. pixels with no observations are set to nan).

    Other Parameters
    ----------------

    D_prev : array-like
        Optional initial displacement vector field of shape (2,m,n) for the
        extrapolation.
        Default : None
    n_iter : int
        Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
        the integration is done using the midpoint rule. Otherwise, the advection
        vectors are taken from the starting point of each interval.
        Default : 1
    return_displacement : bool
        If True, return the total advection velocity (displacement) between the
        initial input field and the advected one integrated along
        the trajectory. Default : False
    vel_timestep : float
        The time step of the velocity field. It is assumed to have the same
        unit as the timesteps argument.

    Returns
    -------
    out : array or tuple
        If return_displacement=False, return a time series of extrapolated
        fields of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
        extrapolated fields and the total displacement along the advection
        trajectory.

    References
    ----------
    :cite:`GZ2002` Germann et al (2002)

    """
    if len(precip.shape) != 2:
        raise ValueError("precip must be a two-dimensional array")

    if len(velocity.shape) != 3:
        raise ValueError("velocity must be a three-dimensional array")

    if not allow_nonfinite_values:
        if np.any(~np.isfinite(precip)):
            raise ValueError("precip contains non-finite values")

        if np.any(~np.isfinite(velocity)):
            raise ValueError("velocity contains non-finite values")

    # defaults
    verbose = kwargs.get("verbose", False)
    D_prev = kwargs.get("D_prev", None)
    n_iter = kwargs.get("n_iter", 1)
    return_displacement = kwargs.get("return_displacement", False)

    if isinstance(timesteps, int):
        timesteps = np.arange(1, timesteps + 1)
        vel_timestep = 1.0
    else:
        if vel_timestep is None:
            raise ValueError(
                "vel_timestep must be specified when timesteps is a list")
        if np.any(np.diff(timesteps) <= 0.0):
            raise ValueError(
                "the given timestep sequence is not monotonically increasing")

    timestep_diff = np.hstack([[timesteps[0]], np.diff(timesteps)])

    if verbose:
        print("Computing the advection with the semi-lagrangian scheme.")
        t0 = time.time()

    if outval == "min":
        outval = np.nanmin(precip)

    if xy_coords is None:
        x_values, y_values = np.meshgrid(np.arange(precip.shape[1]),
                                         np.arange(precip.shape[0]))

        xy_coords = np.stack([x_values, y_values])

    def interpolate_motion(D, V_inc, td):
        XYW = xy_coords + D
        XYW = [XYW[1, :, :], XYW[0, :, :]]

        VWX = ip.map_coordinates(velocity[0, :, :],
                                 XYW,
                                 mode="nearest",
                                 order=0,
                                 prefilter=False)
        VWY = ip.map_coordinates(velocity[1, :, :],
                                 XYW,
                                 mode="nearest",
                                 order=0,
                                 prefilter=False)

        V_inc[0, :, :] = VWX
        V_inc[1, :, :] = VWY

        if n_iter > 1:
            V_inc /= n_iter

        V_inc *= td / vel_timestep

    R_e = []
    if D_prev is None:
        D = np.zeros((2, velocity.shape[1], velocity.shape[2]))
        V_inc = velocity.copy() * timestep_diff[0] / vel_timestep
    else:
        D = D_prev.copy()
        V_inc = np.empty(velocity.shape)
        interpolate_motion(D, V_inc, timestep_diff[0])

    for ti, td in enumerate(timestep_diff):
        if n_iter > 0:
            for k in range(n_iter):
                interpolate_motion(D - V_inc / 2.0, V_inc, td)
                D -= V_inc
                interpolate_motion(D, V_inc, td)
        else:
            if ti > 0 or D_prev is not None:
                interpolate_motion(D, V_inc, td)

            D -= V_inc

        XYW = xy_coords + D
        XYW = [XYW[1, :, :], XYW[0, :, :]]

        IW = ip.map_coordinates(precip,
                                XYW,
                                mode="constant",
                                cval=outval,
                                order=0,
                                prefilter=False)
        R_e.append(np.reshape(IW, precip.shape))

    if verbose:
        print("--- %s seconds ---" % (time.time() - t0))

    if not return_displacement:
        return np.stack(R_e)
    else:
        return np.stack(R_e), D
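A minimal usage sketch for the function above (not part of the original source); the field, advection values and timesteps are made up, and the enclosing module is assumed to import numpy as np:

import numpy as np

precip = np.random.exponential(1.0, size=(100, 100))   # toy rain field
velocity = np.ones((2, 100, 100))                      # 1 px/step in x and y

# integer timesteps: vel_timestep defaults to 1.0 internally
forecast = extrapolate(precip, velocity, 3, outval=0.0)
print(forecast.shape)                                  # (3, 100, 100)

# irregular timesteps require the velocity timestep to be given explicitly
forecast, D = extrapolate(precip, velocity, [0.5, 1.0, 2.0],
                          vel_timestep=1.0, return_displacement=True)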
Example n. 47
def project_panoramas(opt, projection_list, start_point, end_point, core):
    f_handler = open(os.path.join(opt.log_folder, str(core) + '.log'), 'w')
    std_f = sys.stdout
    sys.stdout = f_handler
    sys.stdout.write(
        'start is Projection number {}, end is Projection number {}\n'.format(
            start_point,
            min(end_point, len(projection_list)) - 1))
    sys.stdout.flush()

    [tmp_xy1, m_tmp, n_tmp, _] = calculate_no_adaptive_coor(h_fov=160,
                                                            v_fov1=-45,
                                                            v_fov2=80,
                                                            mpp=0.0125 * 2)
    with open(opt.panorama_rectification) as f:
        rectification_results = json.load(f)

    for projection_name in projection_list[start_point:end_point]:

        tmp_xy = tmp_xy1.copy()

        if projection_name in rectification_results:
            panorama_img_name = os.path.join(
                opt.pano_folder,
                rectification_results[projection_name]['panoID'] + '.jpg')

            projection_img_path = os.path.join(Projection_folder,
                                               projection_name)
            if os.path.exists(panorama_img_name):
                if not os.path.exists(projection_img_path):
                    super_R = R_pitch(
                        rectification_results[projection_name]['pitch']).dot(
                            R_roll(
                                rectification_results[projection_name]
                                ['roll']).dot(
                                    R_heading(
                                        rectification_results[projection_name]
                                        ['heading'])))

                    tmp_coordinates = super_R.dot(tmp_xy).T

                    tmp_coordinates = calculate_new_pano(
                        tmp_coordinates, Image.open(panorama_img_name))

                    tmp_coordinates = tmp_coordinates.reshape(2, m_tmp, n_tmp)

                    img = skimage.io.imread(panorama_img_name)
                    tmp_sub = np.dstack([
                        map_coordinates(img[:, :, 0], tmp_coordinates,
                                        order=0),
                        map_coordinates(img[:, :, 1], tmp_coordinates,
                                        order=0),
                        map_coordinates(img[:, :, 2], tmp_coordinates, order=0)
                    ])

                    skimage.io.imsave(projection_img_path, tmp_sub)
                    print(projection_name + ' has been saved')
                else:
                    print(projection_name + ' has already been saved before')
            else:
                print(projection_name +
                      ' does not have corresponding panorama image')
        else:
            print(projection_name +
                  ' rectification parameters are not saved before')
        sys.stdout.flush()

    sys.stdout.close()
    sys.stdout = std_f
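A hypothetical sketch of the pitch/roll/heading chaining performed by super_R above, with scipy's Rotation standing in for the undefined R_pitch/R_roll/R_heading helpers. The Euler axis conventions ('x', 'y', 'z') and all values below are assumptions for illustration, not the original definitions:

import numpy as np
from scipy.spatial.transform import Rotation

pitch, roll, heading = 2.0, -1.0, 90.0         # degrees, made-up values
super_R = (Rotation.from_euler('x', pitch, degrees=True)
           * Rotation.from_euler('y', roll, degrees=True)
           * Rotation.from_euler('z', heading, degrees=True)).as_matrix()

rays = np.random.randn(3, 100)                 # direction vectors, shape (3, N)
rays /= np.linalg.norm(rays, axis=0)           # normalise onto the unit sphere
rotated = super_R @ rays                       # same (3, N) layout as tmp_xy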
Example n. 48
    def _elastic_transform(self, image_in, label_in, random_state=None):
        """Elastic deformation of image_ins as described in [Simard2003]_.
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
           Convolutional Neural Networks applied to Visual Document Analysis", in
           Proc. of the International Conference on Document Analysis and
           Recognition, 2003.
        """
        alpha = np.random.uniform(0, self.alpha_range)

        if random_state is None:
            random_state = np.random.RandomState(None)

        shape = image_in.shape[1:]
        # shape = image_in.shape

        dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                             self.sigma,
                             mode='constant',
                             cval=0) * alpha
        dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                             self.sigma,
                             mode='constant',
                             cval=0) * alpha

        x, y = np.meshgrid(np.arange(shape[0]),
                           np.arange(shape[1]),
                           indexing='ij')
        indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))

        if self.use_mp:
            image_out = np.concatenate(self.parallel(
                delayed(self._map)(input, indices, shape)
                for input in image_in),
                                       axis=0)
        else:
            # image_out = map_coordinates(image_in, indices, order=1).reshape(shape)
            image_out = []
            for input in image_in:
                image_out.append(
                    np.expand_dims(map_coordinates(input, indices,
                                                   order=1).reshape(shape),
                                   axis=0))
            image_out = np.concatenate(image_out, axis=0)

        if self.use_mp:
            label_out = []
            for sub_vol in label_in:
                results = np.concatenate(self.parallel(
                    delayed(self._map)(input, indices, shape)
                    for input in sub_vol),
                                         axis=0)
                label_out.append(np.expand_dims(results, axis=0))
            label_out = np.concatenate(label_out, axis=0)
        else:
            # label_in holds sub-volumes of channels; warp each channel with
            # the same indices so the labels stay aligned with the image
            label_out = []
            for sub_vol in label_in:
                results = []
                for input in sub_vol:
                    results.append(
                        np.expand_dims(map_coordinates(input, indices,
                                                       order=1).reshape(shape),
                                       axis=0))
                label_out.append(
                    np.expand_dims(np.concatenate(results, axis=0), axis=0))
            label_out = np.concatenate(label_out, axis=0)

        image_out = self._shave(image_out, [self.shave, self.shave])
        label_out = self._shave(label_out, [self.shave, self.shave])

        return image_out, label_out
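A self-contained sketch of the channel-wise pattern above, stripped of the class plumbing (shapes and parameters are made up): smooth a random field into displacements, then warp every channel with the same indices so image and label stay aligned.

import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

rng = np.random.RandomState(0)
image = rng.rand(3, 64, 64)                    # (channels, H, W)
alpha, sigma = 10.0, 4.0
shape = image.shape[1:]

dx = gaussian_filter(rng.rand(*shape) * 2 - 1, sigma, mode='constant') * alpha
dy = gaussian_filter(rng.rand(*shape) * 2 - 1, sigma, mode='constant') * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))

warped = np.stack([map_coordinates(ch, indices, order=1).reshape(shape)
                   for ch in image])           # every channel warped identically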
Example n. 49
def extrapolate(R, V, num_timesteps, outval=np.nan, **kwargs):
    """Apply semi-Lagrangian extrapolation to a two-dimensional precipitation 
    field.
    
    Parameters
    ----------
    R : array-like
        Array of shape (m,n) containing the input precipitation field. All 
        values are required to be finite.
    V : array-like
        Array of shape (2,m,n) containing the x- and y-components of the m*n 
        advection field. All values are required to be finite.
    num_timesteps : int
        Number of time steps to extrapolate.
    outval : float
        Optional argument for specifying the value for pixels advected from 
        outside the domain. If outval is set to 'min', the value is taken as 
        the minimum value of R.
        Default : np.nan

    Optional kwargs
    ---------------
    D_prev : array-like
        Optional initial displacement vector field of shape (2,m,n) for the 
        extrapolation.
        Default : None
    n_iter : int
        Number of inner iterations in the semi-Lagrangian scheme.
        Default : 3
    inverse : bool
        If True, the extrapolation trajectory is computed backward along the 
        flow (default), forward otherwise.
        Default : True
    return_displacement : bool
        If True, return the total advection velocity (displacement) between the 
        initial input field and the advected one integrated along the trajectory.
        Default : False
    
    Returns
    -------
    out : array or tuple
        If return_displacement=False, return a time series of extrapolated
        fields of shape (num_timesteps,m,n). Otherwise, return a tuple
        containing the extrapolated fields and the total displacement along
        the advection trajectory.
    """
    if len(R.shape) != 2:
        raise ValueError("R must be a two-dimensional array")

    if len(V.shape) != 3:
        raise ValueError("V must be a three-dimensional array")

    if np.any(~np.isfinite(R)):
        raise ValueError("R contains non-finite values")

    if np.any(~np.isfinite(V)):
        raise ValueError("V contains non-finite values")

    # defaults
    verbose = kwargs.get("verbose", False)
    D_prev = kwargs.get("D_prev", None)
    n_iter = kwargs.get("n_iter", 3)
    inverse = kwargs.get("inverse", True)
    return_displacement = kwargs.get("return_displacement", False)

    if verbose:
        print("Computing the advection with the semi-lagrangian scheme.")
        t0 = time.time()

    if outval == "min":
        outval = np.nanmin(R)

    coeff = 1.0 if not inverse else -1.0

    X, Y = np.meshgrid(np.arange(V.shape[2]), np.arange(V.shape[1]))
    XY = np.stack([X, Y])

    R_e = []
    if D_prev is None:
        D = np.zeros((2, V.shape[1], V.shape[2]))
    else:
        D = D_prev.copy()

    for t in range(num_timesteps):
        V_inc = np.zeros(D.shape)

        for k in range(n_iter):
            if t > 0 or k > 0 or D_prev is not None:
                XYW = XY + D - V_inc / 2.0
                XYW = [XYW[1, :, :], XYW[0, :, :]]

                VWX = ip.map_coordinates(V[0, :, :],
                                         XYW,
                                         mode="nearest",
                                         order=0,
                                         prefilter=False)
                VWY = ip.map_coordinates(V[1, :, :],
                                         XYW,
                                         mode="nearest",
                                         order=0,
                                         prefilter=False)
            else:
                VWX = V[0, :, :]
                VWY = V[1, :, :]

            V_inc[0, :, :] = VWX / n_iter
            V_inc[1, :, :] = VWY / n_iter

            D += coeff * V_inc

        XYW = XY + D
        XYW = [XYW[1, :, :], XYW[0, :, :]]

        IW = ip.map_coordinates(R,
                                XYW,
                                mode="constant",
                                cval=outval,
                                order=0,
                                prefilter=False)
        R_e.append(np.reshape(IW, R.shape))

    if verbose:
        print("--- %s seconds ---" % (time.time() - t0))

    if not return_displacement:
        return np.stack(R_e)
    else:
        return np.stack(R_e), D
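A toy sanity check for the backward scheme above (hypothetical values; `extrapolate` refers to this example's function): with a constant flow of one pixel per step in x, the field is advected one pixel to the right per timestep.

import numpy as np

R = np.zeros((5, 5))
R[2, 2] = 1.0
V = np.zeros((2, 5, 5))
V[0] = 1.0                                   # x-component: +1 px per step

out = extrapolate(R, V, 1, outval=0.0)
assert out[0][2, 3] == 1.0                   # the peak moved one pixel right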
Example n. 50
 def interpolate_intensity(self, j, w, t, r, s):
     """Returns integrated intensity in [K km/s]. Width is not FWHM."""
     idxs = self.indices(j, self.fwhm * w, t, r, s)
     inten = map_coordinates(self.grid[j, 0], idxs, order=1, mode='nearest')
     return inten * w * np.sqrt(2. * np.pi)
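A quick numerical check (not from the original source) of the normalisation used above: a Gaussian of unit peak and width w integrates to w*sqrt(2*pi), which is why the interpolated peak intensity is multiplied by that factor.

import numpy as np

w = 2.5
x = np.linspace(-20.0, 20.0, 100001)
dx = x[1] - x[0]
numeric = np.sum(np.exp(-x**2 / (2.0 * w**2))) * dx     # Riemann sum
print(np.isclose(numeric, w * np.sqrt(2.0 * np.pi)))    # True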
Example n. 51
def measureVcut(img_masked, line, img_unmasked=None, img_bg=None,
             max_width=101,
             mask_is_dark=False,
             v_isDark=None):
    '''
    img_masked   ... image with v-cut mask
    img_unmasked ... image without v-cut mask
    img_bg       ... either an average background level or a background
                     (or dark current) image
    line = (x0,y0,x1,y1) - line from a place within the gap until a place
              behind the v-cut intersection
            line points do not have to be precise

    v_isDark - whether the v-cut mask is dark - if None, the value is determined
               from image intensities at the line start/end
    mask_is_dark - whether the mask used to build the V is absolutely dark (0)
    '''
    
    if img_unmasked is not None:
        img_unmasked = img_unmasked.astype(float)
        
    img_masked = img_masked.astype(float)
   
    if img_bg is not None:
        img_unmasked = img_unmasked - img_bg
        img_masked = img_masked - img_bg
      
    s0, s1 = img_masked.shape
    poly = ((0, 0), (s1, 0), (s1, s0), (0, s0), (0, 0))
    line = cutToFitIntoPolygon(line, poly)
    # rectify image to given line (this is not necessarily precise):
    if img_unmasked is not None:
        sub_img_unmasked = alignImageAlongLine(img_unmasked, line, max_width)
    sub_img_masked = alignImageAlongLine(img_masked, line, max_width)

    if not mask_is_dark:
        # assume the MTF is 0 at the given end value
        offs = sub_img_masked[-1].mean()
        sub_img_masked -= offs
        if img_unmasked is not None:
            sub_img_unmasked -= offs
    
    # create 0...1 scaled contrast image:
    if img_unmasked is None:
        contrast_img = sub_img_masked
    else:
        # make relative
        contrast_img = (sub_img_unmasked - sub_img_masked) / (sub_img_unmasked + sub_img_masked)

    if v_isDark is None:
        # determine whether the v-mask is dark by evaluating contrast_img
        # intensities; if the middle row is brighter than the corner row:
        if contrast_img[contrast_img.shape[0] // 2].mean() > contrast_img[0].mean():
            v_isDark = True

    if not v_isDark:
        contrast_img = 1 - contrast_img

    s0, s1 = contrast_img.shape
    x = np.arange(s1, dtype=int)

    dsub = cv2.Sobel(contrast_img, cv2.CV_64F, 0, 1, ksize=5)
  
    # FIND LINES: (precise)
    line1 = np.argmin(dsub, axis=0)
    line3 = np.argmax(dsub, axis=0)
    line2 = 0.5 * (line1 + line3)

    xx = x
    # find out where V ends:
    i = np.argmax(contrast_img[line2.round().astype(int), x] < 0.22)
    if i:
        s1 = i
        xx = x[:i]
        line1 = line1[:i]
        line2 = line2[:i]
        line3 = line3[:i]

    # FIT LINEAR LINES:
    m1, n1 = robustLinregress(xx, line1)[:2]  # upper edge
    m3, n3 = robustLinregress(xx, line3)[:2]  # lower edge

    l1 = fromFn(m1, n1)
    l3 = fromFn(m3, n3)

    # lines y-pos:
    fitline1 = x * m1 + n1
    fitline3 = x * m3 + n3
    fitline2 = 0.5 * (fitline1 + fitline3)  # middle line
    y = map_coordinates(contrast_img, [fitline2, x], order=2)

    try:
        # Intersection of detected v-cut lines:
        i0, i1 = intersection(l1, l3)
    except TypeError:
        raise Exception('no intersection found')
    # Angle of intersection:
    angle = abs(angle2(l1, l3))
    dx = np.asarray(x, dtype=float) - i0
    dy = fitline2 - i1
    # radii from intersection:
    r = np.hypot(dx, dy)  

    # exclude area behind intersection:
    behind_intersection = np.argmax(dx > 0)

    if behind_intersection:
        r = r[behind_intersection:]
        y = y[behind_intersection:]

    if img_unmasked is None:
        # only one masked image avail. normalize y 0...1:
        mx = y.max()
        mn = y.min()
        mean = y.mean()
        t0 = 0.7 * mx - 0.3 * mean
        t1 = 0.7 * mn + 0.3 * mean
        low = np.median(y[y < t1])
        high = np.median(y[y > t0])
        y -= low
        y /= (high - low)

    return (r, y, angle), (fitline1, fitline2, fitline3), (contrast_img, dsub)
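A minimal standalone sketch of the line-sampling trick used for fitline2 above: map_coordinates interpolates image values along an arbitrary sub-pixel polyline given as [row, column] coordinate arrays (toy data).

import numpy as np
from scipy.ndimage import map_coordinates

img = np.arange(100, dtype=float).reshape(10, 10)
x = np.arange(10)
yline = 0.3 * x + 1.5                 # fractional row position per column
profile = map_coordinates(img, [yline, x], order=2)
print(profile.shape)                  # (10,)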
Example n. 52
    def _elastic_deformation(self,
                             image,
                             output,
                             alpha,
                             sigma,
                             random_state=None):
        """Elastic deformation of images as described in [Simard2003]_.
        .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
           Convolutional Neural Networks applied to Visual Document Analysis", in
           Proc. of the International Conference on Document Analysis and
           Recognition, 2003.
        """
        import cv2
        start = time.time()
        original_shape = image.shape
        original_output_shape = output.shape
        image = image[:, :, 0].astype(np.float32)
        #assert len(image.shape) == 2

        if random_state is None:
            random_state = np.random.RandomState(None)

        shape = image.shape
        # note: the alpha/sigma arguments are overridden here with per-call
        # random values scaled to the image width
        sigma = np.random.uniform(0.08, 0.1) * shape[1]
        alpha = np.random.uniform(0.8, 1.0) * shape[1]

        blur_size = int(4 * sigma) | 1
        dx = cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1),
                              ksize=(blur_size, blur_size),
                              sigmaX=sigma) * alpha
        dy = cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1),
                              ksize=(blur_size, blur_size),
                              sigmaX=sigma) * alpha
        x, y = np.meshgrid(np.arange(shape[0]),
                           np.arange(shape[1]),
                           indexing='ij')
        indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))

        # Transform output
        output[:, :, 0] = np.ones(output.shape[:2])  # Clear background
        for label in range(1, output.shape[2]):
            #segmentation = cv2.remap(output[:, :, label], dx, dy, interpolation=cv2.INTER_LINEAR).reshape(output.shape[:2])
            segmentation = map_coordinates(output[:, :, label],
                                           indices,
                                           order=0).reshape(output.shape[:2])
            output[:, :, label] = segmentation

        # Remove segmentation from other labels
        for label in range(1, output.shape[2]):
            for label2 in range(output.shape[2]):
                if label2 == label:
                    continue
                output[output[:, :, label] == 1, label2] = 0

        #input = cv2.remap(image, dx, dy, interpolation=cv2.INTER_LINEAR).reshape(original_shape)
        input = map_coordinates(image, indices,
                                order=1).reshape(original_shape)
        end = time.time()

        #print('Elastic deformation time', (end - start))

        return input, output
Example n. 53
    def rz2brzt(self, r_in, z_in, t_in=None):
        """calculates Br, Bz, Bt profiles

        Input
        -----
        r_in : ndarray
            R coordinates
            1D, size(nr_in) or 2D, size (nt, nr_in)
        z_in : ndarray
            Z coordinates
            1D, size(nz_in) or 2D, size (nt, nz_in)
        t_in : float or 1darray
            time

        Output
        ------
        interpBr : ndarray
            profile of Br on the grid
        interpBz : ndarray
            profile of Bz on the grid
        interpBt : ndarray
            profile of Bt on the grid

        """

        if not self.eq_open:
            return

        if t_in is None:
            t_in = self.t_eq

        tarr = np.atleast_1d(t_in)
        r_in = np.atleast_2d(r_in)
        z_in = np.atleast_2d(z_in)

        self._read_scalars()
        self._read_profiles()
        self._read_pfm()

        # Poloidal current

        nt = np.size(tarr)

        if np.size(r_in, 0) != nt:
            r_in = np.tile(r_in, (nt, 1))
        if np.size(z_in, 0) != nt:
            z_in = np.tile(z_in, (nt, 1))

        nr_in = np.size(r_in, 1)
        nz_in = np.size(z_in, 1)

        interpBr = np.zeros((nt, nr_in, nz_in), dtype='single')
        interpBz = np.zeros((nt, nr_in, nz_in), dtype='single')
        interpBt = np.zeros((nt, nr_in, nz_in), dtype='single')

        from scipy.constants import mu_0
        nr, nz = len(self.Rmesh), len(self.Zmesh)
        dr = (self.Rmesh[-1] - self.Rmesh[0]) / (nr - 1)
        dz = (self.Zmesh[-1] - self.Zmesh[0]) / (nz - 1)

        scaling = np.array([dr, dz])
        offset = np.array([self.Rmesh[0], self.Zmesh[0]])

        unique_idx, idx = self._get_nearest_index(tarr)

        for i in unique_idx:

            Phi = self.pfm[i]

            Br = -np.diff(Phi, axis=1) / (self.Rmesh[:, None])
            Bz = np.diff(
                Phi, axis=0) / (.5 *
                                (self.Rmesh[1:] + self.Rmesh[:-1])[:, None])
            Bt = np.interp(Phi, self.pf[:, i],
                           self.fpol[:, i]) * mu_0 / self.Rmesh[:, None]

            jt = idx == i
            r = np.tile(r_in[jt], (nz_in, 1, 1)).transpose(1, 2, 0)
            z = np.tile(z_in[jt], (nr_in, 1, 1)).transpose(1, 0, 2)

            coords = np.array((r, z))

            index_t = ((coords.T - offset) / scaling).T
            index_r = ((coords.T - offset) / scaling - np.array((0, .5))).T
            index_z = ((coords.T - offset) / scaling - np.array((.5, 0))).T

            interpBr[jt] = map_coordinates(Br,
                                           index_r,
                                           mode='nearest',
                                           order=2,
                                           prefilter=True)
            interpBz[jt] = map_coordinates(Bz,
                                           index_z,
                                           mode='nearest',
                                           order=2,
                                           prefilter=True)
            interpBt[jt] = map_coordinates(Bt,
                                           index_t,
                                           mode='nearest',
                                           order=2,
                                           prefilter=True)

        return interpBr / (2 * np.pi * dz), interpBz / (
            2 * np.pi * dr), interpBt / (2. * np.pi)
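The finite differences above implement the convention B_R = -(1/(2*pi*R)) dPhi/dZ and B_Z = (1/(2*pi*R)) dPhi/dR (up to the half-cell staggering handled by index_r/index_z). A minimal sketch of the same relation on a toy flux map, using np.gradient instead of the staggered differences:

import numpy as np

R = np.linspace(1.0, 2.0, 65)
Z = np.linspace(-1.0, 1.0, 65)
Phi = np.outer(R**2, np.ones_like(Z))          # toy poloidal flux Phi(R, Z)
dr, dz = R[1] - R[0], Z[1] - Z[0]

Br = -np.gradient(Phi, dz, axis=1) / (2 * np.pi * R[:, None])  # ~0 here
Bz = np.gradient(Phi, dr, axis=0) / (2 * np.pi * R[:, None])   # ~1/pi here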
Example n. 54
def refine_ellipsoid(image3d,
                     params,
                     spacing=1,
                     rad_range=None,
                     maxfit_size=2,
                     spline_order=3,
                     threshold=0.1):
    """ Refines coordinates of a 3D ellipsoid, starting from given parameters.

    Interpolates the image along lines perpendicular to the ellipsoid.
    The maximum along each line is found using linear regression of the
    discrete derivative.

    Parameters
    ----------
    image3d : 3d numpy array of numbers
        Image indices are interpreted as (z, y, x)
    params : tuple
        zr, yr, xr, zc, yc, xc
    spacing: number
        spacing along radial direction
    rad_range: tuple of floats
        length of the line (distance inwards, distance outwards)
    maxfit_size: integer
        pixels around maximum pixel that will be used in linear regression
    spline_order: integer
        interpolation order for edge crossections
    threshold: float
        a threshold is calculated based on the global maximum
        fitregions are rejected if their average value is lower than this

    Returns
    -------
    - zr, yr, xr, zc, yc, xc, skew_y, skew_x
    - contour coordinates at z = 0

    """
    if not np.all([x > 0 for x in params]):
        raise ValueError(
            "All zc, yc, xc, zr, yr, xr params should be positive")
    assert image3d.ndim == 3
    zr, yr, xr, zc, yc, xc = params
    if rad_range is None:
        rad_range = (-min(zr, yr, xr) / 2, min(zr, yr, xr) / 2)
    steps = np.arange(rad_range[0], rad_range[1] + 1, 1)
    pos, normal = ellipsoid_grid((zr, yr, xr), (zc, yc, xc), spacing=spacing)
    coords = normal[:, :, np.newaxis] * steps[np.newaxis, np.newaxis, :] + \
             pos[:, :, np.newaxis]

    # interpolate the image on calculated coordinates
    intensity = map_coordinates(image3d, coords, order=spline_order)

    # identify the regions around the max value
    r_dev = max_linregress(intensity, maxfit_size, threshold)

    # calculate new coords
    coord_new = pos + (r_dev + rad_range[0]) * normal
    coord_new = coord_new[:, np.isfinite(coord_new).all(0)]

    # fit ellipsoid
    radius, center, skew = fit_ellipsoid(coord_new,
                                         mode='xy',
                                         return_mode='skew')
    return tuple(radius) + tuple(center) + tuple(skew), coord_new.T
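A standalone sketch of the broadcasted sampling pattern above (made-up volume and rays): for N surface points and S radial steps, a (3, N, S) coordinate array lets one map_coordinates call interpolate all cross-sections at once.

import numpy as np
from scipy.ndimage import map_coordinates

vol = np.random.rand(32, 32, 32)
pos = np.full((3, 5), 16.0)                       # 5 ray origins (z, y, x)
normal = np.tile(np.array([[1.0], [0.0], [0.0]]), (1, 5))   # rays along z
steps = np.arange(-4.0, 5.0)                      # 9 radial steps

coords = normal[:, :, None] * steps[None, None, :] + pos[:, :, None]
profiles = map_coordinates(vol, coords, order=3)  # shape (5, 9)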
Example n. 55
 def elastic_transform3Dv2(self,
                           image,
                           alpha,
                           sigma,
                           alpha_affine,
                           random_state=None):
     """Elastic deformation of images as described in [Simard2003]_ (with modifications).
     .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
          Convolutional Neural Networks applied to Visual Document Analysis", in
          Proc. of the International Conference on Document Analysis and
          Recognition, 2003.
      Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
      From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
     """
     # affine and deformation must be slice by slice and fixed for slices
     if random_state is None:
         random_state = np.random.RandomState(None)
     shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
     # [:,:,:,1] is the mask. The two channels undergo the same transformation.
     shape_size = shape[:-1]  # z y x
     # Random affine
     shape_size_aff = shape[1:-1]  # y x
     center_square = np.float32(shape_size_aff) // 2
     square_size = min(shape_size_aff) // 3
     pts1 = np.float32([
         center_square + square_size,
         [center_square[0] + square_size, center_square[1] - square_size],
         center_square - square_size
     ])
     pts2 = pts1 + random_state.uniform(
         -alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
     M = cv2.getAffineTransform(pts1, pts2)
     new_img = np.zeros_like(image)
     for i in range(shape[0]):
         new_img[i, :, :,
                 0] = cv2.warpAffine(image[i, :, :, 0],
                                     M,
                                     shape_size_aff[::-1],
                                     borderMode=cv2.BORDER_CONSTANT,
                                     borderValue=0.)
         for j in range(1, 10):
             new_img[i, :, :,
                     j] = cv2.warpAffine(image[i, :, :, j],
                                         M,
                                         shape_size_aff[::-1],
                                         flags=cv2.INTER_NEAREST,
                                         borderMode=cv2.BORDER_TRANSPARENT,
                                         borderValue=0)
     dx = gaussian_filter(
         (random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
     dy = gaussian_filter(
         (random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
     x, y = np.meshgrid(np.arange(shape_size_aff[1]),
                        np.arange(shape_size_aff[0]))
     indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
     new_img2 = np.zeros_like(image)
     for i in range(shape[0]):
         new_img2[i, :, :,
                  0] = map_coordinates(new_img[i, :, :, 0],
                                       indices,
                                       order=1,
                                       mode='constant').reshape(shape[1:-1])
         for j in range(1, 10):
             new_img2[i, :, :,
                      j] = map_coordinates(new_img[i, :, :, j],
                                           indices,
                                           order=0,
                                           mode='constant').reshape(
                                               shape[1:-1])
     return np.array(new_img2), new_img
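The order=1 / order=0 split above is the usual convention: linear interpolation for image intensities, nearest-neighbour for label channels so class ids never get blended. A tiny illustration (toy label map):

import numpy as np
from scipy.ndimage import map_coordinates

lbl = np.array([[0., 0., 2.],
                [0., 2., 2.],
                [2., 2., 2.]])
pt = [[0.4], [1.6]]                              # one sub-pixel sample point

print(map_coordinates(lbl, pt, order=1))         # [1.52] - not a valid label
print(map_coordinates(lbl, pt, order=0))         # [2.]   - snaps to a label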
Example n. 56
def actr(data,
         yxguess,
         asym_rad=8,
         asym_size=5,
         maxcounts=2,
         method='gaus',
         half_pix=False,
         resize=False,
         weights=False):
    '''
    Calculate the center of an input array by first switching the array into
    asymmetry space and finding the minimum

    This function takes in an input two-dimensional array `data`, with a certain
    distribution of values, such as the flux from a star. The center of this
    distribution of values is based on the idea that the center will be the
    point of minimum asymmetry. To convert to asymmetry space, the asymmetry of
    a radial profile about a particular pixel is calculated according to
    sum(var(r)*Npts(r)), the sum of the variance about a particular radius times
    the number of points at that radius. The outer radius of consideration is
    set by `asym_rad`. The number of points that are converted to asymmetry
    space is governed by `asym_size`, producing a shape `asym_size`*2+1 by
    `asym_size`*2+1. This asymmetry space is recalculated and moved successively
    until the point of minimum asymmetry is in the center of the array or
    `maxcounts` is reached. Traditional Gaussian or center-of-light centering is
    then used in the asymmetry space to find the sub-pixel point of minimum
    asymmetry.

    Parameters
    --------------
    data: 2D array    this is the data to be worked on, the radius of the array
                      should at minimum be asym_size times 2, radius being
                      defined as the cut size, ie data[yxguess[0]-rad:yxguess[0]
                      +rad+1,yxguess[1]-rad:yxguess[1]+rad+1]. A recommendation
                      would be asym_size times 3 or 4, to account for the
                      searching of the algorithm

    yxguess: tuple    This contains a tuple, or 1x2 array, that contains the
                      guess of the center, this is used as the starting location
                      of the transform

    asym_rad: int     The integer used to define the span of the radial profile
                      used in the asym calculation

    asym_size: int    This sets the radius of the asym space that is used to
                      determine the center. The larger the radius of the star
                      in flux space, the larger this variable should be

    maxcounts: int    An int to set the number of times the routine is allowed
                      to walk trying to put the point of minimum asymmetry in
                      the center of the array

    method: string    Must be "gaus" to use a Gaussian for the sub-pixel fit,
                      or "col" for center-of-light sub-pixel fitting

    half_pix: Bool    Default False; if set to True, asymmetry will be
                      calculated for all the half-pixel locations between the
                      integers contained in the asym array. This may be more
                      accurate, but much slower. If set, it is recommended to
                      use a larger value for `asym_rad`.

    resize: float     Though by default False, set to a float which will be the
                      factor by which the asym array is resized before
                      centering. Scale factors of 5 or below are recommended.
                      Resizing introduces a bit of error in the center by
                      itself, but this may be compensated by the gains in
                      precision. Test with care. This WILL slow the function
                      down noticeably.

    weights: array    This is an array the same size as the input data; each
                      point contains the weighting that point should receive.
                      A low number means less weight. It should be of type
                      float; if none is given the weights are set to one.

    Returns
    ---------------
    numpy array:      A 1x2 array containing the found center of the data

    numpy array:      Array that contains the asymmetry values calculated from
                      the input array

    Raises
    ---------------
    Possibly a runtime error, if the size of the radial profile is different
    than a given view of the data. This is most likely due to a boundary issue,
    i.e. the routine is trying to move out of the boundary of the input data.
    This can be caused by incorrect sizes for asym_rad & asym_size, or because
    the routine is trying to walk off the edge of data searching for the point
    of minimum asymmetry. If this happens, try reducing asym_size or maxcounts.
    '''
    # initialize the boolean that determines if there are weights present or
    # not. This is used in the make_asym step to square the variance, to
    # provide larger contrast when using weights
    w_truth = 1
    # create the weights array if one is not passed in, and set w_truth to 0
    if isinstance(weights, bool):
        weights = np.ones(data.shape, dtype=float)
        w_truth = 0
    elif weights.dtype != np.dtype('float'):
        # cast to a float if it is not already float
        weights = np.array(weights, dtype=float)
    if data.dtype != 'float64':
        data = data.astype('float64')

    x_guess = int(yxguess[1])
    y_guess = int(yxguess[0])

    # create the array indexes
    ny, nx = np.indices((data.shape))

    # make the view for the positions to calculate an asymmetry value, this may
    # be unneeded look at removing this for optimization, save the shape for
    # later shape restoration

    suby = ny[y_guess - asym_size:y_guess + asym_size + 1,
              x_guess - asym_size:x_guess + asym_size + 1]
    shape_save = suby.shape
    suby = suby.flatten()
    subx = nx[y_guess - asym_size:y_guess + asym_size + 1,
              x_guess - asym_size:x_guess + asym_size + 1].flatten()

    # set up the range statement, to not recreate it every loop, same with len
    len_y = len(suby)
    itterator = np.arange(len_y)
    ones_len = np.ones(len_y)
    middle = (len_y - 1) // 2

    # set a counter for the number of times that the routine has moved in pixel
    # space
    counter = 0

    # define the lambda function used to generate the views outside of the loop
    view_maker = lambda frame, y_lamb, x_lamb, rng:\
        frame[y_lamb-rng:y_lamb+rng+1, x_lamb-rng:x_lamb+rng+1]

    # start a while loop, to be broken when the maximum number of steps is
    # reached, or when the minimum asymmetry value is in the center of the array
    while counter <= maxcounts:
        # This makes heavy use of Python generators, i.e. (x for x in
        # range(10)): the list is not built all at once; instead each element
        # is produced as it is iterated over. This saves memory and time, but
        # a generator needs to be recreated after each use. The map function
        # will be used a lot as well; it is like a for loop, but one which
        # runs faster.

        # create a generator for the views ahead of time that will be needed
        views = (view_maker(data, suby[i], subx[i], asym_rad)
                 for i in itterator)

        # create a generator for the view on the weights ahead of time
        lb_views = (view_maker(weights, suby[i], subx[i], asym_rad)
                    for i in itterator)

        # create a generator duplicate for the state of w_truth
        w_truth_dup = (w_truth * 1 for i in ones_len)

        # now create the actual asym array (list() keeps map() working under
        # Python 3, where map returns an iterator)
        asym = np.array(list(map(make_asym, views, lb_views, w_truth_dup)))

        # need to find out if the minimum is in the center of the array if it
        # is, break
        if asym.argmin() == middle:
            break

        # if not, move the array index locations and iterate the counter; the
        # while loop then repeats. Delete variables to make the garbage
        # collector work less hard
        else:
            suby += (suby[asym.argmin()] - y_guess)
            subx += (subx[asym.argmin()] - x_guess)
            counter += 1
            del views, asym

    if counter <= maxcounts:
        # now we must find the sub-pixel precision and related options
        # First reshape the array to the saved shape
        asym = asym.reshape(shape_save)

        # set a constant used in the center calculation, it is one if half_pix
        # is unset, it becomes 2 if it is set
        divisor = 1.0

        # This gives the option to include the half-pixel values
        if half_pix:
            # First create the new container array for the asymmetry, double
            # the dimensions of the original
            new_asym = np.zeros(np.array(asym.shape) * 2, dtype=float)

            # Next assign the old asym array to be the top-left corners of the
            # new 2x2 blocks that each represent one original pixel
            new_asym[::2, ::2] = asym.copy()

            # Now create all of the edges; this represents the upper-right box
            # in each 2x2 block, or the edges between pixels going to the right.

            # create the generator for the views
            views = (view_maker(data, suby[i], subx[i], asym_rad)
                     for i in itterator)

            # create a generator for the view on the weights ahead of time
            lb_views = (view_maker(weights, suby[i], subx[i], asym_rad)
                        for i in itterator)

            # create a generator duplicate for the state of w_truth
            w_truth_dup = (w_truth * 1 for i in ones_len)

            # generate the asymmetry values and save them
            new_asym[::2, 1::2] = np.array(list(map(make_asym, views,
                                                    lb_views, w_truth_dup)))\
                .reshape(new_asym[::2, 1::2].shape)

            # create all of the bottoms, representing the lower-left box in
            # each 2x2 block, the edge between pixels going down

            # create the generator for the views
            views = (view_maker(data, suby[i], subx[i], asym_rad)
                     for i in itterator)

            # create a generator for the view on the weights ahead of time
            lb_views = (view_maker(weights, suby[i], subx[i], asym_rad)
                        for i in itterator)

            # create a generator duplicate for the state of w_truth
            w_truth_dup = (w_truth for i in ones_len)

            # generate the asymmetry values and save them
            new_asym[1::2, ::2] = np.array(list(map(make_asym, views,
                                                    lb_views, w_truth_dup)))\
                .reshape(new_asym[1::2, ::2].shape)

            # create all of the corners, like above

            # create the generator for the views
            views = (view_maker(data, suby[i], subx[i], asym_rad)
                     for i in itterator)

            # create a generator for the view on the weights ahead of time
            lb_views = (view_maker(weights, suby[i], subx[i], asym_rad)
                        for i in itterator)

            # create a generator duplicate for the state of w_truth
            w_truth_dup = (w_truth for i in ones_len)

            # generate the asymmetry values and save them
            new_asym[1::2, 1::2] = np.array(list(map(make_asym, views, lb_views,
                                                     w_truth_dup)))\
                .reshape(new_asym[1::2, 1::2].shape)

            # set the new_asym to the old one
            asym = new_asym

            # set the divisor variable, representing that every pixel now
            # represents a half step
            divisor = 2.0

        # next invert the asym space so the minimum point is now the maximum
        asym = -1.0 * asym

        # insert any additional steps that would modify the asym array, such as
        # interpolating, or adding more asym values.
        if resize is not False:
            fact = 1 / float(resize)
            asym = map_coordinates(
                asym, np.mgrid[0:asym.shape[0] - 1 + fact:fact,
                               0:asym.shape[1] - 1 + fact:fact])
            divisor = resize

        # now find the sub pixel position using the given method
        if method == 'col':
            return ((np.array(col(asym)))/divisor - asym_size) + \
                   np.array((suby[middle], subx[middle]), dtype=float)
        if method == 'gaus':
            return [
                ((np.array(fitgaussian(asym)[[1, 2]])) / divisor - asym_size) +
                np.array((suby[middle], subx[middle]), dtype=float), asym
            ]

    else:
        # return this as an error code if the function walked more times than
        # allowed, i.e. without finding a center
        return np.array([-1.0, -1.0])
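The resize branch above uses map_coordinates as a quick spline upsampler; a minimal standalone sketch of the same np.mgrid trick (toy array, factor of 2):

import numpy as np
from scipy.ndimage import map_coordinates

a = np.arange(16, dtype=float).reshape(4, 4)
fact = 1 / 2.0                                   # upsample by a factor of 2
grid = np.mgrid[0:a.shape[0] - 1 + fact:fact,
                0:a.shape[1] - 1 + fact:fact]
up = map_coordinates(a, grid)                    # cubic spline, shape (7, 7)
print(up.shape)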
Example n. 57
    def warp_image(self, img, **kwargs):

        assert img.shape == (self.n_pixels_r, self.n_pixels_c)
        assert self.spatial_unit == 'cm'

        return spndi.map_coordinates(img, self.experiment_geometry.warp_coordinates.T).reshape((self.n_pixels_r, self.n_pixels_c))
Example n. 58
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    # NOTE: the original snippet is truncated; this signature and the random
    # affine setup are reconstructed following the Kaggle/erniejunior recipe
    # cited in Example n. 55, so that the surviving fragment below is runnable.
    # `image` is assumed to be the image with its mask stacked on the last
    # axis (im_merge): channels 0:3 are the image, 3:6 the mask.
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size,
                        center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine,
                                       size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)

    image = cv2.warpAffine(image,
                           M,
                           shape_size[::-1],
                           borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]),
                          np.arange(shape[2]))
    indices = np.reshape(y + dy,
                         (-1, 1)), np.reshape(x + dx,
                                              (-1, 1)), np.reshape(z, (-1, 1))

    im_merge_t = map_coordinates(image, indices, order=1,
                                 mode='reflect').reshape(shape)

    # im_t = map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
    # mask_t = map_coordinates(mask_, indices, order=1, mode='reflect').reshape(shape)

    im_t = im_merge_t[..., 0:3]
    # imshow(im_t)
    # plt.show()
    mask_t = im_merge_t[..., 3:6]
    mask_t = cv2.cvtColor(mask_t, cv2.COLOR_RGB2GRAY)
    # imshow(mask_t)
    # plt.show()

    new_id = 'elastic' + id_[7:]
    os.mkdir(TRAIN_PATH + new_id)
    os.mkdir(TRAIN_PATH + new_id + '/images/')
Example n. 59
def _mask_transform(mask, coordinates):
    mask_trans = map_coordinates(np.squeeze(mask),
                                 coordinates,
                                 order=0,
                                 mode='reflect')
    return np.expand_dims(mask_trans, axis=-1)
Example n. 60
def elastic_distortion(image, alpha, sigma, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)
    s = image.shape
    dx = gaussian_filter(random_state.rand(*s) * 2 - 1, sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter(random_state.rand(*s) * 2 - 1, sigma, mode="constant", cval=0) * alpha
    # note: building x from s[0] and y from s[1] assumes a square image
    x, y = np.meshgrid(np.arange(s[0]), np.arange(s[1]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    return map_coordinates(image, indices, order=1).reshape(s)
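Hypothetical usage of the helper above (it assumes numpy as np and scipy.ndimage's gaussian_filter and map_coordinates are imported at module level):

import numpy as np

img = np.random.rand(28, 28)                # square image, as assumed above
warped = elastic_distortion(img, alpha=34.0, sigma=4.0)
print(warped.shape)                         # (28, 28)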