Example #1
def Inversion(Qsca,Qabs,wavelength,diameter,nMin=1,nMax=3,kMin=0.001,kMax=1,scatteringPrecision=0.010,absorptionPrecision=0.010,spaceSize=120,interp=2):
  
  nRange = np.linspace(nMin,nMax,spaceSize)
  kRange = np.logspace(np.log10(kMin),np.log10(kMax),spaceSize)
  scaSpace = np.zeros((spaceSize,spaceSize))
  absSpace = np.zeros((spaceSize,spaceSize))

  for ni,n in enumerate(nRange):
    for ki,k in enumerate(kRange):
      _derp = fastMieQ(n+(1j*k),wavelength,diameter)
      scaSpace[ni][ki] = _derp[0]
      absSpace[ni][ki] = _derp[1]
  if interp is not None:
    nRange = zoom(nRange,interp)
    kRange = zoom(kRange,interp)
    scaSpace = zoom(scaSpace,interp)
    absSpace = zoom(absSpace,interp)
    
  scaSolutions = np.where(np.logical_and(Qsca*(1-scatteringPrecision)<scaSpace, scaSpace<Qsca*(1+scatteringPrecision)))
  absSolutions = np.where(np.logical_and(Qabs*(1-absorptionPrecision)<absSpace, absSpace<Qabs*(1+absorptionPrecision)))

  validScattering = nRange[scaSolutions[0]]+1j*kRange[scaSolutions[1]]
  validAbsorption = nRange[absSolutions[0]]+1j*kRange[absSolutions[1]]
  
  solution = np.intersect1d(validScattering,validAbsorption)
#  errors = [error()]

  return solution
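A minimal usage sketch for the function above, assuming fastMieQ, numpy (np) and scipy.ndimage's zoom are already imported, and that wavelength and diameter share the same units (e.g. nm); the efficiency values below are placeholders:

# Hypothetical measured efficiencies for a 500 nm particle probed at 532 nm
candidates = Inversion(Qsca=2.36, Qabs=0.38, wavelength=532, diameter=500)
print(candidates)  # complex refractive indices n + ik consistent with both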
Example #2
def imageUp(img, order=1):
    """Upsample input image by a factor of 2.

    Parameters
    ----------
    img : ndarray
        Image array. It can be a 2D or 3D array. If it is a 3D array,
        the upsampling is applied independently to each channel.

    order : integer, optional
        Interpolation order. Defaults to 1

    Returns
    -------
    imgUp : ndarray
        Upsampled image of size (2*H, 2*W, D), where (H, W, D) are the
        height, width and depth of the input image.
    """
    
    if img.ndim == 2:
        imgZoomed = np.zeros([2*img.shape[0], 2*img.shape[1]], dtype=img.dtype)
        nd.zoom(img, 2.0, output=imgZoomed, order=order, mode='reflect')
        return imgZoomed

    else:

        zoomList = list()
        for d in range(img.shape[2]):

            imgZoomed = np.zeros([2*img.shape[0], 2*img.shape[1]], dtype=img.dtype)
            nd.zoom(img[...,d], 2.0, output=imgZoomed, order=order, mode='reflect')

            zoomList.append(imgZoomed)

        # recombine channels and return
        return np.concatenate([p[...,np.newaxis] for p in zoomList], axis=2)
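A quick sanity check of the doubling behaviour, assuming the snippet's aliases (np for numpy, nd for scipy.ndimage) are in scope where imageUp is defined:

import numpy as np
import scipy.ndimage as nd

rgb = np.random.rand(32, 48, 3)
assert imageUp(rgb).shape == (64, 96, 3)       # each channel zoomed independently
assert imageUp(rgb[..., 0]).shape == (64, 96)  # plain 2D path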
Example #3
def Inversion_SD(Bsca,Babs,wavelength,dp,ndp,nMin=1,nMax=3,kMin=0,kMax=1,scatteringPrecision=0.001,absorptionPrecision=0.001,spaceSize=40,interp=2):
  dp = coerceDType(dp)
  ndp = coerceDType(ndp)

  nRange = np.linspace(nMin,nMax,spaceSize)
  kRange = np.linspace(kMin,kMax,spaceSize)
  scaSpace = np.zeros((spaceSize,spaceSize))
  absSpace = np.zeros((spaceSize,spaceSize))

  for ni,n in enumerate(nRange):
    for ki,k in enumerate(kRange):
      _derp = fastMie_SD(n+(1j*k),wavelength,dp,ndp)
      scaSpace[ni][ki] = _derp[0]
      absSpace[ni][ki] = _derp[1]
  if interp is not None:
    nRange = zoom(nRange,interp)
    kRange = zoom(kRange,interp)
    scaSpace = zoom(scaSpace,interp)
    absSpace = zoom(absSpace,interp)

  scaSolutions = np.where(np.logical_and(Bsca*(1-scatteringPrecision)<scaSpace, scaSpace<Bsca*(1+scatteringPrecision)))
  absSolutions = np.where(np.logical_and(Babs*(1-absorptionPrecision)<absSpace, absSpace<Babs*(1+absorptionPrecision)))

  validScattering = nRange[scaSolutions[0]]+1j*kRange[scaSolutions[1]]
  validAbsorption = nRange[absSolutions[0]]+1j*kRange[absSolutions[1]]

  return np.intersect1d(validScattering,validAbsorption)
Example #4
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_4c/output', clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(
            nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))

    src = net.blobs['data']
    # allocate image for network-produced details
    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        print("octave %d %s" % (octave, end))
        for i in range(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            sys.stdout.write("%d " % i)
            sys.stdout.flush()
        print("")

        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
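The octave bookkeeping above is easy to verify in isolation; a small sketch using only numpy and scipy.ndimage (no Caffe), with a made-up (C, H, W) array standing in for preprocess() output:

import numpy as np
import scipy.ndimage as nd

img = np.zeros((3, 320, 240), dtype=np.float32)
octave_scale = 1.4
octaves = [img]
for _ in range(3):
    octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
print([o.shape for o in octaves])
# [(3, 320, 240), (3, 229, 171), (3, 164, 122), (3, 117, 87)]

# rescale the detail array to the next octave, exactly as deepdream does
detail = np.zeros_like(octaves[-1])
h, w = octaves[-2].shape[-2:]
h1, w1 = detail.shape[-2:]
detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
assert detail.shape[-2:] == (h, w)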
Example #5
def deepdream(
    net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True, **step_params
):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))

    src = net.blobs["data"]
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Example #6
def compareData(x1, y1, x2, y2, **kwargs):
    """
    """
    # First compare that there x-axis are same. else report warning.
    x1 = np.array(x1)
    x2 = np.array(x2)
    y1 = np.array(y1)
    y2 = np.array(y2)
    print("[INFO] Plotting")
    p1, = pylab.plot(x1, y1)
    p2, = pylab.plot(x2, y2)
    pylab.legend([p1, p2], ["MOOSE", "NEURON"])

    outfile = kwargs.get('outfile', None)
    if not outfile:
        pylab.show()
    else:
        mu.info("Saving figure to %s" % outfile)
        pylab.savefig(outfile)
    
    # Resample the longer trace down to the length of the shorter one.
    if len(y1) > len(y2): y1 = ndimage.zoom(y1, len(y2)/len(y1))
    else: y2 = ndimage.zoom(y2, len(y1)/len(y2))
    diff = y1 - y2
    linDiff = diff.sum()
    rms = np.sqrt(np.mean(diff**2.0))
    print(" |- RMS diff is: {}".format(rms))
Example #7
def deepdream(base_img, iter_n=5, octave_n=4, octave_scale=1.4, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))

    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        x = octave_base + detail  # network input for this octave
        for i in xrange(iter_n):
            print h, w
            make_step(x.reshape(1, 3, h, w))
            # visualization
            vis = deprocess(x)
            # showarray(vis)
            # print octave, i, end, vis.shape
        # extract details produced on the current octave
        detail = x - octave_base
    # returning the resulting image
    return deprocess(x)
Example #8
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, 
              end='inception_5b/pool_proj', jitter = 32,step_size=1.5):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))
    
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end,step_size=step_size,jitter=jitter)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Example #9
    def upsample_pyramid(self, pyramid):

        target_shape = self.residual_hipass.shape

        result = []
        for level in pyramid:
            new_level = []
            for band in level:
                band_shape = band.shape
                if len(target_shape) > len(band_shape):
                    band_shape = (band_shape[0], band_shape[1], 1)

                zf = array(target_shape) / array(band_shape)

                band.shape = band_shape

                tmp = ones(target_shape)
                if any(zf != 1):
                    ndi.zoom(band, zf, tmp, order=1)
                    upsamped = tmp
                else:
                    upsamped = band

                new_level.append(upsamped)
            result.append(new_level)

        return result
Example #10
File: psycam.py Project: JoBergs/psycam
    def deepdream(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4, 
                              end='inception_4c/output'):

        # prepare base images for all octaves
        octaves = [preprocess(self.net, base_img)]
        for i in xrange(octave_n-1):
            octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))
        
        source = self.net.blobs['data']  # original image
        detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details

        for octave, octave_base in enumerate(octaves[::-1]):
            h, w = octave_base.shape[-2:]  # octave size
            if octave > 0:
                # upscale details from previous octave
                h1, w1 = detail.shape[-2:]
                detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)

            source.reshape(1, 3, h, w) # resize the network's input image size
            source.data[0] = octave_base + detail

            for i in xrange(iter_n):
                self.make_step(end=end)
                
            # extract details produced on the current octave
            detail = source.data[0] - octave_base

        return deprocess(self.net, source.data[0])  # return final image
Example #11
def deepdream(net, base_img, end, iter_n=10, octave_n=4, octave_scale=1.4, clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end, clip=clip, **step_params)

            # display step
            #vis = deprocess(net, src.data[0])
            #if not clip: # adjust image contrast if clipping is disabled
            #    vis = vis*(255.0/np.percentile(vis, 99.98))
            #ename = '-'.join(end.split('/'))
            #saveimage(vis, '{}-{}-{}'.format(octave, i))
            #print octave, i, end, vis.shape

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Example #12
def dream(model,
          base_img,
          octave_n=6,
          octave_scale=1.4,
          control=None,
          distance=objective_L2):
    octaves = [base_img]
    for i in range(octave_n - 1):
        octaves.append(
            nd.zoom(
                octaves[-1], (1, 1, 1.0 / octave_scale, 1.0 / octave_scale),
                order=1))

    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(
                detail, (1, 1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        input_oct = octave_base + detail
        print(input_oct.shape)
        out = make_step(input_oct, model, control, distance=distance)
        detail = out - octave_base
Example #13
    def __init__(self, polmap, I0, ne, flip_ne=False):
        self.fn=polmap.fn[:8]
        I0=plt.imread(I0)
        self.I0s=np.sum(I0,2)
        I1=np.loadtxt(ne, delimiter=',')
        I1=I1-np.nan_to_num(I1).min()
        self.I1=np.nan_to_num(I1)
        self.pm=polmap
        #scale and flip to data
        B0=self.pm.B0
        scale=B0.shape[0]/self.I0s.shape[0]

        I0z=zoom(self.I0s, scale)
        crop=(I0z.shape[1]-B0.shape[1])//2
        if B0.shape[1]%2==0:
            I0zc=I0z[:,crop:-crop]
        elif B0.shape[1]%2==1:
            I0zc=I0z[:,crop:-crop-1]
        self.I0zcn=np.flipud(I0zc/I0zc.max())
        I1z=zoom(self.I1, scale)
        if B0.shape[1]%2==0:
            I1zc=I1z[:,crop:-crop]
        elif B0.shape[1]%2==1:
            I1zc=I1z[:,crop:-crop-1]
        self.I1zc=np.flipud(I1zc)
        if flip_ne is True:
            self.I1zc=np.flipud(self.I1zc)
            
        self.cmap='seismic'
Example #14
def deepdream_stepped(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_3b/5x5_reduce', start_sigma=2.5, end_sigma=.1, start_jitter=48., end_jitter=4., start_step_size=3.0, end_step_size=1.5, clip=True, **step_params):
	# prepare base images for all octaves
	octaves = [preprocess(net, base_img)]
	for i in xrange(octave_n-1):
		octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))
	src = net.blobs['data']
	detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
	for octave, octave_base in enumerate(octaves[::-1]):
		h, w = octave_base.shape[-2:]
		if octave > 0:	# upscale details from the previous octave
			h1, w1 = detail.shape[-2:]
			detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)
		src.reshape(1,3,h,w) # resize the network's input image size
		src.data[0] = octave_base+detail

		for i in xrange(iter_n):	
			sigma = start_sigma + ((end_sigma - start_sigma) * i) / iter_n
			jitter = start_jitter + ((end_jitter - start_jitter) * i) / iter_n
			step_size = start_step_size + ((end_step_size - start_step_size) * i) / iter_n
            
			make_step(net, end=end, clip=clip, jitter=jitter, step_size=step_size, **step_params)
			#src.data[0] = blur(src.data[0], sigma)
		
		# extract details produced on the current octave
		detail = src.data[0]-octave_base
	#returning the resulting image
	return deprocess(net, src.data[0])
Example #15
def show_downsize():
	for im in gen_images(n=-1, crop=True):
		t_im = im['T1c']
		gt = im['gt']
		
		t_im = np.asarray(t_im, dtype='float32')
		gt = np.asarray(gt, dtype='float32')
		
		d_im = zoom(t_im, 0.5, order=3)
		d_gt = zoom(gt, 0.5, order=0)
		print 'New shape: ', d_im.shape
		
		slices1 = np.arange(0, d_im.shape[0], d_im.shape[0]/20)
		slices2 = np.arange(0, t_im.shape[0], t_im.shape[0]/20)
		
		for s1, s2 in zip(slices1, slices2):
			d_im_slice = d_im[s1]
			d_gt_slice = d_gt[s1]
			
			im_slice = t_im[s2]
			gt_slice = gt[s2]
			
			title0= 'Original'
			title1= 'Downsized'
			vis_ims(im0=im_slice, gt0=gt_slice, im1=d_im_slice, 
				gt1=d_gt_slice, title0=title0, title1=title1)
Example #16
    def overlay_velocities(self, ax):
        """Given an axes instance, overlay a quiver plot
        of Uf_ and Wf_.

        Uses interpolation (scipy.ndimage.zoom) to reduce the
        number of quivers to a readable number.

        Will only work sensibly if the thing plotted in ax
        has the same shape as Uf_.
        """
        zoom_factor = (0.5, 0.05)
        # TODO: proper x, z
        Z, X = np.indices(self.uf_.shape)

        # TODO: are the velocities going at the middle of their grid?
        # NB. these are not averages. ndi.zoom makes a spline and
        # then interpolates a value from this
        # TODO: gaussian filter first?
        # both are valid approaches
        Xr = ndi.zoom(X, zoom_factor)
        Zr = ndi.zoom(Z, zoom_factor)
        Uf_r = ndi.zoom(self.uf_, zoom_factor)
        Wf_r = ndi.zoom(self.wf_, zoom_factor)

        ax.quiver(Xr, Zr, Uf_r, Wf_r, scale=100)
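A self-contained version of the same decimation idea, with synthetic fields standing in for self.uf_ and self.wf_:

import numpy as np
import scipy.ndimage as ndi
import matplotlib.pyplot as plt

U = np.cos(np.linspace(0, 4 * np.pi, 400))[np.newaxis, :] * np.ones((50, 1))
W = np.sin(np.linspace(0, 2 * np.pi, 50))[:, np.newaxis] * np.ones((1, 400))
Z, X = np.indices(U.shape)

zoom_factor = (0.5, 0.05)  # keep ~every 2nd row and ~every 20th column
Xr = ndi.zoom(X, zoom_factor)
Zr = ndi.zoom(Z, zoom_factor)
Ur = ndi.zoom(U, zoom_factor)
Wr = ndi.zoom(W, zoom_factor)

fig, ax = plt.subplots()
ax.quiver(Xr, Zr, Ur, Wr, scale=100)
plt.show()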
Example #17
def deepdream(net, base_imarray, iter_n=50, octave_n=4, octave_scale=1.4, end='inception_4c/output', clip=True, **step_params):

	octaves = [preprocess(net, base_imarray)]

	for i in xrange(octave_n-1):
		octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

	src = net.blobs['data']
	detail = np.zeros_like(octaves[-1])

	for octave, octave_base in enumerate(octaves[::-1]):
		h, w = octave_base.shape[-2:]
		if octave > 0:
			h1, w1 = detail.shape[-2:]
			detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

		src.reshape(1,3,h,w)
		src.data[0] = octave_base+detail

		for i in xrange(iter_n):
			make_step(net, end=end, clip=clip, **step_params)
			vis = deprocess(net, src.data[0])

			if not clip:
				vis = vis*(255.0/np.percentile(vis, 99.98))

			showarray(vis)

			print octave, i, end, vis.shape

			clear_output(wait=True)

		detail = src.data[0]-octave_base

	return deprocess(net, src.data[0])
Example #18
def plot_all_params(filen='obj_props', out_filen='ppv_grid', log_Z=False):
    """
    Read in the pickled tree parameter dictionary and plot the containing
    parameters.

    Parameters
    ----------
    filen : str
        File name of pickled reduced property dictionary.
    out_filen : str
        Basename of plots, the key of the object dictionary is appended to the
        filename.
    log_Z : bool
        Create plots with logarithmic Z axis
    """
    cmap = cm.RdYlBu_r
    obj_dict = pickle.load(open(filen + '.pickle', 'rb'))
    X = obj_dict['velo']
    Y = obj_dict['angle']
    X = ndimage.zoom(X, 3)
    Y = ndimage.zoom(Y, 3)
    W = ndimage.zoom(obj_dict['conflict_frac'], 3)
    obj_dict['reward'] = np.log10(obj_dict['new_kdar_assoc']) / obj_dict['conflict_frac']
    params = [(k, v) for k, v in obj_dict.iteritems()
              if k not in ['velo', 'angle']]
    clevels = [0.06, 0.12, 0.20, 0.30, 0.5]
    for key, Z in params:
        print ':: ', key
        fig, ax = plt.subplots(figsize=(4, 4.5))
        cax = fig.add_axes([0.15, 0.88, 0.8, 0.03])
        plt.subplots_adjust(top=0.85, left=0.15, right=0.95, bottom=0.125)
        if log_Z:
            Z = np.log10(Z)
            key += '_(log)'
        Z = ndimage.zoom(Z, 3)
        pc = ax.pcolor(X, Y, Z, cmap=cmap, vmin=Z.min(), vmax=Z.max())
        cb = plt.colorbar(pc, ax=ax, cax=cax, orientation='horizontal',
                          ticklocation='top')
        ax.plot([4], [0.065], 'ko', ms=10, markerfacecolor='none', markeredgewidth=2)
        # Contours for conflict frac
        cn = ax.contour(X, Y, W, levels=clevels,
                        colors='k', linewidth=2)
        plt.setp(cn.collections,
                 path_effects=[PathEffects.withStroke(linewidth=2,
                 foreground='w')])
        cl = ax.clabel(cn, fmt='%1.2f', inline=1, fontsize=10,
                       use_clabeltext=True)
        plt.setp(cl, path_effects=[PathEffects.withStroke(linewidth=2,
                 foreground='w')])
        # Labels
        ax.set_xlabel(r'$v \ \ [{\rm km \ s^{-1}}]$')
        ax.set_ylabel(r'$\theta \ \ [^{\circ}]$')
        # Limits
        ax.set_xlim([X.min(), X.max()])
        ax.set_ylim([Y.min(), Y.max()])
        # Save
        plt.savefig(out_filen + '_' + key + '.pdf')
        plt.savefig(out_filen + '_' + key + '.png', dpi=300)
        plt.close()
Example #19
def scaleImage(path_img, dilated_img, depth, color_depth, scale=1):
                  
        final_vessel = ndimage.zoom(dilated_img, scale, order=0) 
        final_path = skeletonize_Image(255*ndimage.zoom(path_img, scale, order=0))/255# use nearest neighbour
        final_depth = final_path*ndimage.zoom(depth, scale, order=0)
        final_color_depth = ndimage.zoom(color_depth, scale, order=0)

        return final_path,final_vessel,final_depth, final_color_depth
Example #20
    def __call__(self, locs, wfImage):
        """Align a set of localizations to a widefield image.
        
        Parameters
        ----------
        locs    : Pandas DataFrame
            The DataFrame containing the localizations. x- and y-column
            labels are specified in self.coordCols.
        wfImage : array of int or array of float 
            The widefield image to align the localizations to.
        
        Returns
        -------
        offsets : tuple of float
            The estimated offset between the localizations and widefield
            image. The first element is the offset in x and the second
            in y. These should be subtracted from the input localizations
            to align them to the widefield image.
            
        """
        upsampleFactor = self.upsampleFactor
        
        # Bin the localizations into a 2D histogram;
        # x corresponds to rows for histogram2d
        binsX = np.arange(0, upsampleFactor * wfImage.shape[0] + 1, 1) \
                                            * self.pixelSize / upsampleFactor
        binsY = np.arange(0, upsampleFactor * wfImage.shape[1] + 1, 1) \
                                            * self.pixelSize / upsampleFactor
        H, _, _ = np.histogram2d(locs[self.coordCols[0]],
                                 locs[self.coordCols[1]],
                                 bins = [binsX, binsY])
                           
        # Upsample and flip the image to align it to the histogram;
        # then compute the cross correlation
        crossCorr = fftconvolve(H,
                                zoom(np.transpose(wfImage)[::-1, ::-1], 
                                     upsampleFactor, order = 0),
                                mode = 'same')
        
        # Find the maximum of the cross correlation
        centerLoc = np.unravel_index(np.argmax(crossCorr), crossCorr.shape)

        # Find the center of the widefield image
        imgCorr = fftconvolve(zoom(np.transpose(wfImage), 
                                   upsampleFactor, order = 0),
                              zoom(np.transpose(wfImage)[::-1, ::-1], 
                                   upsampleFactor, order = 0),
                              mode = 'same')
        centerWF = np.unravel_index(np.argmax(imgCorr), imgCorr.shape)
                              
        # Find the shift between the images.
        # dx -> rows, dy -> cols because the image was transposed during
        # fftconvolve operation.
        dy = (centerLoc[1] - centerWF[1]) / upsampleFactor * self.pixelSize
        dx = (centerLoc[0] - centerWF[0]) / upsampleFactor * self.pixelSize
        
        offsets = (dx, dy)
        return offsets
Example #21
    def _make_tuples(self, key):
        from scipy import ndimage
        from .utils import registration

        print('Registering', key)

        # Get stack
        stack_rel = (stack.CorrectedStack() & key & {'session': key['stack_session']})
        stack_ = stack_rel.get_stack(key['stack_channel'])

        # Get average field
        field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
                     'scan_idx': key['scan_idx'], 'field': key['field'],
                     'channel': key['scan_channel']} #no pipe_version
        frames = (meso.Quality().SummaryFrames() & field_key).fetch1('summary')
        field = frames[:, :, int(frames.shape[-1] / 2)]

        # Drop some edges (only y and x) to avoid artifacts
        skip_dims = [max(1, int(round(s * 0.025))) for s in stack_.shape]
        stack_ = stack_[:, skip_dims[1] : -skip_dims[1], skip_dims[2]: -skip_dims[2]]
        skip_dims = [max(1, int(round(s * 0.025))) for s in field.shape]
        field = field[skip_dims[0] : -skip_dims[0], skip_dims[1]: -skip_dims[1]]

        # Rescale to match lowest resolution  (isotropic pixels/voxels)
        field_res = (meso.ScanInfo.Field() & field_key).microns_per_pixel
        dims = stack_rel.fetch1('um_depth', 'px_depth', 'um_height', 'px_height',
                                'um_width', 'px_width')
        stack_res = np.array([dims[0] / dims[1], dims[2] / dims[3], dims[4] / dims[5]])
        common_res = max(*field_res, *stack_res) # minimum available resolution
        stack_ = ndimage.zoom(stack_, stack_res / common_res, order=1)
        field = ndimage.zoom(field, field_res / common_res, order=1)

        # Get estimated depth of the field (from experimenters)
        stack_x, stack_y, stack_z = stack_rel.fetch1('x', 'y', 'z') # z of the first slice (zero is at surface depth)
        field_z = (meso.ScanInfo.Field() & field_key).fetch1('z') # measured in microns (zero is at surface depth)
        if field_z < stack_z or field_z > stack_z + dims[0]:
            msg_template = 'Warning: Estimated depth ({}) outside stack range ({}-{}).'
            print(msg_template.format(field_z, stack_z , stack_z + dims[0]))
        estimated_px_z = (field_z - stack_z + 0.5) / common_res # in pixels

        # Register
        z_range = 40 / common_res # search 40 microns up and down

        # Run rigid registration with no rotations
        result = registration.register_rigid(stack_, field, estimated_px_z, z_range)
        score, (x, y, z), (yaw, pitch, roll) = result

        # Map back to stack coordinates
        final_x = stack_x + x * (common_res / stack_res[2]) # in stack pixels
        final_y = stack_y + y * (common_res / stack_res[1]) # in stack pixels
        final_z = stack_z + (z + stack_.shape[0] / 2) * common_res # in microns*
        #* Best match in slice 0 will not result in z = 0 but 0.5 * z_step.

        # Insert
        self.insert1({**key, 'common_res': common_res, 'reg_x': final_x, 'reg_y': final_y,
                      'reg_z': final_z, 'score': score})
        self.notify(key)
Example #22
def zoomSmooth(inArr, smoothing, inAffine):
    zoomed = zoom(inArr.data, smoothing, order=1)
    zoomMask = zoom(inArr.mask, smoothing, order=0)
    zoomed[np.where(zoomed > inArr.max())] = inArr.max()
    zoomed[np.where(zoomed < inArr.min())] = inArr.min()
    inArr = np.ma.array(zoomed, mask=zoomMask)
    oaff = tools.resampleAffine(inAffine, smoothing)
    del zoomed, zoomMask
    return inArr, oaff
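The order split is the design point here: order=0 keeps the resampled mask boolean and crisp, while order=1 smooths the data. A tiny check of that behaviour, independent of the inAffine/tools machinery:

import numpy as np
from scipy.ndimage import zoom

data = np.ma.array(np.random.rand(10, 10), mask=np.zeros((10, 10), dtype=bool))
zmask = zoom(data.mask, 3, order=0)  # nearest neighbour: stays boolean
zdata = zoom(data.data, 3, order=1)  # bilinear: smooth values
assert zmask.dtype == bool and zmask.shape == zdata.shape == (30, 30)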
Example #23
def _detectHaarFeatures(image, options=None):
    if options is None:
        options = _haarDefaultOptions(image)
    levels = options.get('levels')   
    maxpoints = options.get('maxpoints')
    threshold = options.get('threshold')
    locality = options.get('locality')
    
    haarData = haar2d(image, levels)

    avgRows = haarData.shape[0] / 2 ** levels
    avgCols = haarData.shape[1] / 2 ** levels
    
    SalientPoints = {}
    
    siloH = np.zeros([haarData.shape[0]/2, haarData.shape[1]/2, levels])
    siloD = np.zeros([haarData.shape[0]/2, haarData.shape[1]/2, levels])
    siloV = np.zeros([haarData.shape[0]/2, haarData.shape[1]/2, levels])
    
    # Build the saliency silos
    for i in range(levels):
        level = i + 1
        halfRows = haarData.shape[0] / 2 ** level
        halfCols = haarData.shape[1] / 2 ** level
        siloH[:,:,i] = nd.zoom(haarData[:halfRows, halfCols:halfCols*2], 2**(level-1)) 
        siloD[:,:,i] = nd.zoom(haarData[halfRows:halfRows*2, halfCols:halfCols*2], 2**(level-1)) 
        siloV[:,:,i] = nd.zoom(haarData[halfRows:halfRows*2, :halfCols], 2**(level-1)) 
    
    # Calculate saliency heat-map
    saliencyMap = np.max(np.array([
                                np.sum(np.abs(siloH), axis=2), 
                                np.sum(np.abs(siloD), axis=2),
                                np.sum(np.abs(siloV), axis=2)
                                ]), axis=0)
                               
    # Determine global maximum and saliency threshold
    maximum = np.max(saliencyMap)
    sthreshold = threshold * maximum
    
    # Extract features by finding local maxima
    rows = haarData.shape[0] / 2
    cols = haarData.shape[1] / 2
    features = {}
    id = 0
    for row in range(locality,rows-locality):
        for col in range(locality,cols-locality):
            saliency = saliencyMap[row,col]
            if saliency > sthreshold:
                if  saliency >= np.max(saliencyMap[row-locality:row+locality, col-locality:col+locality]):
                    features[id] = (row*2,col*2)
                    id += 1

    result = {}
    result['points'] = features
    return result
Example #24
def scale_dif(image,full_size):
	#compare images
	x,y = image.shape[0:2]
	i,j = full_size.shape[0:2]
	print("scale dif {} {} {} {}".format(x,y,i,j))
	if i == x and j == y:
		return image
	original = nd.zoom(full_size, (1.0*x/i,1.0*y/j,1))
	dif_zoom = nd.zoom(original - image, (1.0*i/x,1.0*j/y,1))
	
	return np.clip(full_size - dif_zoom,0,255)
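A shape check for the round trip above, with synthetic images; assumes numpy as np and scipy.ndimage as nd:

import numpy as np
import scipy.ndimage as nd

image = np.random.rand(50, 60, 3) * 255        # downsized working copy
full_size = np.random.rand(100, 120, 3) * 255  # original resolution
out = scale_dif(image, full_size)
assert out.shape == full_size.shape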
Example #25
File: extra.py Project: ChrisYang/CRFdet
def myzoom(img,factor,order):
    auxf=numpy.array(factor)
    order=1 #force order to be 1 otherwise I do not know if it is still correct
    from scipy.ndimage import zoom
    aux=img.copy()
    while (auxf[0]<0.5 and auxf[1]<0.5):
        aux=zoom(aux,(0.5,0.5,1),order=order)
        auxf[0]=auxf[0]*2
        auxf[1]=auxf[1]*2
    aux=zoom(aux,auxf,order=order)
    return aux
Example #26
 def scale_and_crop(self):
     B0=self.pm.B0
     scale=B0.shape[0]/self.I0s.shape[0]
     I0z=zoom(self.I0s, scale)
     crop=(I0z.shape[1]-B0.shape[1])//2
     I0zc=I0z[:,crop:-crop]
     self.I0zcn=np.fliplr(I0zc/I0zc.max())
     
     I1z=zoom(self.I1, scale)
     I1zc=I1z[:,crop:-crop]
     self.I1zc=np.flipud(I1zc)
Example #27
def test_CubicSpline_estimate():
    """
    Asserts that scaling a warp field is a reasonable thing to do.
    """
    
    scale = 2.0
    
    # Form a high resolution image.
    high = register.RegisterData(misc.lena().astype(np.double))
    
    # Form a low resolution image.
    low = high.downsample(scale)
    
    # Make a deformed low resolution image.
    p = model.CubicSpline(low.coords).identity
    p += np.random.rand(p.shape[0]) * 100 - 50
    
    warp = model.CubicSpline(low.coords).transform(p)
    
    dlow = sampler.Nearest(low.coords).f(
        low.data, 
        low.coords.tensor - warp
        ).reshape(low.data.shape)
    
    # Scale the low resolution warp field to the same size as the high resolution 
    # image. 
    
    hwarp = np.array( [nd.zoom(warp[0],scale), nd.zoom(warp[1],scale)] ) * scale
    
    # Estimate the high resolution spline parameters that best fit the 
    # enlarged warp field.
    
    invB = np.linalg.pinv(model.CubicSpline(high.coords).basis)
    
    pHat = np.hstack(
        (np.dot(invB, hwarp[1].flatten()), 
         np.dot(invB, hwarp[0].flatten()))
        )
    
    warpHat = model.CubicSpline(high.coords).warp(pHat)
    
    # Make a deformed high resolution image.
    dhigh = sampler.Nearest(high.coords).f(high.data, warpHat).reshape(high.data.shape)
    
    # down-sample the deformed high-resolution image and assert that the 
    # pixel values are "close".
    dhigh_low = nd.zoom(dhigh, 1.0/scale)
    
    # Assert that the down-sampled high-resolution image is "roughly" similar
    # to the low resolution image.
    
    assert (np.abs((dhigh_low[:] - dlow[:])).sum() / dlow.size < 10.0), \
        "Normalized absolute error is greater than 10 pixels."
Example #28
File: dream.py Project: spurll/dream
def deepdream(
    net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
    end='inception_4c/output', clip=True, **step_params
):
    '''
    Implement an ascent through different scales. We call these scales
    "octaves".
    '''

    # Prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(
            # ndimage? numpy?
            ndimage.zoom(
                octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale),
                order=1
            )
        )

    src = net.blobs['data']
    # Allocate image for network-produced details
    detail = numpy.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # Upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = ndimage.zoom(
                detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1
            )

        # Resize the network's input image size
        src.reshape(1, 3, h, w)
        src.data[0] = octave_base + detail
        for i in range(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # Visualization
            vis = postprocess(net, src.data[0])
            if not clip:
                # Adjust image contrast if clipping is disabled
                vis = vis * (255.0 / numpy.percentile(vis, 99.98))

            showarray(vis)
            print(octave, i, end, vis.shape)
            clear_output(wait=True)

        # Extract details produced on the current octave
        detail = src.data[0]-octave_base

    # Returning the resulting image
    return postprocess(net, src.data[0])
Example #29
def measure_shift(da, db, use_md=True):
    """
    use_md (bool): if False, will not use metadata and assume the 2 images are
      of the same area
    return (float, float): shift of the second image compared to the first one,
     in pixels of the first image.
    """
    da_res = da.shape[1], da.shape[0] # X/Y are inverted
    db_res = db.shape[1], db.shape[0]
    if any(sa < sb for sa, sb in zip(da_res, db_res)):
        logging.warning("Comparing a large image %s to a small image %s, you should do the opposite", db_res, da_res)

    # if db FoV is smaller than da, crop da
    if use_md:
        dafov = [pxs * s for pxs, s in zip(da.metadata[model.MD_PIXEL_SIZE], da_res)]
        dbfov = [pxs * s for pxs, s in zip(db.metadata[model.MD_PIXEL_SIZE], db_res)]
        fov_ratio = [fa / fb for fa, fb in zip(dafov, dbfov)]
    else:
        fov_ratio = (1, 1)
    if any(r < 1 for r in fov_ratio):
        logging.warning("Cannot compare an image with a large FoV %g to a small FoV %g",
                        dbfov, dafov)
        shift_px = measure_shift(db, da)
        return [-s for s in shift_px]

    crop_res = [int(s / r) for s, r in zip(da_res, fov_ratio)]
    logging.debug("Cropping da to %s", crop_res)
    da_ctr = [s // 2 for s in da_res]
    da_lt = [int(c - r // 2) for c, r in zip(da_ctr, crop_res)]
    da_crop = da[da_lt[1]: da_lt[1] + crop_res[1],
                 da_lt[0]: da_lt[0] + crop_res[0]]

    scale = [sa / sb for sa, sb in zip(da_crop.shape, db.shape)]
    if scale[0] != scale[1]:
        raise ValueError("Comparing images with different zooms levels %s on each axis is not supported" % (scale,))

    # Resample the smaller image to fit the resolution of the larger image
    if scale[0] < 1:
        # The "big" image has actually less pixels than the small FoV image
        # => zoom the big image, and compensate later for the shift
        logging.info("Rescaling large FoV image by scale %f", 1 / scale[0])
        da_crop = zoom(da_crop, 1 / scale[0])
        db_big = db
        shift_ratio = scale[0]
    else:
        logging.info("Rescaling small FoV image by scale %f", scale[0])
        db_big = zoom(db, scale[0])
        shift_ratio = 1
    # Apply phase correlation
    shift_px = MeasureShift(da_crop, db_big, 10)

    return shift_px[0] * shift_ratio, shift_px[1] * shift_ratio
Example #30
def analyseSpotsDeconvolution(files):
    """
    Analyse spot measurements using deconvolutions.

    Note: does not really work... perhaps an issue with sizes.

    :param files: a list of input files
    :type files: list

    :return: None
    """
    d = {}
    data = []
    for filename in files:
        tmp = readData(filename, crop=False)
        f = filename.replace('.fits', '')
        d[f] = tmp
        data.append(tmp)
    data = np.asarray(data)

    #sanity check plots
    #stackData(data)

    #deconvolve with top hat
    dec1 = {}
    y, x = data[0].shape
    top = np.zeros((y, x))
    top[y/2, x/2] = 1.
    fileIO.writeFITS(top, 'tophat.fits', int=False)
    for filename, im in zip(files, data):
        deconv = weinerFilter(im, top, normalize=False)
        f = filename.replace('.fits', 'deconv1.fits')
        fileIO.writeFITS(deconv, f, int=False)
        dec1[f] = deconv

    print "Tophat deconvolution done"

    #deconvolve with a Besssel
    dec2 = {}
    bes = generateBessel(radius=0.13)
    bes = ndimage.zoom(bes, 1./2.5, order=0)
    bes /= np.max(bes)
    fileIO.writeFITS(bes, 'bessel.fits', int=False)
    for key, value in dec1.iteritems():
        value = ndimage.zoom(value, 4., order=0)
        value -= np.median(value)
        deconv = weinerFilter(value, bes, reqularization=2.0, normalize=False)
        f = key.replace('deconv1.fits', 'deconv2.fits')
        fileIO.writeFITS(deconv, f, int=False)
        dec2[f] = deconv

    print 'Bessel deconvolution done'
Example #31
def merge_images_landmarks_maps_gt(images,
                                   maps,
                                   maps_gt,
                                   landmarks=None,
                                   image_size=256,
                                   num_landmarks=68,
                                   num_samples=9,
                                   scale=255,
                                   circle_size=2,
                                   fast=False):
    """create image for log - containing input face images, predicted heatmaps and GT heatmaps (if exists)"""

    images = images[:num_samples]
    if maps.shape[1] != image_size:
        images = zoom(images, (1, 0.25, 0.25, 1))
        image_size /= 4
        image_size = int(image_size)
    if maps_gt is not None:
        if maps_gt.shape[1] != image_size:
            maps_gt = zoom(maps_gt, (1, 0.25, 0.25, 1))

    cmap = plt.get_cmap('jet')

    row = int(np.sqrt(num_samples))
    if maps_gt is None:
        merged = np.zeros([row * image_size, row * image_size * 2, 3])
    else:
        merged = np.zeros([row * image_size, row * image_size * 3, 3])

    for idx, img in enumerate(images):
        i = idx // row
        j = idx % row

        if landmarks is None:
            img_landmarks = heat_maps_to_landmarks(maps[idx, :, :, :],
                                                   image_size=image_size,
                                                   num_landmarks=num_landmarks)
        else:
            img_landmarks = landmarks[idx]

        if fast:
            map_image = np.amax(maps[idx, :, :, :], 2)
            map_image = (map_image - map_image.min()) / (map_image.max() -
                                                         map_image.min())
        else:
            map_image = heat_maps_to_image(maps[idx, :, :, :],
                                           img_landmarks,
                                           image_size=image_size,
                                           num_landmarks=num_landmarks)
        rgba_map_image = cmap(map_image)
        map_image = np.delete(rgba_map_image, 3, 2) * 255

        img = create_img_with_landmarks(img,
                                        img_landmarks,
                                        image_size,
                                        num_landmarks,
                                        scale=scale,
                                        circle_size=circle_size)

        if maps_gt is not None:
            if fast:
                map_gt_image = np.amax(maps_gt[idx, :, :, :], 2)
                map_gt_image = (map_gt_image - map_gt_image.min()) / (
                    map_gt_image.max() - map_gt_image.min())
            else:
                map_gt_image = heat_maps_to_image(maps_gt[idx, :, :, :],
                                                  image_size=image_size,
                                                  num_landmarks=num_landmarks)
            rgba_map_gt_image = cmap(map_gt_image)
            map_gt_image = np.delete(rgba_map_gt_image, 3, 2) * 255

            merged[i * image_size:(i + 1) * image_size,
                   (j * 3) * image_size:(j * 3 + 1) * image_size, :] = img
            merged[i * image_size:(i + 1) * image_size, (j * 3 + 1) *
                   image_size:(j * 3 + 2) * image_size, :] = map_image
            merged[i * image_size:(i + 1) * image_size, (j * 3 + 2) *
                   image_size:(j * 3 + 3) * image_size, :] = map_gt_image
        else:
            merged[i * image_size:(i + 1) * image_size,
                   (j * 2) * image_size:(j * 2 + 1) * image_size, :] = img
            merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) *
                   image_size:(j * 2 + 2) * image_size, :] = map_image

    return merged
Example #32
from cv2 import imwrite

from numpy import ones
from scipy.ndimage import zoom

for i in range(0, 16):
    img = ones((6, 6)) * 255
    img[1, 1] = 0
    img[4, 1] = 0
    img[1, 4] = 0

    if i % 2 == 1:
        img[2, 2] = 0

    if (i >> 1) % 2 == 1:
        img[2, 3] = 0

    if (i >> 2) % 2 == 1:
        img[3, 2] = 0

    if (i >> 3) % 2 == 1:
        img[3, 3] = 0

    print(img)
    marker = zoom(img, zoom=50, order=0)

    imwrite('marker_images/marker_{}.png'.format(i), marker)
Example #33
 def resize(self, arr, size_):
     from scipy.ndimage import zoom
     s, w, h = arr.shape[0], arr.shape[1], arr.shape[2]
     return zoom(arr,
                 (self.size[2] / s, self.size[0] / w, self.size[1] / h))
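Because zoom rounds shape * factor to get the output shape, ratio-based factors like these can land one pixel off the intended size. A defensive sketch (hypothetical helper, not part of the snippet) that guarantees an exact target shape:

import numpy as np
from scipy.ndimage import zoom

def resize_to(arr, target_shape):
    # zoom by exact ratios, then crop/pad a stray pixel if rounding disagreed
    factors = [t / float(s) for t, s in zip(target_shape, arr.shape)]
    out = zoom(arr, factors)
    out = out[tuple(slice(0, t) for t in target_shape)]
    pad = [(0, t - s) for t, s in zip(target_shape, out.shape)]
    return np.pad(out, pad, mode='edge')

vol = np.random.rand(30, 100, 90)
assert resize_to(vol, (16, 128, 128)).shape == (16, 128, 128)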
Example #34
def process_image(img):
    img = zoom(img, np.random.uniform(MIN_ZOOM, MAX_ZOOM), order=0)
    img = rotate(img, np.random.uniform(-MAX_ROTATE, MAX_ROTATE), order=0)
    img = gaussian_filter(img, np.random.uniform(MIN_BLUR, MAX_BLUR))
    img = random_crop(img, (299, 299))
    return img
Example #35
def nirps_pp(files):

    ref_hdr = fits.getheader('ref_hdr.fits')

    if type(files) == str:
        files = glob.glob(files)

    for file in files:
        outname = '_pp.'.join(file.split('.'))

        if '_pp.' in file:
            print(file + ' is a _pp file')
            continue

        if os.path.isfile(outname):
            print('File : ' + outname + ' exists')
            continue
        else:
            print('We pre-process ' + file)

            hdr = fits.getheader(file)
            im = fits.getdata(file)

            mask = np.array(fits.getdata('mask.fits'), dtype=bool)

            im2 = np.array(im)
            im2[mask] = np.nan

            # we find the low level frequencies
            # we bin in regions of 32x32 pixels. This CANNOT be
            # smaller than the order footprint on the array
            # as it would lead to a set of NaNs in the downsized
            # image and chaos afterward
            binsize = 32  # pixels

            # median-bin and expand back to original size
            lowf = zoom(medbin(im2, binsize, binsize), 4096 // binsize)

            # subtract low-frequency from masked image
            im2 -= lowf

            # find the amplifier x-talk map
            xtalk = med32(im2)
            im2 -= xtalk

            # subtract both low-frequency and x-talk from input image
            im -= (lowf + xtalk)

            tmp = np.nanmedian(im2, axis=0)

            im -= np.tile(tmp, 4096).reshape(4096, 4096)

            # rotates the image so that it matches the order geometry of SPIRou and HARPS
            # redder orders at the bottom and redder wavelength within each order on the left

            # NIRPS = 5
            # SPIROU = 3
            im = rot8(im, 5)

            #DPRTYPE
            """
            MJDMID  =    58875.10336167315 / Mid Observation time [mjd]                     
            BERVOBSM= 'header  '           / BERV method used to calc observation time      
            DPRTYPE = 'FP_FP   '           / The type of file (from pre-process)            
            PVERSION= '0.6.029 '           / DRS Pre-Processing version                     
            DRSVDATE= '2020-01-27'         / DRS Release date                               
            DRSPDATE= '2020-01-30 22:16:00.344' / DRS Processed date                        
            DRSPID  = 'PID-00015804225603440424-JKBM' / The process ID that outputted this f
            INF1000 = '2466774a.fits'      / Input file used to create output infile=0      
            QCC001N = 'snr_hotpix'         / All quality control passed                     
            QCC001V =    876.2474157597072 / All quality control passed                     
            QCC001L = 'snr_hotpix < 1.00000e+01' / All quality control passed               
            QCC001P =                    1 / All quality control passed                     
            QCC002N = 'max(rms_list)'      / All quality control passed                     
            QCC002V = 0.002373232122258537 / All quality control passed                     
            QCC002L = 'max(rms_list) > 1.5000e-01' / All quality control passed             
            QCC002P =                    1 / All quality control passed                     
            QCC_ALL =                    T                                                  
            DETOFFDX=                    0 / Pixel offset in x from readout lag             
            DETOFFDY=                    0 / Pixel offset in y from readout lag    

            """

            if 'MJDEND' not in hdr:
                hdr['MJDEND'] = 0.00
                hdr['EXPTIME'] = 5.57 * len(hdr['INTT*'])

            hdr['MJDMID'] = hdr['MJDEND'] - hdr['EXPTIME'] / 2.0 / 86400.0

            hdr['INF1000'] = file
            DPRTYPES = [
                'DARK_DARK', 'DARK_FP', 'FLAT_FLAT', 'DARK_FLAT', 'FLAT_DARK',
                'HC_FP', 'FP_HC', 'FP_FP', 'OBJ_DARK', 'OBJ_FP', 'HC_DARK',
                'DARK_HC', 'HC_HC'
            ]

            if 'STAR_DARK' in file:
                hdr['DPRTYPE'] = 'OBJ_DARK'

            if 'STAR_FP' in file:
                hdr['DPRTYPE'] = 'OBJ_FP'

            for DPRTYPE in DPRTYPES:
                if DPRTYPE in file:
                    if DPRTYPE == 'DARK_DARK':
                        hdr['DPRTYPE'] = 'DARK_DARK_TEL'
                    elif DPRTYPE == 'HC_HC':
                        hdr['DPRTYPE'] = 'HCONE_HCONE'
                    elif DPRTYPE == 'FP_HC':
                        hdr['DPRTYPE'] = 'FP_HCONE'
                    elif DPRTYPE == 'HC_FP':
                        hdr['DPRTYPE'] = 'HCONE_FP'
                    elif DPRTYPE == 'DARK_HC':
                        hdr['DPRTYPE'] = 'DARK_HCONE'
                    elif DPRTYPE == 'HC_DARK':
                        hdr['DPRTYPE'] = 'HCONE_DARK'
                    elif DPRTYPE == 'FP_DARK':
                        hdr['DPRTYPE'] = 'FP_DARK'
                    elif DPRTYPE == 'DARK_FP':
                        hdr['DPRTYPE'] = 'DARK_FP'
                    else:
                        hdr['DPRTYPE'] = DPRTYPE

            if 'DPRTYPE' not in hdr:
                print('error, with DPRTYPE for ', file)
                return

            if 'OBJECT' not in hdr:
                hdr['OBJECT'] = 'none'

            if 'RDNOISE' not in hdr:
                hdr['RDNOISE'] = 10.0, 'rdnoise *not* provided, added by _pp'

            if 'GAIN' not in hdr:
                hdr['GAIN'] = 1.000, 'gain *not* provided, added by _pp'

            if 'SATURATE' not in hdr:
                hdr['SATURATE'] = 60000, 'saturate *not* provided, added by _pp'

            if 'PVERSION' not in hdr:
                hdr['PVERSION'] = 'NIRPS_SIMU_PP'

            if 'OBSTYPE' not in hdr:
                if hdr['DPRTYPE'][0:4] == 'FLAT':
                    hdr['OBSTYPE'] = 'FLAT'

                if hdr['DPRTYPE'][0:4] == 'DARK':
                    hdr['OBSTYPE'] = 'DARK'

                if hdr['DPRTYPE'][0:2] == 'FP':
                    hdr['OBSTYPE'] = 'ALIGN'

                if hdr['DPRTYPE'][0:2] == 'HC':
                    hdr['OBSTYPE'] = 'COMPARISON'

                if hdr['DPRTYPE'][0:3] == 'OBJ':
                    hdr['OBSTYPE'] = 'OBJECT'

            if hdr['DPRTYPE'][0:3] == 'OBJ':
                hdr['TRG_TYPE'] = 'TARGET'
            else:
                hdr['TRG_TYPE'] = ''

            necessary_kwrd = [
                'OBSTYPE', 'TRG_TYPE', 'OBJECT', 'OBJRA', 'OBJDEC', 'OBJECT',
                'OBJEQUIN', 'OBJRAPM', 'OBJDECPM', 'AIRMASS', 'RELHUMID',
                'OBJTEMP', 'GAIA_ID', 'OBJPLX', 'OBSRV', 'GAIN', 'RDNOISE',
                'FRMTIME', 'EXPTIME', 'PI_NAME', 'CMPLTEXP', 'NEXP', 'MJDATE',
                'MJDEND', 'SBCREF_P', 'SBCCAS_P', 'SBCALI_P', 'SBCDEN_P',
                'DATE-OBS', 'UTC-OBS', 'SATURATE', 'TEMPERAT', 'SB_POL_T'
            ]

            missing = False
            for key in necessary_kwrd:
                if key not in hdr:
                    print('missing keyword : {0}'.format(key))
                    missing = True

                    if key in ref_hdr:
                        hdr[key] = ref_hdr[key]

            b = fits.getdata(file, ext=2)
            errslope = fits.getdata(file, ext=3)
            n = fits.getdata(file, ext=4)

            b = rot8(b, 5)
            errslope = rot8(errslope, 5)
            n = rot8(n, 5)

            hdu1 = fits.PrimaryHDU()
            hdu1.header = hdr
            hdu1.header['NEXTEND'] = 4
            hdu2 = fits.ImageHDU(im)
            hdu2.header['UNITS'] = ('ADU/S', 'Slope of fit, flux vs time')
            hdu2.header['EXTNAME'] = ('slope', 'Slope of fit, flux vs time')

            hdu3 = fits.ImageHDU(b)
            hdu3.header['UNITS'] = ('ADU', 'Intercept of the pixel/time fit.')
            hdu3.header['EXTNAME'] = ('intercept',
                                      'Intercept of the pixel/time fit.')

            hdu4 = fits.ImageHDU(errslope)
            hdu4.header['UNITS'] = ('ADU/S', 'Formal error on slope fit')
            hdu4.header['EXTNAME'] = ('errslope', 'Formal error on slope fit')

            hdu5 = fits.ImageHDU(n)
            hdu5.header['UNITS'] = ('Nimages', 'N readouts below saturation')
            hdu5.header['EXTNAME'] = ('count', 'N readouts below saturation')

            new_hdul = fits.HDUList([hdu1, hdu2, hdu3, hdu4, hdu5])

            # just to avoid an error message with writeto
            if os.path.isfile(outname):
                print('file : ' + outname + ' exists, we are overwriting it')
                os.remove(outname)

            new_hdul.writeto(outname, overwrite=True)

    return []
Example #36
 def _imgs_stim():
     X = ndi.zoom(imgs[istim, ...], (1, img_scale, img_scale), order=1)
     img_dim = X.shape[1:]
     X = np.reshape(X, [len(istim), -1])
     return img_dim, zscore(X, axis=0) / np.sqrt(len(istim)).astype(
         np.float32)
Example #37
    if x < IMAGE_WIDTH_HALF * 2:
        img_arr = np.pad(img_arr, ((IMAGE_WIDTH_HALF - math.ceil(x / 2), IMAGE_WIDTH_HALF - math.floor(x / 2)),
                                   (0, 0), (0, 0)), 'constant')
    else:
        x_start = round(x / 2) - IMAGE_WIDTH_HALF
        x_end = round(x / 2) + IMAGE_WIDTH_HALF
        img_arr = img_arr[x_start:x_end, :, :]

    if y < IMAGE_LENGTH_HALF * 2:
        img_arr = np.pad(img_arr,
                         ((0, 0), (IMAGE_LENGTH_HALF - math.ceil(y / 2), IMAGE_LENGTH_HALF - math.floor(y / 2)),
                          (0, 0)), 'constant')
    else:
        y_start = round(y / 2) - IMAGE_LENGTH_HALF
        y_end = round(y / 2) + IMAGE_LENGTH_HALF
        img_arr = img_arr[:, y_start:y_end, :]

    if z < IMAGE_HEIGHT_HALF * 2:
        img_arr = np.pad(img_arr, ((0, 0), (0, 0),
                                   (IMAGE_HEIGHT_HALF - math.ceil(z / 2), IMAGE_HEIGHT_HALF - math.floor(z / 2))),
                         'constant')
    else:
        z_start = round(z / 2) - IMAGE_HEIGHT_HALF
        z_end = round(z / 2) + IMAGE_HEIGHT_HALF
        img_arr = img_arr[:, :, z_start:z_end]

    print("shape = " + str(img_arr.shape))
    img_arr = ndi.zoom(img_arr, 0.25)   # resize the image from 256*256*256 to 64*64*64
    print("shape = " + str(img_arr.shape))
    nib_img = nib.Nifti1Image(img_arr, img.affine)
    nib.save(nib_img, resized_path + file)
    # nib.save(nib_img, "E:/Y4/DT/data/Resized64_MNI152_T1_1mm_brain.nii.gz")
Example #38
def inference(deploy_set,
              output_dir,
              model_path,
              FineNet_path=None,
              set_name=None,
              file_ext='.bmp',
              isHavingFineNet=False):
    if set_name is None:
        set_name = deploy_set.split('/')[-2]

    mkdir(output_dir + '/' + set_name + '/')
    mkdir(output_dir + '/' + set_name + '/mnt_results/')
    mkdir(output_dir + '/' + set_name + '/seg_results/')
    mkdir(output_dir + '/' + set_name + '/OF_results/')

    logging.info("Predicting %s:" % (set_name))

    _, img_name = get_files_in_folder(deploy_set + 'img_files/', file_ext)
    print deploy_set

    # ====== Load FineNet to verify
    if isHavingFineNet == True:
        model_FineNet = FineNetmodel(num_classes=2,
                                     pretrained_path=FineNet_path,
                                     input_shape=(224, 224, 3))

        model_FineNet.compile(loss='categorical_crossentropy',
                              optimizer=Adam(lr=0),
                              metrics=['accuracy'])

    time_c = []

    main_net_model = CoarseNetmodel((None, None, 1), model_path, mode='deploy')

    for i in xrange(0, len(img_name)):
        print i

        image = misc.imread(deploy_set + 'img_files/' + img_name[i] + file_ext,
                            mode='L')  # / 255.0

        img_size = image.shape
        img_size = np.array(img_size, dtype=np.int32) // 8 * 8

        # read the mask from files
        try:
            mask = misc.imread(
                deploy_set + 'seg_files/' + img_name[i] + '.jpg',
                mode='L') / 255.0
        except:
            mask = np.ones((img_size[0], img_size[1]))

        image = image[:img_size[0], :img_size[1]]
        mask = mask[:img_size[0], :img_size[1]]

        original_image = image.copy()

        texture_img = FastEnhanceTexture(image, sigma=2.5, show=False)
        dir_map, fre_map = get_maps_STFT(texture_img,
                                         patch_size=64,
                                         block_size=16,
                                         preprocess=True)

        image = image * mask

        logging.info("%s %d / %d: %s" %
                     (set_name, i + 1, len(img_name), img_name[i]))
        time_start = time()

        image = np.reshape(image, [1, image.shape[0], image.shape[1], 1])

        enh_img, enh_img_imag, enhance_img, ori_out_1, ori_out_2, seg_out, mnt_o_out, mnt_w_out, mnt_h_out, mnt_s_out \
            = main_net_model.predict(image)
        time_afterconv = time()

        # If use mask from model
        round_seg = np.round(np.squeeze(seg_out))
        seg_out = 1 - round_seg
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
        seg_out = cv2.morphologyEx(seg_out, cv2.MORPH_CLOSE, kernel)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        seg_out = cv2.morphologyEx(seg_out, cv2.MORPH_OPEN, kernel)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        seg_out = cv2.dilate(seg_out, kernel)

        # If use mask from outside
        # seg_out = cv2.resize(mask, dsize=(seg_out.shape[1], seg_out.shape[0]))

        max_num_minu = 20
        min_num_minu = 6

        early_minutiae_thres = 0.5

        # New adaptive threshold
        mnt = label2mnt(np.squeeze(mnt_s_out) * np.round(np.squeeze(seg_out)),
                        mnt_w_out,
                        mnt_h_out,
                        mnt_o_out,
                        thresh=0)

        # Previous exp: 0.2
        mnt_nms_1 = py_cpu_nms(mnt, 0.5)
        mnt_nms_2 = nms(mnt)
        mnt_nms_1.view('f8,f8,f8,f8').sort(order=['f3'], axis=0)
        mnt_nms_1 = mnt_nms_1[::-1]

        mnt_nms_1_copy = mnt_nms_1.copy()
        mnt_nms_2_copy = mnt_nms_2.copy()
        # Adaptive threshold goes here
        # Make sure the maximum number of minutiae is max_num_minu

        # Sort minutiae by score
        while early_minutiae_thres > 0:
            mnt_nms_1 = mnt_nms_1_copy[
                mnt_nms_1_copy[:, 3] > early_minutiae_thres, :]
            mnt_nms_2 = mnt_nms_2_copy[
                mnt_nms_2_copy[:, 3] > early_minutiae_thres, :]

            if mnt_nms_1.shape[0] > max_num_minu or mnt_nms_2.shape[
                    0] > max_num_minu:
                mnt_nms_1 = mnt_nms_1[:max_num_minu, :]
                mnt_nms_2 = mnt_nms_2[:max_num_minu, :]
            if mnt_nms_1.shape[0] > min_num_minu and mnt_nms_2.shape[
                    0] > min_num_minu:
                break

            early_minutiae_thres = early_minutiae_thres - 0.05

        mnt_nms = fuse_nms(mnt_nms_1, mnt_nms_2)

        final_minutiae_score_threshold = early_minutiae_thres - 0.05

        print(early_minutiae_thres, final_minutiae_score_threshold)

        mnt_refined = []
        if isHavingFineNet:
            # ======= Verify using FineNet ============
            patch_minu_radio = 22
            if FineNet_path is not None:
                for idx_minu in range(mnt_nms.shape[0]):
                    try:
                        # Extract patch from image
                        x_begin = int(mnt_nms[idx_minu, 1]) - patch_minu_radio
                        y_begin = int(mnt_nms[idx_minu, 0]) - patch_minu_radio
                        patch_minu = original_image[x_begin:x_begin +
                                                    2 * patch_minu_radio,
                                                    y_begin:y_begin +
                                                    2 * patch_minu_radio]

                        patch_minu = cv2.resize(
                            patch_minu,
                            dsize=(224, 224),
                            interpolation=cv2.INTER_NEAREST)

                        ret = np.empty(
                            (patch_minu.shape[0], patch_minu.shape[1], 3),
                            dtype=np.uint8)
                        ret[:, :, 0] = patch_minu
                        ret[:, :, 1] = patch_minu
                        ret[:, :, 2] = patch_minu
                        patch_minu = ret
                        patch_minu = np.expand_dims(patch_minu, axis=0)

                        # # Can use class as hard decision
                        # # 0: minu  1: non-minu
                        # [class_Minutiae] = np.argmax(model_FineNet.predict(patch_minu), axis=1)
                        #
                        # if class_Minutiae == 0:
                        #     mnt_refined.append(mnt_nms[idx_minu,:])

                        # Use soft decision: merge FineNet score with CoarseNet score
                        [isMinutiaeProb] = model_FineNet.predict(patch_minu)
                        isMinutiaeProb = isMinutiaeProb[0]
                        # print(isMinutiaeProb)
                        tmp_mnt = mnt_nms[idx_minu, :].copy()
                        tmp_mnt[3] = (4 * tmp_mnt[3] + isMinutiaeProb) / 5
                        mnt_refined.append(tmp_mnt)

                    except Exception:
                        mnt_refined.append(mnt_nms[idx_minu, :])
        else:
            mnt_refined = mnt_nms

        mnt_nms_backup = mnt_nms.copy()
        mnt_nms = np.array(mnt_refined)

        if mnt_nms.shape[0] > 0:
            mnt_nms = mnt_nms[mnt_nms[:, 3] > final_minutiae_score_threshold, :]

        final_mask = ndimage.zoom(np.round(np.squeeze(seg_out)), [8, 8],
                                  order=0)
        # Show the orientation
        show_orientation_field(original_image,
                               dir_map + np.pi,
                               mask=final_mask,
                               fname="%s/%s/OF_results/%s_OF.jpg" %
                               (output_dir, set_name, img_name[i]))

        fuse_minu_orientation(dir_map, mnt_nms, mode=3)

        time_afterpost = time()
        mnt_writer(
            mnt_nms, img_name[i], img_size,
            "%s/%s/mnt_results/%s.mnt" % (output_dir, set_name, img_name[i]))
        draw_minutiae(original_image,
                      mnt_nms,
                      "%s/%s/%s_minu.jpg" %
                      (output_dir, set_name, img_name[i]),
                      saveimage=True)

        misc.imsave(
            "%s/%s/seg_results/%s_seg.jpg" %
            (output_dir, set_name, img_name[i]), final_mask)

        time_afterdraw = time()
        time_c.append([
            time_afterconv - time_start, time_afterpost - time_afterconv,
            time_afterdraw - time_afterpost
        ])
        logging.info("load+conv: %.3fs, seg-postpro+nms: %.3f, draw: %.3f" %
                     (time_c[-1][0], time_c[-1][1], time_c[-1][2]))

    # time_c = np.mean(np.array(time_c), axis=0)
    # logging.info(
    #     "Average: load+conv: %.3fs, oir-select+seg-post+nms: %.3f, draw: %.3f" % (time_c[0], time_c[1], time_c[2]))
    return
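
The final_mask step above relies on order=0 so the 8x-upsampled segmentation stays strictly binary; a small self-contained sketch of that behaviour (hypothetical 2x2 mask):

import numpy as np
from scipy import ndimage

mask_small = np.array([[0, 1], [1, 0]], dtype=np.float32)
# nearest-neighbour zoom repeats each cell into an 8x8 block
mask_full = ndimage.zoom(mask_small, [8, 8], order=0)
assert mask_full.shape == (16, 16)
assert set(np.unique(mask_full)) == {0.0, 1.0}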
示例#39
0
def Workflow_atp2a2(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure ATP2A2

    Parameters:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path with a filename of
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [2.5, 9.0]
    vesselness_sigma = [1]
    vesselness_cutoff = 0.25
    minArea = 15
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)

        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )

    # smoothing with boundary preserving smoothing
    structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # 2d vesselness slice by slice
    response = vesselnessSliceBySlice(
        structure_img_smooth, sigmas=vesselness_sigma, tau=1, whiteonblack=True
    )
    bw = response > vesselness_cutoff

    ###################
    # POST-PROCESSING
    ###################
    bw = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)
    for zz in range(bw.shape[0]):
        bw[zz, :, :] = remove_small_objects(
            bw[zz, :, :], min_size=3, connectivity=1, in_place=False
        )

    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
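
Note how the rescale branch above zooms only Y and X with a spline of order 2 and then re-normalizes to [0, 1]; the same pattern in isolation (a sketch, hypothetical ZYX sizes):

import numpy as np
from scipy.ndimage import zoom

stack = np.random.rand(32, 512, 512)                # ZYX stack
ratio = 0.5
resized = zoom(stack, (1, ratio, ratio), order=2)   # Z axis untouched
resized = (resized - resized.min() + 1e-8) / (resized.max() - resized.min() + 1e-8)
assert resized.shape == (32, 256, 256)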
示例#40
0
def resize(scale, old_mats):
    new_mats = []
    for mat in old_mats:
        new_mats.append(zoom(mat, scale, order=0))
    return np.array(new_mats)
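
Since zoom takes one factor per axis, the loop can usually be replaced by a single call on the stacked array, provided all matrices share one shape (a sketch):

import numpy as np
from scipy.ndimage import zoom

mats = np.random.rand(10, 64, 64)
# factor 1 on axis 0 keeps the number of matrices unchanged
resized = zoom(mats, (1, 0.5, 0.5), order=0)
assert resized.shape == (10, 32, 32)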
示例#41
0
    def _generate_bss(self, x_batch, y_batch, c):
        """
        Generate adversarial examples for a batch of inputs with a specific batch of constants.

        :param x_batch: A batch of original examples.
        :type x_batch: `np.ndarray`
        :param y_batch: A batch of targets (0-1 hot).
        :type y_batch: `np.ndarray`
        :param c: A batch of constants.
        :type c: `np.ndarray`
        :return: A tuple of best elastic distances, best labels, best attacks
        :rtype: `tuple`
        """
        def compare(object1, object2):
            return object1 == object2 if self.targeted else object1 != object2

        x_orig = x_batch.astype(NUMPY_DTYPE)
        fine_tuning = np.full(x_batch.shape[0], False, dtype=bool)
        prev_loss = 1e6 * np.ones(x_batch.shape[0])
        prev_l2dist = np.zeros(x_batch.shape[0])

        # Resize and initialize Adam
        if self.use_resize:
            x_orig = self._resize_image(x_orig, self._init_size,
                                        self._init_size, True)
            assert (x_orig != 0).any()
            x_adv = x_orig.copy()
        else:
            x_orig = x_batch
            self._reset_adam(np.prod(self.classifier.input_shape))
            self._current_noise.fill(0)
            x_adv = x_orig.copy()  # keep x_adv defined on the non-resize path too

        # Initialize best distortions, best changed labels and best attacks
        best_dist = np.inf * np.ones(x_adv.shape[0])
        best_label = -np.inf * np.ones(x_adv.shape[0])
        best_attack = [x_adv[i] for i in range(x_adv.shape[0])]

        for iter_ in range(self.max_iter):
            logger.debug('Iteration step %i out of %i', iter_, self.max_iter)

            # Upscaling for very large number of iterations
            if self.use_resize:
                if iter_ == 2000:
                    x_adv = self._resize_image(x_adv, 64, 64)
                    x_orig = zoom(x_orig, [
                        1, x_adv.shape[1] / x_orig.shape[1], x_adv.shape[2] /
                        x_orig.shape[2], x_adv.shape[3] / x_orig.shape[3]
                    ])
                elif iter_ == 10000:
                    x_adv = self._resize_image(x_adv, 128, 128)
                    x_orig = zoom(x_orig, [
                        1, x_adv.shape[1] / x_orig.shape[1], x_adv.shape[2] /
                        x_orig.shape[2], x_adv.shape[3] / x_orig.shape[3]
                    ])

            # Compute adversarial examples and loss
            x_adv = self._optimizer(x_adv, y_batch, c)
            preds, l2dist, loss = self._loss(x_orig, x_adv, y_batch, c)

            # Reset Adam if a valid example has been found to avoid overshoot
            mask_fine_tune = (~fine_tuning) & (loss == l2dist) & (prev_loss !=
                                                                  prev_l2dist)
            fine_tuning[mask_fine_tune] = True
            self._reset_adam(self.adam_mean.size,
                             np.repeat(mask_fine_tune, x_adv[0].size))
            prev_l2dist = l2dist

            # Abort early if no improvement is obtained
            if self.abort_early and iter_ % self._early_stop_iters == 0:
                if (loss > .9999 * prev_loss).all():
                    break
                prev_loss = loss

            # Adjust the best result
            labels_batch = np.argmax(y_batch, axis=1)
            for i, (dist,
                    pred) in enumerate(zip(l2dist, np.argmax(preds, axis=1))):
                if dist < best_dist[i] and compare(pred, labels_batch[i]):
                    best_dist[i] = dist
                    best_attack[i] = x_adv[i]
                    best_label[i] = pred

        # Resize images to original size before returning
        best_attack = np.array(best_attack)
        if self.use_resize:
            if self.classifier.channel_index == 3:
                best_attack = zoom(best_attack, [
                    1,
                    int(x_batch.shape[1]) / best_attack.shape[1],
                    int(x_batch.shape[2]) / best_attack.shape[2], 1
                ])
            elif self.classifier.channel_index == 1:
                best_attack = zoom(best_attack, [
                    1, 1,
                    int(x_batch.shape[2]) / best_attack.shape[2],
                    int(x_batch.shape[3]) / best_attack.shape[3]
                ])

        return best_dist, best_label, best_attack
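
The resize steps above always build the factor list axis by axis from two shapes, with 1 on the axes that must not change; the same pattern in isolation for an NHWC batch (hypothetical sizes):

import numpy as np
from scipy.ndimage import zoom

batch = np.random.rand(4, 32, 32, 3)                  # NHWC
target_h, target_w = 64, 64
factors = [1, target_h / batch.shape[1], target_w / batch.shape[2], 1]
upscaled = zoom(batch, factors)                       # cubic spline by default
assert upscaled.shape == (4, 64, 64, 3)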
示例#42
0
def globalPredSK(metaFN):
    meta = landsat_metadata(metaFN)
    sceneID = meta.LANDSAT_SCENE_ID
    base = os.getcwd()
    regr_1 = DecisionTreeRegressor(max_depth=15)
    rng = np.random.RandomState(1)
    regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=15),
                               n_estimators=5,
                               random_state=rng)
    fn = os.path.join(base, 'th_samples.data')
    df = pd.read_csv(fn)
    X = np.array(df.iloc[:, 3:-4])
    w = np.array(df.iloc[:, -1])
    w = np.reshape(w, [w.shape[0], 1])
    X = np.concatenate((X, w), axis=1)
    y = np.array(df.iloc[:, -2])
    regr_1.fit(X, y)
    regr_2.fit(X, y)
    blue = os.path.join(landsat_temp, "%s_sr_band2.tif" % sceneID)
    green = os.path.join(landsat_temp, "%s_sr_band3.tif" % sceneID)
    red = os.path.join(landsat_temp, "%s_sr_band4.tif" % sceneID)
    nir = os.path.join(landsat_temp, "%s_sr_band5.tif" % sceneID)
    swir1 = os.path.join(landsat_temp, "%s_sr_band6.tif" % sceneID)
    swir2 = os.path.join(landsat_temp, "%s_sr_band7.tif" % sceneID)
    # open files and assemble them into 2-d numpy array

    Gblue = gdal.Open(blue)
    blueData = Gblue.ReadAsArray()
    blueVec = np.reshape(blueData, [blueData.shape[0] * blueData.shape[1]])
    Ggreen = gdal.Open(green)
    greenData = Ggreen.ReadAsArray()
    greenVec = np.reshape(greenData, [greenData.shape[0] * greenData.shape[1]])
    Gnir = gdal.Open(nir)
    nirData = Gnir.ReadAsArray()
    nirVec = np.reshape(nirData, [nirData.shape[0] * nirData.shape[1]])
    Gred = gdal.Open(red)
    redData = Gred.ReadAsArray()
    redVec = np.reshape(redData, [redData.shape[0] * redData.shape[1]])
    Gswir1 = gdal.Open(swir1)
    swir1Data = Gswir1.ReadAsArray()
    swir1Vec = np.reshape(swir1Data, [swir1Data.shape[0] * swir1Data.shape[1]])
    Gswir2 = gdal.Open(swir2)
    swir2Data = Gswir2.ReadAsArray()
    swir2Vec = np.reshape(swir2Data, [swir2Data.shape[0] * swir2Data.shape[1]])

    ylocs = (np.tile(range(0, blueData.shape[0]),
                     (blueData.shape[1], 1)).T) // 3
    xlocs = (np.tile(range(0, blueData.shape[1]), (blueData.shape[0], 1))) // 3
    pixID = ylocs * 10000 + xlocs
    pixIDvec = np.reshape(pixID, [swir2Data.shape[0] * swir2Data.shape[1]])
    newDF = pd.DataFrame({
        'pixID': pixIDvec,
        'green': greenVec,
        'red': redVec,
        'nir': nirVec,
        'swir1': swir1Vec,
        'swir2': swir2Vec
    })
    # newDF.replace(to_replace=-9999, value=np.nan)
    dnMean = newDF.groupby('pixID').mean()
    cv = newDF.groupby('pixID').std() / dnMean
    meanCV = np.array(cv.mean(axis=1))
    meanCV[np.isinf(meanCV)] = 10.
    meanCV[np.where(meanCV == 0)] = 10.
    weight = 0.1 / meanCV
    weight[np.isinf(weight)] = 20.
    weight[np.where(meanCV < 0.01)] = 10.
    weight[weight == 20.] = 0.
    weight[np.where(weight < 0.)] = 0.

    rows = np.array(dnMean.index // 10000)
    cols = np.array(dnMean.index - ((dnMean.index // 10000) * 10000))
    w_array = np.nan * np.empty(
        (greenData.shape[0] // 3, greenData.shape[1] // 3))
    w_array[list(rows), list(cols)] = list(weight)
    w_array2 = zoom(w_array, 3.)
    weight = np.reshape(w_array2, [greenData.shape[0] * greenData.shape[1]])
    newDF['weight'] = weight
    xNew = np.stack((greenVec, redVec, nirVec, swir1Vec, swir2Vec, weight),
                    axis=-1)
    outData = regr_1.predict(xNew)
    outData = regr_2.predict(xNew)

    return np.reshape(outData, [blueData.shape[0], blueData.shape[1]])
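
The w_array zoom above maps one weight per 3x3 block back onto the pixel grid; this only reshapes cleanly when the image dimensions are multiples of 3 (a sketch, hypothetical sizes):

import numpy as np
from scipy.ndimage import zoom

block_weights = np.random.rand(100, 100)   # one value per 3x3 pixel block
full = zoom(block_weights, 3.)             # back to the 300x300 pixel grid
assert full.shape == (300, 300)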
示例#43
0
# Geometrical transformation - zoom
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy import ndimage

# open the input image as numpy array, set datatype
img = np.array(Image.open("car-rgb.png"), dtype=np.uint8)
# apply the zoom function with zoom factor =
# (row/y ratio, column/x ratio, channel ratio)
# order=0: nearest-neighbour interpolation
img_zm = ndimage.zoom(img, zoom=(2, 2, 1), order=0)

# save image
imgz = Image.fromarray(img_zm)
imgz.save('car-op-rgb-zoom.png')

# set plot size in inch
plt.figure(figsize=(10, 5), dpi=140)

# plot original image
plt.subplot(121)
plt.axis('on')
plt.title('Original', fontsize=10)
plt.imshow(img)

# plot zoomed image
plt.subplot(122)
plt.axis('on')
plt.title('Zoom', fontsize=10)
plt.imshow(img_zm)
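
order=0 gives the blocky nearest-neighbour look; a higher spline order interpolates new values instead. A quick comparison sketch (any HxWx3 uint8 array will do):

import numpy as np
from scipy import ndimage

img = (np.random.rand(50, 50, 3) * 255).astype(np.uint8)
nearest = ndimage.zoom(img, zoom=(2, 2, 1), order=0)    # replicates pixels
bilinear = ndimage.zoom(img, zoom=(2, 2, 1), order=1)   # interpolates values
assert nearest.shape == bilinear.shape == (100, 100, 3)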
示例#44
0
File: utils.py  Project: cswin/lungmask
def reshape_mask(mask, tbox, origsize):
    res = np.zeros(origsize)
    resize = [tbox[2] - tbox[0], tbox[3] - tbox[1]]
    imgres = ndimage.zoom(mask, resize / np.asarray(mask.shape), order=0)
    res[tbox[0]:tbox[2], tbox[1]:tbox[3]] = imgres
    return res
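
reshape_mask depends on round(mask.shape * factor) landing exactly on the box size; a hedged standalone variant with an explicit shape check (names hypothetical):

import numpy as np
from scipy import ndimage

def zoom_into_box(mask, box_shape, order=0):
    factors = np.asarray(box_shape, dtype=float) / np.asarray(mask.shape)
    out = ndimage.zoom(mask, factors, order=order)
    # zoom rounds each output dimension, so verify the box is hit exactly
    assert out.shape == tuple(box_shape)
    return out

print(zoom_into_box(np.ones((10, 12)), (20, 30)).shape)   # (20, 30)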
示例#45
0
def deploy_with_GT(deploy_set,
                   output_dir,
                   model_path,
                   FineNet_path=None,
                   set_name=None):
    if set_name is None:
        set_name = deploy_set.split('/')[-2]

    # Read image and GT
    img_name, folder_name, img_size = get_maximum_img_size_and_names(
        deploy_set)

    mkdir(output_dir + '/' + set_name + '/')
    mkdir(output_dir + '/' + set_name + '/mnt_results/')
    mkdir(output_dir + '/' + set_name + '/seg_results/')
    mkdir(output_dir + '/' + set_name + '/OF_results/')

    logging.info("Predicting %s:" % (set_name))

    isHavingFineNet = False

    main_net_model = CoarseNetmodel((None, None, 1), model_path, mode='deploy')

    if isHavingFineNet:
        # ====== Load FineNet to verify
        model_FineNet = FineNetmodel(num_classes=2,
                                     pretrained_path=FineNet_path,
                                     input_shape=(224, 224, 3))

        model_FineNet.compile(loss='categorical_crossentropy',
                              optimizer=Adam(lr=0),
                              metrics=['accuracy'])

    time_c = []
    ave_prf_nms = []
    for i, test in enumerate(
            load_data((img_name, folder_name, img_size),
                      tra_ori_model,
                      rand=False,
                      aug=0.0,
                      batch_size=1)):

        print(i, img_name[i])
        logging.info("%s %d / %d: %s" %
                     (set_name, i + 1, len(img_name), img_name[i]))
        time_start = time()

        image = misc.imread(deploy_set + 'img_files/' + img_name[i] + '.bmp',
                            mode='L')  # / 255.0
        mask = misc.imread(deploy_set + 'seg_files/' + img_name[i] + '.bmp',
                           mode='L') / 255.0

        img_size = image.shape
        img_size = np.array(img_size, dtype=np.int32) // 8 * 8
        image = image[:img_size[0], :img_size[1]]
        mask = mask[:img_size[0], :img_size[1]]

        original_image = image.copy()

        # Generate OF
        texture_img = FastEnhanceTexture(image, sigma=2.5, show=False)
        dir_map, fre_map = get_maps_STFT(texture_img,
                                         patch_size=64,
                                         block_size=16,
                                         preprocess=True)

        image = np.reshape(image, [1, image.shape[0], image.shape[1], 1])

        enh_img, enh_img_imag, enhance_img, ori_out_1, ori_out_2, seg_out, mnt_o_out, mnt_w_out, mnt_h_out, mnt_s_out \
            = main_net_model.predict(image)

        time_afterconv = time()

        # Use post processing to smooth image
        round_seg = np.round(np.squeeze(seg_out))
        seg_out = 1 - round_seg
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
        seg_out = cv2.morphologyEx(seg_out, cv2.MORPH_CLOSE, kernel)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        seg_out = cv2.morphologyEx(seg_out, cv2.MORPH_OPEN, kernel)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        seg_out = cv2.dilate(seg_out, kernel)

        # If use mask from outside
        # seg_out = cv2.resize(mask, dsize=(seg_out.shape[1], seg_out.shape[0]))

        mnt_gt = label2mnt(test[7], test[4], test[5], test[6])

        final_minutiae_score_threshold = 0.45
        early_minutiae_thres = final_minutiae_score_threshold + 0.05

        # In cases of small amount of minutiae given, try adaptive threshold
        while final_minutiae_score_threshold >= 0:
            mnt = label2mnt(np.squeeze(mnt_s_out) *
                            np.round(np.squeeze(seg_out)),
                            mnt_w_out,
                            mnt_h_out,
                            mnt_o_out,
                            thresh=early_minutiae_thres)

            # Previous exp: 0.2
            mnt_nms_1 = py_cpu_nms(mnt, 0.5)
            mnt_nms_2 = nms(mnt)
            # Make sure good result is given
            if mnt_nms_1.shape[0] > 4 and mnt_nms_2.shape[0] > 4:
                break
            else:
                final_minutiae_score_threshold = final_minutiae_score_threshold - 0.05
                early_minutiae_thres = early_minutiae_thres - 0.05

        mnt_nms = fuse_nms(mnt_nms_1, mnt_nms_2)

        mnt_nms = mnt_nms[mnt_nms[:, 3] > early_minutiae_thres, :]
        mnt_refined = []
        if isHavingFineNet:
            # ======= Verify using FineNet ============
            patch_minu_radio = 22
            if FineNet_path is not None:
                for idx_minu in range(mnt_nms.shape[0]):
                    try:
                        # Extract patch from image
                        x_begin = int(mnt_nms[idx_minu, 1]) - patch_minu_radio
                        y_begin = int(mnt_nms[idx_minu, 0]) - patch_minu_radio
                        patch_minu = original_image[x_begin:x_begin +
                                                    2 * patch_minu_radio,
                                                    y_begin:y_begin +
                                                    2 * patch_minu_radio]

                        patch_minu = cv2.resize(
                            patch_minu,
                            dsize=(224, 224),
                            interpolation=cv2.INTER_NEAREST)

                        ret = np.empty(
                            (patch_minu.shape[0], patch_minu.shape[1], 3),
                            dtype=np.uint8)
                        ret[:, :, 0] = patch_minu
                        ret[:, :, 1] = patch_minu
                        ret[:, :, 2] = patch_minu
                        patch_minu = ret
                        patch_minu = np.expand_dims(patch_minu, axis=0)

                        # # Can use class as hard decision
                        # # 0: minu  1: non-minu
                        # [class_Minutiae] = np.argmax(model_FineNet.predict(patch_minu), axis=1)
                        #
                        # if class_Minutiae == 0:
                        #     mnt_refined.append(mnt_nms[idx_minu,:])

                        # Use soft decision: merge FineNet score with CoarseNet score
                        [isMinutiaeProb] = model_FineNet.predict(patch_minu)
                        isMinutiaeProb = isMinutiaeProb[0]
                        # print(isMinutiaeProb)
                        tmp_mnt = mnt_nms[idx_minu, :].copy()
                        tmp_mnt[3] = (4 * tmp_mnt[3] + isMinutiaeProb) / 5
                        mnt_refined.append(tmp_mnt)

                    except Exception:
                        mnt_refined.append(mnt_nms[idx_minu, :])
        else:
            mnt_refined = mnt_nms

        mnt_nms = np.array(mnt_refined)

        if mnt_nms.shape[0] > 0:
            mnt_nms = mnt_nms[mnt_nms[:, 3] > final_minutiae_score_threshold, :]

        final_mask = ndimage.zoom(np.round(np.squeeze(seg_out)), [8, 8],
                                  order=0)

        # Show the orientation
        show_orientation_field(original_image,
                               dir_map + np.pi,
                               mask=final_mask,
                               fname="%s/%s/OF_results/%s_OF.jpg" %
                               (output_dir, set_name, img_name[i]))

        fuse_minu_orientation(dir_map, mnt_nms, mode=3)

        time_afterpost = time()
        mnt_writer(
            mnt_nms, img_name[i], img_size,
            "%s/%s/mnt_results/%s.mnt" % (output_dir, set_name, img_name[i]))
        draw_minutiae_overlay_with_score(image,
                                         mnt_nms,
                                         mnt_gt[:, :3],
                                         "%s/%s/%s_minu.jpg" %
                                         (output_dir, set_name, img_name[i]),
                                         saveimage=True)
        # misc.imsave("%s/%s/%s_score.jpg"%(output_dir, set_name, img_name[i]), np.squeeze(mnt_s_out_upscale))

        misc.imsave(
            "%s/%s/seg_results/%s_seg.jpg" %
            (output_dir, set_name, img_name[i]), final_mask)

        time_afterdraw = time()
        time_c.append([
            time_afterconv - time_start, time_afterpost - time_afterconv,
            time_afterdraw - time_afterpost
        ])
        logging.info("load+conv: %.3fs, seg-postpro+nms: %.3f, draw: %.3f" %
                     (time_c[-1][0], time_c[-1][1], time_c[-1][2]))

        # Metrics calculating
        p, r, f, l, o = metric_P_R_F(mnt_gt, mnt_nms)
        ave_prf_nms.append([p, r, f, l, o])
        print(p, r, f)

    time_c = np.mean(np.array(time_c), axis=0)
    ave_prf_nms = np.mean(np.array(ave_prf_nms), 0)
    print "Precision: %f\tRecall: %f\tF1-measure: %f" % (
        ave_prf_nms[0], ave_prf_nms[1], ave_prf_nms[2])

    logging.info(
        "Average: load+conv: %.3fs, ori-select+seg-post+nms: %.3f, draw: %.3f"
        % (time_c[0], time_c[1], time_c[2]))
    return
示例#46
0
 def resize(self, frames):
     if self.state_type != 'features':
         return [ndimage.zoom(f, self.zoom, order=2) for f in frames]
     else:
         return frames
示例#47
0
# Specify the path to the file
filename = os.path.join('data', 'O2-ANU1024.txt.bz2')

# Name the output files
base_dir, name = os.path.split(filename)
name = name.split('.')[0]
output_image = name + '_inverse_Abel_transform_HansenLaw.png'
output_text = name + '_speeds_HansenLaw.dat'
output_plot = name + '_comparison_HansenLaw.png'

# Load an image file as a numpy array
print('Loading ' + filename)
im = np.loadtxt(filename)
print("scaling image to size 501 reduce the time of the basis set calculation")
im = zoom(im, 0.4892578125)
(rows, cols) = np.shape(im)
if cols % 2 == 0:
    print("Even pixel image cols={:d}, adjusting image centre with"
          " center_image()".format(cols))
    im = abel.tools.center.center_image(im, center="slice", odd_size=True)
    # alternative
    #im = shift(im,(0.5,0.5))
    #im = im[:-1, 1::]  # drop left col, bottom row
    (rows, cols) = np.shape(im)

c2 = cols // 2  # half-image width
r2 = rows // 2  # half-image height
print('image size {:d}x{:d}'.format(rows, cols))

# Hansen & Law inverse Abel transform
示例#48
0
def load_patients(i, j, base_path="", rescale=None):
    '''
    Function which loads patients from BraTS data
    :param i: First patient to be loaded
    :param j: From patient i to j load all patients
    :param base_path: Specifies where data is
    :param rescale: Optional zoom factor applied to each volume (nearest-neighbour, order=0)
    :return: A tuple with data in the first place, labels in the second place and
    the set of loaded patient indices in the third place.
    Data has shape (n,240,240,1) where n is the number of slices from patient i to j that contain tumors
    and the labels have shape (n, 240, 240, 2) which is a pixelwise binary softmax.
    '''
    assert j >= i, 'j >= i has to be true, you have given an invalid range of patients.'
    path = base_path + "MICCAI_BraTS_2019_Data_Training/*/*/*"
    wild_t1ce = path + "_t1ce.nii.gz"
    wild_gt = path + "_seg.nii.gz"

    t1ce_paths = glob.glob(wild_t1ce)
    gt_paths = glob.glob(wild_gt)

    num_patients = j - i
    ind = []
    #fixme: the list and the set patients should be made into a dictionary
    patients = set({})
    num_non_empty_slices = 0
    labels_of_interest = set([1, 4])

    for k in range(i, j):
        path_gt = gt_paths[k]
        img_gt = nib.load(path_gt)
        img_gt = img_gt.get_fdata()
        curr_patient = []
        for l in range(img_gt.shape[-1]):
            labels_in_slice = set(np.unique(img_gt[:, :, l]))
            if labels_of_interest.issubset(labels_in_slice):
                curr_patient.append(l)
                num_non_empty_slices += 1
                patients.add(k)
        if len(curr_patient) > 0:
            ind.append(curr_patient)

    image_data = np.zeros((1, 240, 240, num_non_empty_slices))
    labels = np.zeros((num_non_empty_slices, 240, 240))
    OHE_labels = np.zeros((num_non_empty_slices, 240, 240, 2))
    next_ind = 0

    for k, y in enumerate(patients):
        print('Patient: ' + str(y))
        curr_ind = ind[k]

        path_t1ce = t1ce_paths[y]
        path_gt = gt_paths[y]

        img_t1ce = nib.load(path_t1ce)
        img_gt = nib.load(path_gt)

        img_t1ce = img_t1ce.get_fdata()
        img_gt = img_gt.get_fdata()

        # This code is needed when using the data from Asgeir
        if rescale:
            img_gt = zoom(img_gt, rescale, order=0)
            img_t1ce = zoom(img_t1ce, rescale, order=0)

        temp = 0
        for l, x in enumerate(curr_ind):
            image_data[0, :, :, next_ind + l] = img_t1ce[:, :, x]
            labels[next_ind + l, :, :] = img_gt[:, :, x]
            temp += 1
        next_ind += temp

    for l in range(num_non_empty_slices):
        image_data[0, :, :, l] = normalize(image_data[0, :, :, l])
        OHE_labels[l, :, :, :] = OHE(labels[l, :, :])

    # The last axis will become the first axis
    image_data = np.moveaxis(image_data, -1, 0)
    image_data = np.moveaxis(image_data, 1, 3)
    return (image_data, OHE_labels, patients)
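
Both volumes above are rescaled with order=0; for the label volume this is essential, because any higher-order spline would blend label ids into classes that do not exist. A sketch of the contrast (hypothetical data):

import numpy as np
from scipy.ndimage import zoom

labels = np.random.randint(0, 5, size=(60, 60, 60)).astype(float)
intensities = np.random.rand(60, 60, 60)
lab_small = zoom(labels, 0.5, order=0)        # nearest: only original ids survive
int_small = zoom(intensities, 0.5, order=1)   # linear is fine for intensities
assert set(np.unique(lab_small)) <= set(np.unique(labels))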
示例#49
0
# @author: Rifky

# In[]

import scipy.io as io

voxels = io.loadmat(
    "data/3DShapeNets/volumetric_data/chair/30/test/chair_000000000_1.mat"
)['instance']
#%%
import numpy as np

voxels = np.pad(voxels, (1, 1), 'constant', constant_values=(0, 0))
#%%
import scipy.ndimage as nd

voxels = nd.zoom(voxels, (2, 2, 2), mode='constant', order=0)
#%%
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure()
ax = Axes3D(fig)
ax.voxels(voxels, edgecolor="red")

plt.savefig('data')
plt.show()
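
With an integer factor and order=0 the voxel grid is just replicated, so occupancy stays 0/1; a sketch (hypothetical grid):

import numpy as np
import scipy.ndimage as nd

grid = (np.random.rand(30, 30, 30) > 0.5).astype(np.uint8)
grid2 = nd.zoom(grid, (2, 2, 2), mode='constant', order=0)
assert grid2.shape == (60, 60, 60)
assert set(np.unique(grid2)) <= {0, 1}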
示例#50
0
    def forward(self,
                color_heightmap,
                depth_heightmap,
                is_volatile=False,
                specific_rotation=-1):

        # Apply 2x scale to input heightmaps
        color_heightmap_2x = ndimage.zoom(color_heightmap,
                                          zoom=[2, 2, 1],
                                          order=0)
        depth_heightmap_2x = ndimage.zoom(depth_heightmap,
                                          zoom=[2, 2],
                                          order=0)
        assert (color_heightmap_2x.shape[0:2] == depth_heightmap_2x.shape[0:2])

        # Add extra padding (to handle rotations inside network)
        diag_length = float(color_heightmap_2x.shape[0]) * np.sqrt(2)
        diag_length = np.ceil(diag_length / 32) * 32
        padding_width = int((diag_length - color_heightmap_2x.shape[0]) / 2)
        color_heightmap_2x_r = np.pad(color_heightmap_2x[:, :, 0],
                                      padding_width,
                                      'constant',
                                      constant_values=0)
        color_heightmap_2x_r.shape = (color_heightmap_2x_r.shape[0],
                                      color_heightmap_2x_r.shape[1], 1)
        color_heightmap_2x_g = np.pad(color_heightmap_2x[:, :, 1],
                                      padding_width,
                                      'constant',
                                      constant_values=0)
        color_heightmap_2x_g.shape = (color_heightmap_2x_g.shape[0],
                                      color_heightmap_2x_g.shape[1], 1)
        color_heightmap_2x_b = np.pad(color_heightmap_2x[:, :, 2],
                                      padding_width,
                                      'constant',
                                      constant_values=0)
        color_heightmap_2x_b.shape = (color_heightmap_2x_b.shape[0],
                                      color_heightmap_2x_b.shape[1], 1)
        color_heightmap_2x = np.concatenate(
            (color_heightmap_2x_r, color_heightmap_2x_g, color_heightmap_2x_b),
            axis=2)
        depth_heightmap_2x = np.pad(depth_heightmap_2x,
                                    padding_width,
                                    'constant',
                                    constant_values=0)

        # Pre-process color image (scale and normalize)
        image_mean = [0.485, 0.456, 0.406]
        image_std = [0.229, 0.224, 0.225]
        input_color_image = color_heightmap_2x.astype(float) / 255
        for c in range(3):
            input_color_image[:, :, c] = (input_color_image[:, :, c] -
                                          image_mean[c]) / image_std[c]

        # Pre-process depth image (normalize)
        image_mean = [0.01, 0.01, 0.01]
        image_std = [0.03, 0.03, 0.03]
        depth_heightmap_2x.shape = (depth_heightmap_2x.shape[0],
                                    depth_heightmap_2x.shape[1], 1)
        input_depth_image = np.concatenate(
            (depth_heightmap_2x, depth_heightmap_2x, depth_heightmap_2x),
            axis=2)
        for c in range(3):
            input_depth_image[:, :, c] = (input_depth_image[:, :, c] -
                                          image_mean[c]) / image_std[c]

        # Construct minibatch of size 1 (b,c,h,w)
        input_color_image.shape = (input_color_image.shape[0],
                                   input_color_image.shape[1],
                                   input_color_image.shape[2], 1)
        input_depth_image.shape = (input_depth_image.shape[0],
                                   input_depth_image.shape[1],
                                   input_depth_image.shape[2], 1)
        input_color_data = torch.from_numpy(
            input_color_image.astype(np.float32)).permute(3, 2, 0, 1)
        input_depth_data = torch.from_numpy(
            input_depth_image.astype(np.float32)).permute(3, 2, 0, 1)

        # Pass input data through model
        output_prob, state_feat = self.model.forward(input_color_data,
                                                     input_depth_data,
                                                     is_volatile,
                                                     specific_rotation)

        if self.method == 'reactive':

            # Return affordances (and remove extra padding)
            h0 = int(padding_width / 2)
            h1 = int(color_heightmap_2x.shape[0] / 2 - padding_width / 2)
            for rotate_idx in range(len(output_prob)):
                push = F.softmax(output_prob[rotate_idx][0],
                                 dim=1).cpu().data.numpy()[:, 0, h0:h1, h0:h1]
                grasp = F.softmax(output_prob[rotate_idx][1],
                                  dim=1).cpu().data.numpy()[:, 0, h0:h1, h0:h1]
                if rotate_idx == 0:
                    push_predictions = push
                    grasp_predictions = grasp
                else:
                    push_predictions = np.concatenate(
                        (push_predictions, push), axis=0)
                    grasp_predictions = np.concatenate(
                        (grasp_predictions, grasp), axis=0)

        elif self.method == 'reinforcement':

            # Return Q values (and remove extra padding)
            for rotate_idx in range(len(output_prob)):
                if rotate_idx == 0:
                    push_predictions = output_prob[rotate_idx][0].cpu(
                    ).data.numpy()[:, 0,
                                   int(padding_width /
                                       2):int(color_heightmap_2x.shape[0] / 2 -
                                              padding_width / 2),
                                   int(padding_width /
                                       2):int(color_heightmap_2x.shape[0] / 2 -
                                              padding_width / 2)]

                    grasp_predictions = output_prob[rotate_idx][1].cpu(
                    ).data.numpy()[:, 0,
                                   int(padding_width /
                                       2):int(color_heightmap_2x.shape[0] / 2 -
                                              padding_width / 2),
                                   int(padding_width /
                                       2):int(color_heightmap_2x.shape[0] / 2 -
                                              padding_width / 2)]
                else:
                    push_predictions = np.concatenate(
                        (push_predictions,
                         output_prob[rotate_idx][0].cpu().data.numpy()
                         [:, 0,
                          int(padding_width /
                              2):int(color_heightmap_2x.shape[0] / 2 -
                                     padding_width / 2),
                          int(padding_width /
                              2):int(color_heightmap_2x.shape[0] / 2 -
                                     padding_width / 2)]),
                        axis=0)

                    grasp_predictions = np.concatenate(
                        (grasp_predictions,
                         output_prob[rotate_idx][1].cpu().data.numpy()
                         [:, 0,
                          int(padding_width /
                              2):int(color_heightmap_2x.shape[0] / 2 -
                                     padding_width / 2),
                          int(padding_width /
                              2):int(color_heightmap_2x.shape[0] / 2 -
                                     padding_width / 2)]),
                        axis=0)

        return push_predictions, grasp_predictions, state_feat
示例#51
0
def apply(input_sino, model_path, save_path, max_apply, offset, verbose=False):

    if verbose:
        print('   -loading network')

    # This loads the keras network and the first checkpoint file
    model = tf.keras.models.load_model(os.path.join(model_path,
                                                    'keras_model.h5'),
                                       custom_objects={
                                           'recofunc':
                                           architecture.recofunc,
                                           'shrinkageact':
                                           architecture.shrinkageact,
                                           'shrinkageact_dense':
                                           architecture.shrinkageact_dense,
                                           'shrinkageact64':
                                           architecture.shrinkageact64,
                                           'shrinkageact_slicing':
                                           architecture.slicing,
                                           'shrinkageact_padding':
                                           architecture.padding,
                                           'tf':
                                           tf,
                                           'cfg':
                                           cfg
                                       },
                                       compile=False)

    checkpoint = tf.train.Checkpoint(model=model)
    latest_model = tf.train.latest_checkpoint(model_path)

    restore_status = [checkpoint.restore(latest_model)]

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    # automatically choose a supported device when the specified
    # one doesn't support an op
    config.allow_soft_placement = True
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    with tf.Session(config=config, graph=g) as sess:
        sess.run(init_op)

        for element in restore_status:
            element.run_restore_ops()

        if verbose:
            print('   -successfully loaded from ' + model_path)
            model.summary()
            print('   -running predictions')

        # Predict the reconstructions and save as .tif
        for i, sino in enumerate(input_sino[offset:]):
            if i == max_apply:
                break
            try:
                prediction = sess.run(
                    model(tf.expand_dims(tf.expand_dims(sino * 1000, 0), -1),
                          training=False))
                prediction = np.array(prediction) * 0.012
                prediction = ndimage.zoom(prediction,
                                          [1, 972 / 486, 972 / 486, 1])
                imageio.imwrite(save_path + '_' + str(i + 1 + offset) + '.tif',
                                np.squeeze(prediction))
            except IndexError:
                pass

        sess.close()

    if verbose:
        print('   -predictions finished')
示例#52
0
 def nearest(self, data_img, dtype=np.float32):
     '''return a 2d np.array polar image'''
     if self.y_centerin_fac:
         data_img = zoom(data_img, 1. / self.y_centerin_fac, order=1)
     data = data_img.ravel()
     return data[self.indices_1d]
示例#53
0
def deepdraw(net,
             base_img,
             octaves,
             random_crop=True,
             visualize=False,
             foci=None,
             clip=True,
             **step_params):

    print "Target imageclasses"
    print foci
    # prepare base image
    image = preprocess(net, base_img)  # (3,224,224)

    # get input dimensions from net
    w = net.blobs['data'].width
    h = net.blobs['data'].height

    print "starting drawing"
    src = net.blobs['data']
    print "Reshaping input image size %d, %d" % (h, w)

    src.reshape(1, 3, h, w)  # resize the network's input image size
    for e, o in enumerate(octaves):
        if 'scale' in o:
            # resize by o['scale'] if it exists
            image = nd.zoom(image, (1, o['scale'], o['scale']))
        _, imw, imh = image.shape
        print "Image shape octave %d, %d" % (imw, imh)
        # select layer
        layer = o['layer']

        for i in range(o['iter_n']):
            if imw > w:
                if random_crop:
                    # randomly select a crop
                    #ox = random.randint(0,imw-224)
                    #oy = random.randint(0,imh-224)
                    mid_x = (imw - w) / 2.
                    width_x = imw - w
                    ox = np.random.normal(mid_x, width_x * 0.3, 1)
                    ox = int(np.clip(ox, 0, imw - w))
                    mid_y = (imh - h) / 2.
                    width_y = imh - h
                    oy = np.random.normal(mid_y, width_y * 0.3, 1)
                    oy = int(np.clip(oy, 0, imh - h))
                    # insert the crop into src.data[0]
                    print "Cropping: %d, %d" % (ox, oy)
                    src.data[0] = image[:, ox:ox + w, oy:oy + h]
                else:
                    ox = (imw - w) // 2
                    oy = (imh - h) // 2
                    src.data[0] = image[:, ox:ox + w, oy:oy + h]
            else:
                ox = 0
                oy = 0
                src.data[0] = image.copy()

            sigma = o['start_sigma'] + (
                (o['end_sigma'] - o['start_sigma']) * i) / o['iter_n']
            step_size = o['start_step_size'] + (
                (o['end_step_size'] - o['start_step_size']) * i) / o['iter_n']

            make_step(net,
                      end=layer,
                      clip=clip,
                      foci=foci,
                      sigma=sigma,
                      step_size=step_size)

            if visualize:
                vis = deprocess(net, src.data[0])
                if not clip:  # adjust image contrast if clipping is disabled
                    vis = vis * (255.0 / np.percentile(vis, 99.98))
                if i % 1 == 0:
                    writearray(vis, "./octave%d_f%d.jpg" % (e, i))

            if i % 10 == 0:
                print('finished step %d in octave %d' % (i, e))

            # insert modified image back into original image (if necessary)
            image[:, ox:ox + w, oy:oy + h] = src.data[0]

        print "octave %d image:" % e
        writearray(deprocess(net, image), "./octave_" + str(e) + ".jpg")

    # returning the resulting image
    return deprocess(net, image)
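
Each octave rescales only the spatial axes of the (C, H, W) image, leaving channels fixed; a minimal sketch of that growth schedule (hypothetical sizes):

import numpy as np
import scipy.ndimage as nd

image = np.random.rand(3, 224, 224)       # (C, H, W)
for scale in (1.2, 1.2):
    image = nd.zoom(image, (1, scale, scale), order=1)
    print(image.shape)                    # H and W grow ~20% per octave, C stays 3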
示例#54
0
        def get_action(self):

            color_img, depth_img = self.get_camera_data()
            print('color_img.shape: {}'.format(color_img.shape))
            color_img = get_prepared_img(color_img, 'rgb')
            depth_img = get_prepared_img(depth_img, 'depth')
            color_heightmap, depth_heightmap = get_heightmap(
                color_img, depth_img, robot.cam_intrinsics, robot.cam_pose,
                robot.workspace_limits, robot.heightmap_resolution)
            valid_depth_heightmap = depth_heightmap.copy()
            valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0
            depth_heightmap = valid_depth_heightmap
            # Apply 2x scale to input heightmaps
            color_heightmap_2x = ndimage.zoom(color_heightmap,
                                              zoom=[2, 2, 1],
                                              order=0)
            depth_heightmap_2x = ndimage.zoom(depth_heightmap,
                                              zoom=[2, 2],
                                              order=0)
            assert (
                color_heightmap_2x.shape[0:2] == depth_heightmap_2x.shape[0:2])

            # Add extra padding (to handle rotations inside network)
            diag_length = float(color_heightmap_2x.shape[0]) * np.sqrt(2)
            diag_length = np.ceil(diag_length / 32) * 32
            padding_width = int(
                (diag_length - color_heightmap_2x.shape[0]) / 2)
            color_heightmap_2x_r = np.pad(color_heightmap_2x[:, :, 0],
                                          padding_width,
                                          'constant',
                                          constant_values=0)
            color_heightmap_2x_r.shape = (color_heightmap_2x_r.shape[0],
                                          color_heightmap_2x_r.shape[1], 1)
            color_heightmap_2x_g = np.pad(color_heightmap_2x[:, :, 1],
                                          padding_width,
                                          'constant',
                                          constant_values=0)
            color_heightmap_2x_g.shape = (color_heightmap_2x_g.shape[0],
                                          color_heightmap_2x_g.shape[1], 1)
            color_heightmap_2x_b = np.pad(color_heightmap_2x[:, :, 2],
                                          padding_width,
                                          'constant',
                                          constant_values=0)
            color_heightmap_2x_b.shape = (color_heightmap_2x_b.shape[0],
                                          color_heightmap_2x_b.shape[1], 1)
            color_heightmap_2x = np.concatenate(
                (color_heightmap_2x_r, color_heightmap_2x_g,
                 color_heightmap_2x_b),
                axis=2)
            depth_heightmap_2x = np.pad(depth_heightmap_2x,
                                        padding_width,
                                        'constant',
                                        constant_values=0)

            # Pre-process color image (scale and normalize)
            image_mean = [0.485, 0.456, 0.406]
            image_std = [0.229, 0.224, 0.225]
            input_color_image = color_heightmap_2x.astype(float) / 255
            for c in range(3):
                input_color_image[:, :, c] = (input_color_image[:, :, c] -
                                              image_mean[c]) / image_std[c]

            # Pre-process depth image (normalize)
            image_mean = [0.01, 0.01, 0.01]
            image_std = [0.03, 0.03, 0.03]
            depth_heightmap_2x.shape = (depth_heightmap_2x.shape[0],
                                        depth_heightmap_2x.shape[1], 1)
            input_depth_image = np.concatenate(
                (depth_heightmap_2x, depth_heightmap_2x, depth_heightmap_2x),
                axis=2)
            for c in range(3):
                input_depth_image[:, :, c] = (input_depth_image[:, :, c] -
                                              image_mean[c]) / image_std[c]

            # Construct minibatch of size 1 (b,c,h,w)
            input_color_image.shape = (input_color_image.shape[0],
                                       input_color_image.shape[1],
                                       input_color_image.shape[2], 1)
            input_depth_image.shape = (input_depth_image.shape[0],
                                       input_depth_image.shape[1],
                                       input_depth_image.shape[2], 1)
            input_color_data = torch.from_numpy(
                input_color_image.astype(np.float32)).permute(3, 2, 0, 1)
            input_depth_data = torch.from_numpy(
                input_depth_image.astype(np.float32)).permute(3, 2, 0, 1)

            # Pass input data through model
            output_prob, state_feat = self.model.forward(
                input_color_data,
                input_depth_data)  # is_volatile, specific_rotation)

            # Return Q values (and remove extra padding)
            for rotate_idx in range(len(output_prob)):
                if rotate_idx == 0:
                    grasp_predictions = output_prob[rotate_idx][0].cpu(
                    ).data.numpy()[:, 0,
                                   int(padding_width /
                                       2):int(color_heightmap_2x.shape[0] / 2 -
                                              padding_width / 2),
                                   int(padding_width /
                                       2):int(color_heightmap_2x.shape[0] / 2 -
                                              padding_width / 2)]
                else:
                    grasp_predictions = np.concatenate(
                        (grasp_predictions,
                         output_prob[rotate_idx][0].cpu().data.numpy()
                         [:, 0,
                          int(padding_width /
                              2):int(color_heightmap_2x.shape[0] / 2 -
                                     padding_width / 2),
                          int(padding_width /
                              2):int(color_heightmap_2x.shape[0] / 2 -
                                     padding_width / 2)]),
                        axis=0)

            print('grasp_predictions.shape: {}'.format(
                grasp_predictions.shape))
            # Get pixel location and rotation with highest affordance prediction from heuristic algorithms (rotation, y, x)
            best_pix_ind = np.unravel_index(np.argmax(grasp_predictions),
                                            grasp_predictions.shape)
            predicted_value = np.max(grasp_predictions)

            # Compute 3D position of pixel
            print('Action: %s at (%d, %d, %d)' %
                  ('Grasp', best_pix_ind[0], best_pix_ind[1], best_pix_ind[2]))
            best_rotation_angle = np.deg2rad(
                best_pix_ind[0] * (360.0 / robot.model.num_rotations))
            best_pix_x = best_pix_ind[2]
            best_pix_y = best_pix_ind[1]
            primitive_position = [
                best_pix_x * self.heightmap_resolution +
                self.workspace_limits[0][0],
                best_pix_y * self.heightmap_resolution +
                self.workspace_limits[1][0],
                valid_depth_heightmap[best_pix_y][best_pix_x] +
                self.workspace_limits[2][0]
            ]

            return primitive_position  # grasp_predictions, state_feat
示例#55
0
def ApplyAugmentation(d3_img,
                      type_of_augmentation=None,
                      dict_parameter=None,
                      seed=1):
    random.seed(seed)
    d3_img = d3_img.reshape((16, 144, 144))
    if dict_parameter is None:
        dict_parameter = {
            'rotation_xy': [-20, 20],
            'rotation_zx': [-20, 20],
            'rotation_zy': [-20, 20],
            'zooming': [1.05, 1.15],
            'down_scale': [0.85, 0.99]
        }
    if type_of_augmentation is None:
        seq = [
            'None',
            'rotation_xy',
            'rotation_zx',
            'rotation_zy',
            'zooming',
            'h_flip',
            #'elastic'
            #'v_flip',
            #'z_flip',
            #'rotate_90_k1',
            #'None'
            #'down_scale',
            #'h_flip',
            #'v_flip',
            #'z_flip',
            #'rotate_90_k1',
            #'rotate_90_k2',
            #'rotate_90_k3'
        ]
        type_of_augmentation = random.choice(seq)

    if type_of_augmentation == 'rotation_xy':
        angle = random.randint(dict_parameter[type_of_augmentation][0],
                               dict_parameter[type_of_augmentation][1])
        new_3d_img = ndimage.rotate(d3_img,
                                    angle,
                                    axes=(1, 2),
                                    reshape=False)
    elif type_of_augmentation == 'rotation_zx':
        angle = random.randint(dict_parameter[type_of_augmentation][0],
                               dict_parameter[type_of_augmentation][1])
        new_3d_img = ndimage.rotate(d3_img,
                                    angle,
                                    axes=(0, 2),
                                    reshape=False)
    elif type_of_augmentation == 'rotation_zy':
        angle = random.randint(dict_parameter[type_of_augmentation][0],
                               dict_parameter[type_of_augmentation][1])
        new_3d_img = ndimage.rotate(d3_img,
                                    angle,
                                    axes=(0, 1),
                                    reshape=False)
    elif type_of_augmentation == 'zooming':
        value_factor = random.uniform(dict_parameter[type_of_augmentation][0],
                                      dict_parameter[type_of_augmentation][1])
        new_img_zoom = ndimage.zoom(d3_img, (1, value_factor, value_factor))
        x_a = new_img_zoom.shape[2] // 2 - 72
        y_a = new_img_zoom.shape[1] // 2 - 72
        new_3d_img = new_img_zoom[:, y_a:y_a + 144, x_a:x_a + 144]
    elif type_of_augmentation == 'down_scale':
        value_factor = random.uniform(dict_parameter[type_of_augmentation][0],
                                      dict_parameter[type_of_augmentation][1])
        new_img_zoom = ndimage.zoom(d3_img, (1, value_factor, value_factor))
        new_img_zoom_tmp = np.zeros_like(d3_img)
        # Center the downscaled slices inside a zero-padded frame of the
        # original size (the x offset and width must use shape[2], the x axis)
        y_a_b = new_img_zoom_tmp.shape[1] // 2 - new_img_zoom.shape[1] // 2
        x_a_b = new_img_zoom_tmp.shape[2] // 2 - new_img_zoom.shape[2] // 2
        new_img_zoom_tmp[:, y_a_b:y_a_b + new_img_zoom.shape[1],
                         x_a_b:x_a_b + new_img_zoom.shape[2]] = new_img_zoom
        new_3d_img = new_img_zoom_tmp.copy()
    elif type_of_augmentation == 'h_flip':
        new_3d_img = np.flip(d3_img, axis=1)
    elif type_of_augmentation == 'v_flip':
        new_3d_img = np.flip(d3_img, axis=2)
    elif type_of_augmentation == 'z_flip':
        new_3d_img = np.flip(d3_img, axis=0)
    elif type_of_augmentation == 'rotate_90_k1':
        new_3d_img = np.rot90(d3_img, axes=(1, 2))
    elif type_of_augmentation == 'rotate_90_k2':
        new_3d_img = np.rot90(d3_img, k=2, axes=(1, 2))
    elif type_of_augmentation == 'rotate_90_k3':
        new_3d_img = np.rot90(d3_img, k=3, axes=(1, 2))
    # elif type_of_augmentation == 'elastic':
    #     transformation = augment.create_identity_transformation(d3_img.shape)
    #     # jitter in 3D
    #     transformation += augment.create_elastic_transformation(
    #         d3_img.shape,
    #         control_point_spacing=100,
    #         jitter_sigma=0.2)
    #     # apply transformation
    #     new_3d_img = augment.apply_transformation(d3_img, transformation)
    else:
        new_3d_img = d3_img

    # Randomly apply an additional elastic deformation on top of the chosen
    # augmentation; elastic_3d_transform is assumed to be defined elsewhere
    if random.choice([True, False]):
        new_3d_img = elastic_3d_transform(new_3d_img, seed=seed)

    return new_3d_img.reshape((16, 144, 144, 1))
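
A minimal usage sketch for ApplyAugmentation, assuming elastic_3d_transform (called inside the function) is importable from the surrounding project:

import numpy as np

volume = np.random.rand(16, 144, 144).astype(np.float32)
# Pin the augmentation type for reproducibility; the random elastic pass at
# the end may still fire, which is why elastic_3d_transform must exist.
augmented = ApplyAugmentation(volume, type_of_augmentation='rotation_xy', seed=7)
print(augmented.shape)  # (16, 144, 144, 1)
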
Example #56
def Workflow_npm1(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure NPM1

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path, with the original
            filename without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [0.5, 15]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 2
    dot_2d_sigma_extra = 1
    dot_2d_cutoff = 0.025
    minArea = 5
    low_level_min_size = 700
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio),
                          order=2)

        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio)

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # step 1: low level thresholding
    # global_otsu = threshold_otsu(structure_img_smooth)
    global_tri = threshold_triangle(structure_img_smooth)
    global_median = np.percentile(structure_img_smooth, 50)

    th_low_level = (global_tri + global_median) / 2
    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(bw_low_level,
                                        min_size=low_level_min_size,
                                        connectivity=1,
                                        in_place=True)
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    # step 2: high level thresholding
    local_cutoff = 0.333 * threshold_otsu(structure_img_smooth)
    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)
    for idx in range(num_obj):
        single_obj = lab_low == (idx + 1)
        local_otsu = threshold_otsu(structure_img_smooth[single_obj])
        if local_otsu > local_cutoff:
            bw_high_level[np.logical_and(
                structure_img_smooth > 0.98 * local_otsu, single_obj)] = 1

    out_img_list.append(bw_high_level.copy())
    out_name_list.append("bw_coarse")

    response_bright = dot_slice_by_slice(structure_img_smooth,
                                         log_sigma=dot_2d_sigma)

    response_dark = dot_slice_by_slice(1 - structure_img_smooth,
                                       log_sigma=dot_2d_sigma)
    response_dark_extra = dot_slice_by_slice(1 - structure_img_smooth,
                                             log_sigma=dot_2d_sigma_extra)

    # inner_mask = bw_high_level.copy()
    # for zz in range(inner_mask.shape[0]):
    #    inner_mask[zz,:,:] = binary_fill_holes(inner_mask[zz,:,:])

    holes = np.logical_or(response_dark > dot_2d_cutoff,
                          response_dark_extra > dot_2d_cutoff)
    # holes[~inner_mask] = 0

    bw_extra = response_bright > dot_2d_cutoff
    # bw_extra[~bw_high_level]=0
    bw_extra[~bw_low_level] = 0

    bw_final = np.logical_or(bw_extra, bw_high_level)
    bw_final[holes] = 0

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw_final,
                               min_size=minArea,
                               connectivity=1,
                               in_place=True)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_fine")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
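
A usage sketch for Workflow_npm1: with output_type="array" the mask is returned directly, so no output_path is needed. The input volume is synthetic, and the helpers (intensity_normalization, dot_slice_by_slice, etc.) are assumed importable as in the surrounding package:

import numpy as np

img = np.random.rand(16, 256, 256).astype(np.float32)  # placeholder (Z, Y, X) stack
seg = Workflow_npm1(img, rescale_ratio=-1, output_type="array")
print(seg.dtype, np.unique(seg))  # uint8, values drawn from {0, 255}
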
Example #57
def acolite_map(inputfile=None, output=None, parameters=None, 
                dpi=300, ext='png', mapped=True, max_dim = 1000, limit=None,
                auto_range=False, range_percentiles=(5,95), dataset_rescale=False,
                map_title=True, 
                map_colorbar=False, map_colorbar_orientation='vertical',#'horizontal', 
                rgb_rhot = False, rgb_rhos = False, 
                red_wl = 660, green_wl = 560, blue_wl = 480, rgb_min = [0.0]*3, rgb_max = [0.15]*3, rgb_pan_sharpen = False, map_parameters_pan=True,
                map_fillcolor='White',
                map_scalepos = 'LR', map_scalebar = True, map_scalecolor='Black', map_scalecolor_rgb='White', map_scalelen=None, map_projection='tmerc',
                map_colorbar_edge=True, map_points=None, return_image=False, map_raster=False):

    import os, copy
    import datetime, time, dateutil.parser

    from acolite.shared import datascl,nc_data,nc_datasets,nc_gatts,qmap,closest_idx
    from acolite.acolite import pscale
    import acolite as ac

    from numpy import nanpercentile, log10, isnan, dstack
    from scipy.ndimage import zoom

    import matplotlib
    import matplotlib.figure
    import matplotlib.backends.backend_agg

    if inputfile is None or not os.path.exists(inputfile):
        print('File {} not found.'.format(inputfile))
        return False

    ## run through maps
    maps = {'rhot': rgb_rhot, 'rhos': rgb_rhos, 'parameters': parameters is not None}
    if all(not maps[m] for m in maps): return
        
    ## get parameter scaling
    psc = pscale()

    ## read netcdf info    
    l2w_datasets = nc_datasets(inputfile)
    print(l2w_datasets)

    gatts = nc_gatts(inputfile)
    if 'MISSION_INDEX' in gatts:
        sat, sen = gatts['MISSION'], gatts['MISSION_INDEX']
        stime = dateutil.parser.parse(gatts['IMAGING_DATE']+' '+gatts['IMAGING_TIME']) 
        obase = '{}_{}_{}'.format(sat, sen, stime.strftime('%Y_%m_%d_%H_%M_%S'))
    else:
        sp = gatts['sensor'].split('_') if 'sensor' in gatts else gatts['SATELLITE_SENSOR'].split('_')  
        sat, sen = sp[0], sp[1]
        stime = dateutil.parser.parse(gatts['isodate'] if 'isodate' in gatts else gatts['ISODATE']) 
        obase = gatts['output_name'] if 'output_name' in gatts else gatts['obase']

    ## find pan sharpening dataset
    if rgb_pan_sharpen:
        if sat not in ['L7','L8']: rgb_pan_sharpen = False
        tmp = os.path.splitext(inputfile)
        l1_pan_ncdf = '{}L1R_pan{}'.format(tmp[0][0:-3],tmp[1])
        if os.path.exists(l1_pan_ncdf):
            pan_data = nc_data(l1_pan_ncdf, 'rhot_pan')
        else:
            print('L1 pan NetCDF file not found')
            rgb_pan_sharpen=False

    if output is not None:
        odir = output
    else:
        odir = gatts['output_dir']
        
    if not os.path.exists(odir): os.makedirs(odir)

    scf = 1.0
    rescale = 1.0

    #if dataset_rescale or mapped:
    lon = nc_data(inputfile, 'lon')
    if mapped: 
        lat = nc_data(inputfile, 'lat')
        if rgb_pan_sharpen:
            lon_pan = zoom(lon, zoom=2, order=1)
            lat_pan = zoom(lat, zoom=2, order=1)

    ## set up mapping info
    from numpy import linspace, tile, ceil, isnan, nan
    from scipy.ndimage import map_coordinates
    mask_val = -9999.9999

    ## rescale to save memory
    dims = lon.shape
    dsc = (dims[0] / max_dim, dims[1] / max_dim)
    scf /= max(dsc)

    if rgb_pan_sharpen: scf = 1.0

    if (scf < 1.) and dataset_rescale:
        sc_dims = (int(ceil(dims[0] * scf)), int(ceil(dims[1] * scf)))
        xdim = linspace(0, dims[1], sc_dims[1]).reshape(1, sc_dims[1])
        ydim = linspace(0, dims[0], sc_dims[0]).reshape(sc_dims[0], 1)
        xdim = tile(xdim, (sc_dims[0], 1))
        ydim = tile(ydim, (1, sc_dims[1]))

        resc = [ydim, xdim]
        xdim, ydim = None, None
        lon = map_coordinates(lon, resc, mode='nearest')
        lat = map_coordinates(lat, resc, mode='nearest')
    else:
        rescale = scf

    ## run through parameters
    for mi in maps:
        if not maps[mi]: continue

        if mi == 'parameters':
            if rgb_pan_sharpen:
                if map_parameters_pan & mapped:
                    lon = lon_pan * 1.0
                    lon_pan = None
                    lat = lat_pan * 1.0
                    lat_pan = None
                pan_data, lon_pan, lat_pan = None, None, None

            print('Mapping {}'.format(mi))
            if not isinstance(parameters, list): parameters = [parameters]
            for pid, par in enumerate(parameters):
                pard = None

                ## check if this parameter exists
                if par not in l2w_datasets:
                    print('Parameter {} not in file {}.'.format(par, inputfile))
                    continue
                    
                print('Mapping {}'.format(par))
                ## read data
                data = nc_data(inputfile, par)
                if (rgb_pan_sharpen) & (map_parameters_pan):
                    data = zoom(data, zoom=2, order=1)

                ## rescale data
                if (scf != 1.0) and dataset_rescale:
                    data[isnan(data)] = mask_val
                    data = map_coordinates(data, resc, cval=mask_val)
                    data[data <= int(mask_val)] = nan
                    data[data <= 0] = nan

                data_range = nanpercentile(data, range_percentiles)

                ## get parameter mapping configuration
                if par in psc:
                    pard = copy.deepcopy(psc[par])
                else:
                    tmp = par.split('_')
                    par_generic = '_'.join(tmp[0:-1] + ['*'])
                    if par_generic in psc:
                        pard = copy.deepcopy(psc[par_generic])
                        try:  ## add wavelength to generic name
                            wave = int(tmp[len(tmp) - 1])
                            pard['name'] = '{} ({} nm)'.format(pard['name'], wave)
                        except ValueError:
                            pass
                    else:
                        pard = {'color table': 'default', 'min': data_range[0], 'max': data_range[1],
                                'log': False, 'name': par, 'unit': '', 'parameter': par}

                if pard['color table'] == 'default': pard['color table']='viridis'
                ctfile = "{}/{}/{}.txt".format(ac.config['pp_data_dir'], 'Shared/ColourTables', pard['color table'])

                if os.path.exists(ctfile):
                    from matplotlib.colors import ListedColormap
                    from numpy import loadtxt
                    pard['color table'] = ListedColormap(loadtxt(ctfile)/255.)

                if 'title' not in pard: pard['title']='{} [{}]'.format(pard['name'],pard['unit'])
                if auto_range:
                    pard['min']=data_range[0]
                    pard['max']=data_range[1]

                if isnan(pard['min']): pard['min']=data_range[0]
                if isnan(pard['max']): pard['max']=data_range[1]

                ## outputfile
                outputfile = '{}/{}_{}.png'.format(odir,obase,par)

                if map_title:
                    title = '{} {}/{} {}'.format(pard['name'], sat, sen, stime.strftime('%Y-%m-%d (%H:%M UTC)'))
                else:
                    title = None

                ## use qmap option
                if mapped:
                    prange = (pard['min'], pard['max'])  # avoid shadowing the built-in range
                    if 'limit' in gatts:
                        limit = gatts['limit']

                    if 'xx' not in locals():
                        xx, yy, m = qmap(data, lon, lat, outputfile=outputfile, title=title, rescale=rescale,
                                         colorbar=map_colorbar_orientation, colorbar_edge=map_colorbar_edge, cmap=pard['color table'],
                                         label=pard['title'], range=prange, log=pard['log'], map_fillcolor=map_fillcolor,
                                         limit=limit, dpi=dpi, points=map_points, projection=map_projection,
                                         scalebar=map_scalebar, scalepos=map_scalepos,
                                         scalecolor=map_scalecolor, scalelen=map_scalelen)
                    else:
                        xx, yy, m = qmap(data, lon, lat, outputfile=outputfile, title=title, rescale=rescale,
                                         colorbar=map_colorbar_orientation, colorbar_edge=map_colorbar_edge, cmap=pard['color table'],
                                         label=pard['title'], range=prange, log=pard['log'], map_fillcolor=map_fillcolor,
                                         limit=limit, dpi=dpi, points=map_points, projection=map_projection,
                                         scalebar=map_scalebar, scalepos=map_scalepos,
                                         scalecolor=map_scalecolor, scalelen=map_scalelen, xx=xx, yy=yy, m=m)

                else:
                    import matplotlib.cm as cm
                    cmap = cm.get_cmap(pard['color table'])
                    cmap.set_bad(map_fillcolor)
                    cmap.set_under(map_fillcolor)

                    if not map_raster:
                        ## set up plot
                        fig = matplotlib.figure.Figure()
                        canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
                        ax = fig.add_subplot(111)

                        print(pard['min'], pard['max'])

                        if pard['log']:
                            from matplotlib.colors import LogNorm
                            cax = ax.imshow(data, vmin=pard['min'], vmax=pard['max'], cmap=cmap,
                                               norm=LogNorm(vmin=pard['min'], vmax=pard['max']))
                        else:
                            cax = ax.imshow(data, vmin=pard['min'], vmax=pard['max'], cmap=cmap)

                        if map_colorbar:
                            if map_colorbar_orientation == 'vertical':
                                cbar = fig.colorbar(cax, orientation='vertical')
                                cbar.ax.set_ylabel(pard['title'])
                            else:
                                cbar = fig.colorbar(cax, orientation='horizontal')
                                cbar.ax.set_xlabel(pard['title'])

                        # write the figure regardless of the colorbar setting
                        if map_title: ax.set_title(title)
                        ax.axis('off')
                        canvas.print_figure(outputfile, dpi=dpi, bbox_inches='tight')
                    else:
                        from PIL import Image
                        ## rescale for mapping
                        if pard['log']:
                            from numpy import log10
                            datasc = datascl(log10(data), dmin=log10(pard['min']), dmax=log10(pard['max']))
                        else:
                            datasc = datascl(data, dmin=pard['min'], dmax=pard['max'])

                        d = cmap(datasc)
                        for wi in (0,1,2):
                            ## convert back to 8 bit channels (not ideal)
                            d_ = datascl(d[:,:,wi], dmin=0, dmax=1)
                            if wi == 0: im = d_
                            else: im = dstack((im,d_))

                        img = Image.fromarray(im)

                        ## output image    
                        img.save(outputfile)

                print('Wrote {}'.format(outputfile))
        else:
            print('Mapping RGB {}'.format(mi))
            ## RGBs
            waves = [float(ds.split('_')[1]) for ds in l2w_datasets if ds[0:4] == mi]
            if len(waves) == 0:
                print('No appropriate datasets found for RGB {} in {}'.format(mi, inputfile))
                continue

            ## read datasets
            for wi, wl in enumerate([red_wl, green_wl, blue_wl]):
                idx, wave = closest_idx(waves, wl)
                cpar = '{}_{}'.format(mi, int(wave))
                ## read data
                data = nc_data(inputfile, cpar)

                if rgb_pan_sharpen:
                    data = zoom(data, zoom=2, order=1)
                    if wi == 0: vis_i = data * 1.0
                    else: vis_i += data
                    if wi == 2:
                        vis_i /= 3
                        pan_i = vis_i/pan_data
                        vis_i = None

                ## rescale data
                if (scf != 1.0) and dataset_rescale:
                    data[isnan(data)] = mask_val
                    data = map_coordinates(data, resc, cval=mask_val)
                    data[data <= int(mask_val)] = nan
                    data[data <= 0] = nan

                ## stack image
                if wi == 0:
                    image = data
                else:
                    image = dstack((image,data))
                
            ## rescale data between 0 and 1
            for wi in (2,1,0):
                if rgb_pan_sharpen: image[:,:,wi] /= pan_i
                image[:,:,wi] = datascl(image[:,:,wi], dmin=rgb_min[wi], dmax=rgb_max[wi])/255.

            par = r'$\rho_{}$'.format(mi[3]) + ' RGB'
            if map_title:
                title = '{} {}/{} {}'.format(par, sat, sen, stime.strftime('%Y-%m-%d (%H:%M UTC)'))
            else:
                title = None

            ## outputfile
            if rgb_pan_sharpen: 
                outputfile = '{}/{}_rgb_{}_pan.png'.format(odir,obase,mi)
            else:
                outputfile = '{}/{}_rgb_{}.png'.format(odir,obase,mi)

            # use qmap option
            if mapped:
                if 'limit' in gatts:
                    limit = gatts['limit']

                if rgb_pan_sharpen:
                    ret = qmap(image, lon_pan, lat_pan, outputfile=outputfile, title=title, rescale=rescale,
                                               colorbar=map_colorbar_orientation, colorbar_edge=map_colorbar_edge,
                                               limit=limit, dpi=dpi, points=map_points, projection=map_projection,
                                               scalebar=map_scalebar, scalepos=map_scalepos, 
                                               scalecolor=map_scalecolor_rgb, scalelen=map_scalelen)      
                    ret = None     
                else:
                    if ('xx' not in locals()):
                        xx, yy, m = qmap(image, lon, lat, outputfile=outputfile, title=title, rescale=rescale,
                                               colorbar=map_colorbar_orientation, colorbar_edge=map_colorbar_edge,
                                               limit=limit, dpi=dpi, points=map_points, projection=map_projection,
                                               scalebar=map_scalebar, scalepos=map_scalepos, 
                                               scalecolor=map_scalecolor_rgb, scalelen=map_scalelen)                
                    else:
                        xx, yy, m = qmap(image, lon, lat, outputfile=outputfile, title=title, rescale=rescale,
                                               colorbar=map_colorbar_orientation, colorbar_edge=map_colorbar_edge,
                                               limit=limit, dpi=dpi, points=map_points, projection=map_projection,
                                               scalebar=map_scalebar, scalepos=map_scalepos, 
                                               scalecolor=map_scalecolor_rgb, scalelen=map_scalelen, xx=xx, yy=yy, m=m)


            else:
                if not map_raster:
                    ## set up plot
                    fig = matplotlib.figure.Figure()
                    canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
                    ax = fig.add_subplot(111)
                    ax.imshow(image)
                    image = None
                    
                    if map_title: ax.set_title(title)
                    ax.axis('off')
                    canvas.print_figure(outputfile, dpi=dpi, bbox_inches='tight')
                else:
                    from PIL import Image
                    for wi in (0,1,2):
                        # convert again to 8 bit channels (not ideal)
                        data = datascl(image[:,:,wi], dmin=0, dmax=1)
                        if wi == 0:
                            im = data
                        else:
                            im = dstack((im,data))

                    img = Image.fromarray(im)
                    img.save(outputfile)

            print('Wrote {}'.format(outputfile))
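
A hypothetical invocation of acolite_map; the input path and parameter names below are placeholders and must match datasets actually present in the L2W NetCDF:

acolite_map(inputfile='/data/S2A_2019_06_01_L2W.nc', output='/data/maps',
            parameters=['chl_oc3', 'spm_nechad'],
            rgb_rhos=True, map_colorbar=True, auto_range=True, dpi=150)
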
Example #58
def Workflow_st6gal1(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure ST6GAL1

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path, with the original
            filename without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [9, 19]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    cell_wise_min_area = 1200
    dot_3d_sigma = 1.6
    dot_3d_cutoff = 0.02
    minArea = 10
    thin_dist = 1
    thin_dist_preserve = 1.6
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)

        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio
        )

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # cell-wise local adaptive thresholding
    th_low_level = threshold_triangle(structure_img_smooth)

    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(
        bw_low_level, min_size=cell_wise_min_area, connectivity=1, in_place=True
    )
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)

    for idx in range(num_obj):
        single_obj = lab_low == (idx + 1)
        local_otsu = threshold_otsu(structure_img_smooth[single_obj > 0])
        bw_high_level[
            np.logical_and(structure_img_smooth > local_otsu * 0.98, single_obj)
        ] = 1

    # LOG 3d to capture spots
    response = dot_3d(structure_img_smooth, log_sigma=dot_3d_sigma)
    bw_extra = response > dot_3d_cutoff

    # thinning
    bw_high_level = topology_preserving_thinning(
        bw_high_level, thin_dist_preserve, thin_dist
    )

    # combine the two parts
    bw = np.logical_or(bw_high_level, bw_extra)

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
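
As with the NPM1 workflow, a usage sketch under the same assumptions about importable helpers; output_type="array_with_contour" additionally returns the contour produced by generate_segmentation_contour:

import numpy as np

img = np.random.rand(16, 256, 256).astype(np.float32)  # placeholder (Z, Y, X) stack
seg, contour = Workflow_st6gal1(img, output_type="array_with_contour")
print(seg.shape, contour.shape)
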
Example #59
    def _update_thumbnail(self):
        """Update thumbnail with current image data and colormap."""
        if self.dims.ndisplay == 3 and self.dims.ndim > 2:
            image = np.max(self._data_thumbnail, axis=0)
        else:
            image = self._data_thumbnail

        # float16 is not supported by ndi.zoom
        if image.dtype == np.float16:
            image = image.astype(np.float32)

        raw_zoom_factor = np.divide(self._thumbnail_shape[:2],
                                    image.shape[:2]).min()
        new_shape = np.clip(
            raw_zoom_factor * np.array(image.shape[:2]),
            1,  # smallest side should be 1 pixel wide
            self._thumbnail_shape[:2],
        )
        zoom_factor = tuple(new_shape / image.shape[:2])
        if self.rgb:
            # warning filter can be removed with scipy 1.4
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                downsampled = ndi.zoom(image,
                                       zoom_factor + (1, ),
                                       prefilter=False,
                                       order=0)
            if image.shape[2] == 4:  # image is RGBA
                colormapped = np.copy(downsampled)
                colormapped[..., 3] = downsampled[..., 3] * self.opacity
                if downsampled.dtype == np.uint8:
                    colormapped = colormapped.astype(np.uint8)
            else:  # image is RGB
                if downsampled.dtype == np.uint8:
                    alpha = np.full(
                        downsampled.shape[:2] + (1, ),
                        int(255 * self.opacity),
                        dtype=np.uint8,
                    )
                else:
                    alpha = np.full(downsampled.shape[:2] + (1, ),
                                    self.opacity)
                colormapped = np.concatenate([downsampled, alpha], axis=2)
        else:
            # warning filter can be removed with scipy 1.4
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                downsampled = ndi.zoom(image,
                                       zoom_factor,
                                       prefilter=False,
                                       order=0)
            low, high = self.contrast_limits
            downsampled = np.clip(downsampled, low, high)
            color_range = high - low
            if color_range != 0:
                downsampled = (downsampled - low) / color_range
            downsampled = downsampled**self.gamma
            color_array = self.colormap[1][downsampled.ravel()]
            colormapped = color_array.rgba.reshape(downsampled.shape + (4, ))
            colormapped[..., 3] *= self.opacity
        self.thumbnail = colormapped
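
The aspect-preserving downsampling above can be reproduced standalone. The thumbnail shape below is illustrative (napari's actual _thumbnail_shape may differ):

import numpy as np
from scipy import ndimage as ndi

thumbnail_shape = (32, 32)
image = np.random.rand(512, 256).astype(np.float32)

raw_zoom_factor = np.divide(thumbnail_shape, image.shape[:2]).min()  # keep aspect ratio
new_shape = np.clip(raw_zoom_factor * np.array(image.shape[:2]), 1, thumbnail_shape)
zoom_factor = tuple(new_shape / image.shape[:2])

thumb = ndi.zoom(image, zoom_factor, prefilter=False, order=0)
print(thumb.shape)  # (32, 16): the longer side is capped at 32 pixels
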
Example #60
            rsphere=(6378273., 6356889.))

#Change the projection
lon = (x[0], x[1], x[3], x[2], x[0])
lat = (y[0], y[1], y[3], y[2], y[0])
x, y = m(lon, lat)

#New "stere" corner coordinates
X = array([[x[0], x[1]], [x[3], x[2]]], dtype=float)
Y = array([[y[0], y[1]], [y[3], y[2]]], dtype=float)

rows = 15996
cols = 2528

#coordinate interpolation
X1 = ndi.zoom(X, (rows / 2, cols / 2), order=1)
Y1 = ndi.zoom(Y, (rows / 2, cols / 2), order=1)

#Draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.drawmapboundary(fill_color='dodgerblue')
m.drawrivers()
m.fillcontinents(color='snow', lake_color='aqua', zorder=0)
m.drawparallels(np.arange(70.9, 71.6, 0.1), labels=[1, 0, 0, 1], fontsize=10)
m.drawmeridians(np.arange(-157.4, -156.0, 0.3),
                labels=[1, 0, 0, 1],
                labelstyle='+/-',
                fontsize=10)
m.plot(x, y, linewidth=3, color='r')
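
The corner-interpolation trick above relies on ndi.zoom's endpoint-aligned sampling: zooming a 2x2 corner array by (rows/2, cols/2) with order=1 yields a bilinearly interpolated rows x cols grid whose corners match the inputs. A small self-contained check with toy factors:

import numpy as np
from scipy import ndimage as ndi

corners = np.array([[0.0, 10.0], [20.0, 30.0]])
grid = ndi.zoom(corners, (8, 6), order=1)  # 2x2 -> 16x12, same idea as (rows/2, cols/2)
print(grid.shape)                # (16, 12)
print(grid[0, 0], grid[-1, -1])  # corner values preserved: 0.0 30.0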