Example #1
def descriptorSS(vertices, facet, coef_num_sqrt=13):
    """
	apply spherical harmonics transform on the imaginary part of complex function on the sphere S^2
	and use the truncated coefficients as the descriptor of the given mesh
	refer to:
		Description of 3D-shape using a complex function on the sphere
		D.V. Vranic, D. Saupe, 2002
		(this is the shading-based method mentioned in the paper)	
	basic idea:
		y: S^2 --> [0,inf) in R, u |-->  0  if x(u) = 0 ; dot(u,n(u)), otherwise
		where n(u) is the normal vector of the mesh at the point x(u)*u 
		(the fast intersection point on the surface with ray in direction u)
		get enough sample of y(u) and apply spherical harmonics transform on it
	---------------------------------------------------------------------------------
	para::coef_num_sqrt: the square root of desired number of the dimensions of the 
	    shape of the mesh which is also the number of the truncated coefficients
	output:: coeffs_trunc: list with size coef_num_sqrt^2
	..the desired shape descriptor
	"""
    # get the sample value of y(u)
    zi = gridSampleYU(vertices, facet)
    # generate the spherical harmonics coefficients
    coeffs = SHExpandDH(zi, sampling=2)
    # keep degrees k = 0 .. coef_num_sqrt-1: (k+1) cosine and k sine coefficients per degree
    coeffs_trunc = [[
        coeffs[0, k, :(k + 1)].tolist(), coeffs[1, k, 1:(k + 1)].tolist()
    ] for k in range(coef_num_sqrt)]
    # flatten into a 1-D vector of length coef_num_sqrt**2
    coeffs_trunc = [
        var for sublist in coeffs_trunc for subsublist in sublist
        for var in subsublist
    ]
    coeffs_trunc = np.array(coeffs_trunc)
    return coeffs_trunc
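The truncation above keeps (k + 1) cosine and k sine coefficients for each degree k, i.e. 2k + 1 values per degree, so the flattened descriptor has exactly coef_num_sqrt^2 entries. A minimal sanity check of that count, using a random Driscoll-Healy grid in place of gridSampleYU (which is not shown here):

import numpy as np
from pyshtools.expand import SHExpandDH

coef_num_sqrt = 13
zi = np.random.rand(180, 360)                      # nlat x 2*nlat grid, as expected for sampling=2
coeffs = SHExpandDH(zi, sampling=2)
trunc = [[coeffs[0, k, :k + 1].tolist(), coeffs[1, k, 1:k + 1].tolist()]
         for k in range(coef_num_sqrt)]
flat = np.array([v for pair in trunc for part in pair for v in part])
assert flat.size == coef_num_sqrt ** 2             # sum of (2k + 1) for k = 0..12 is 169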
Example #2
def showScatterReXU3d(vertices, facet, coef_num_sqrt=13):
    """
	visualize the reconstruction mesh use the truncated spherical harmonics coefficients
	generated by real function x(u) on the sphere
	"""
    # pre: generate the truncated coefficients and reconstruct x(u)
    zi = gridSampleXU(vertices, facet)
    coeffs = SHExpandDH(zi, sampling=2)
    # zero all coefficients of degree >= coef_num_sqrt (low-pass truncation)
    coeffs[:, coef_num_sqrt:, :] = 0
    zi_filtered = MakeGridDH(coeffs, sampling=2)
    # reconstruct the scatter point of the mesh
    sp = np.array([[
        zi_filtered[v][u], (u - 180.0) / 180.0 * np.pi,
        (v - 90.0) / 180.0 * np.pi
    ] for u in range(360) for v in range(180)])
    xyz = geometry.sp2xyz(sp)
    # plot the scatter point
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2])
    ax.set_xlabel('X label')
    ax.set_ylabel('Y label')
    ax.set_zlabel('Z label')
    plt.show()
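geometry.sp2xyz is an external helper of this project; a plausible stand-in, assuming the columns of sp are (radius, longitude in radians, latitude in radians), would be:

import numpy as np

def sp2xyz_sketch(sp):
    # hypothetical equivalent of geometry.sp2xyz; the real helper may differ
    r, lon, lat = sp[:, 0], sp[:, 1], sp[:, 2]
    x = r * np.cos(lat) * np.cos(lon)
    y = r * np.cos(lat) * np.sin(lon)
    z = r * np.sin(lat)
    return np.column_stack((x, y, z))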
Example #3
def descriptorRS(vertices, facet, coef_num_sqrt=13):
    """ 
	apply spherical harmonics transform on the real function on the sphere S^2
	and use the truncated coefficients as the descriptor of the given mesh
	refer to:
		Description of 3D-shape using a complex function on the sphere
		D.V. Vranic, D. Saupe, 2002
		(this is the ray-based method mentioned in the paper)
	basic idea:
		x: S^2 --> [0,inf) in R, u |--> max{x>=0| xu in mesh-surface or {0}}
		get enough sample of x(u) and apply spherical harmonics transform on it
	---------------------------------------------------------------------------------
	para::coef_num_sqrt: the square root of desired number of the dimensions of the 
	    shape of the mesh which is also the number of the truncated coefficients
	output:: coeffs_trunc: list with size coef_num_sqrt^2
	..the desired shape descriptor
	"""
    # get the sample value of x(u)
    zi = gridSampleXU(vertices, facet)
    # generate the spherical harmonics coefficients
    coeffs = SHExpandDH(zi, sampling=2)
    # keep degrees k = 0 .. coef_num_sqrt-1: (k+1) cosine and k sine coefficients per degree
    coeffs_trunc = [[
        coeffs[0, k, :(k + 1)].tolist(), coeffs[1, k, 1:(k + 1)].tolist()
    ] for k in range(coef_num_sqrt)]
    # flatten into a 1-D vector of length coef_num_sqrt**2
    coeffs_trunc = [
        var for sublist in coeffs_trunc for subsublist in sublist
        for var in subsublist
    ]
    coeffs_trunc = np.array(coeffs_trunc)
    return coeffs_trunc
Example #4
    def study_area(self,points,north_pole=False,central_meridian=False):
        
        a = float(self.mean_equator_radius.partition('m')[0])/1e3 # km
        
        if self.equi_material == 'Water':
            rho = 1000
        elif self.equi_material == 'Ice':
            rho = 917
        elif self.equi_material == 'Sand':
            rho = 1442
        else:
            raise ValueError('Unknown equivalent material: ' + str(self.equi_material))
       
        qs,qs_std = [],[]

        mask_grid = Curve2Mask(2*(self.degree_order+1),points,north_pole,sampling=2,centralmeridian=central_meridian)
        mask_shc = SHExpandDH(mask_grid,sampling=2)
        
        area = mask_shc[0,0,0]*4*np.pi*a**2 # km^2
        
        for shc in self.shc:
            q = np.sum(shc*mask_shc)*4*np.pi*a**2*rho/1e9 # Gt
            qs.append(q)
        for shc_std in self.shc_std:    
            q_std = np.sqrt(np.sum((shc_std*mask_shc)**2))*4*np.pi*a**2*rho/1e9 # Gt
            qs_std.append(q_std)
        qs,qs_std = np.array(qs),np.array(qs_std) 
        
        info = self.info.copy()
        info['title'] = 'Integral(over the study area) of '+info['title']
        
        return Series(info,area,qs,qs_std)
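The area computation relies on the fact that, with 4-pi normalized harmonics, the (0, 0) coefficient of a 0/1 mask equals the mean of the mask over the sphere, so area = C00 * 4*pi*a^2. A quick numerical check with a polar cap above 60N (only numpy and pyshtools are assumed; the cap is purely illustrative):

import numpy as np
from pyshtools.expand import SHExpandDH

nlat = 180
lats = np.linspace(90, -90, nlat, endpoint=False)          # Driscoll-Healy latitudes
mask = np.repeat((lats > 60)[:, None], 2 * nlat, axis=1).astype(float)

a = 6371.0                                                  # km
area = SHExpandDH(mask, sampling=2)[0, 0, 0] * 4 * np.pi * a**2
exact = 2 * np.pi * a**2 * (1 - np.cos(np.radians(30)))     # analytic cap area
print(area, exact)                                          # agree to within a few percent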
Example #5
def space_domain(grids, nodes, research_boundary, r):
    '''
    Space-domain method used to perform signal leakage correction in GRACE data processing.

    Usage:
    Ms,Ms_std = space_domain(grids,nodes,research_boundary,r)

    Inputs:
    grids -> [object] instance of class Grid
    nodes -> [object] grid nodes that represent mascons in the study area. The nodes are described as a set of discrete grid points [[lat0,lon0],..,[latn,lonn]].
    research_boundary -> [float 2d array] study area enclosed by a polygon, defined as a series of points [[lat0,lon0],..,[latn,lonn]].
    r -> [float] Gaussian filter radius

    Outputs:
    Ms -> [float 2d array] series of mascons or rates of mascons. The first dimension is time and the second dimension is the mascon value.
    If the size of the first dimension is 1, the values are mascon rates.
    Ms_std -> [float 2d array] standard deviation for the series of mascons or mascon rates.
    '''

    psf_grids_gau, mas = [], []
    lats_flag, lons_flag = grids.lats_flag, grids.lons_flag
    nlat = len(lats_flag)
    lmax = int(nlat / 2 - 1)

    if grids.region != nodes.region:
        raise Exception(
            'The range of the nodes should be consistent with the range of the grid.'
        )

    for node in nodes.nodes:

        psf_grid_class = SHGrid.from_cap(0.1, node[0], node[1], lmax)
        psf_grid = psf_grid_class.data

        psf_shc = SHExpandDH(psf_grid, sampling=2)
        psf_shc_gau = filter_gaussian(r, psf_shc)
        psf_grid_gau = MakeGridDH(psf_shc_gau, sampling=2)

        psf_grids_gau.append(psf_grid_gau)
    psf_grids_gau = np.array(psf_grids_gau)
    north_pole = create_polygon(research_boundary).contains_points(
        [[90, 0]])[0]  # Determine whether the North Pole lies inside the study area
    mask_boundary = Curve2Mask(nlat, research_boundary, north_pole, sampling=2)
    psf_grids_simply = psf_grids_gau[:, mask_boundary.astype(bool)]

    if grids.region != 'globe':
        mask_boundary = mask_boundary[lats_flag][:, lons_flag]

    grids_simply = grids.grids[:, mask_boundary.astype(bool)]

    A = psf_grids_simply.T
    for grid_simply in grids_simply:
        lamb, ma = L_curve(A, grid_simply)
        mas.append(ma)
    mas = np.array(mas)
    return mas, np.zeros_like(mas)
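filter_gaussian is an external helper of this project; in GRACE processing the Gaussian filter is commonly implemented with Jekeli's degree-dependent weights, which could look roughly like the following sketch (an assumption about the helper, not its actual implementation):

import numpy as np

def filter_gaussian_sketch(r_km, shc, a_km=6371.0):
    # Jekeli-style Gaussian smoothing weights W_l, applied per degree l
    lmax = shc.shape[1] - 1
    b = np.log(2.0) / (1.0 - np.cos(r_km / a_km))
    w = np.zeros(lmax + 1)
    w[0] = 1.0
    w[1] = (1.0 + np.exp(-2.0 * b)) / (1.0 - np.exp(-2.0 * b)) - 1.0 / b
    for l in range(1, lmax):
        w[l + 1] = -(2 * l + 1) / b * w[l] + w[l - 1]
        if w[l + 1] < 0.0:            # the recursion becomes unstable at high degrees
            w[l + 1:] = 0.0
            break
    return shc * w[None, :, None]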
Example #6
def compute_grand_pot(rho,pot_,BETA,MUEX,TRIAL_RHO):
	"computes the grand potential value from the density profile."
	
	rho_ = SHExpandDH(rho, sampling=2, norm=4)
	
	Fid = integrate(rho*( np.log(rho/TRIAL_RHO) - BETA*MUEX ))

	Fex = convolve_product(pot_, rho_, rho_, BETA)
	
	return Fid + Fex
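integrate and convolve_product are project-specific helpers not shown here; the surface integral over a sampling=2 Driscoll-Healy grid can be sketched as follows (an assumption about what integrate does):

import numpy as np

def integrate_sketch(field):
    # approximate surface integral of field(theta, phi) over the unit sphere,
    # for a Driscoll-Healy grid of shape (n, 2n) with colatitude rows starting at 0
    n = field.shape[0]
    theta = np.pi * np.arange(n) / n
    dtheta, dphi = np.pi / n, np.pi / n
    return np.sum(field * np.sin(theta)[:, None]) * dtheta * dphi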
Example #7
def rastio2shDH(rio, nmax=100):
    """Converts an equidistant (sub) grid to spherical harmonic 4-pi normalized coefficients, using the Driscoll Healy approach using shtools"""

    if rio.crs.to_epsg() != 4326:
        raise NotImplementedError("Cannot yet handle non crs-4326 projections")

    if rio.transform[1] != 0 or rio.transform[3] != 0:
        raise NotImplementedError("Cannot yet handle skewed transforms")

    ddist = rio.transform[0]

    if abs(rio.transform[4]) != ddist:
        raise NotImplementedError("Cannot yet handle non equidistant rasters")

    # we need to check whether the grid is correctly aligned
    x0, y0 = rio.xy(0, 0)

    if x0 < 0:
        shft = -180.0
    else:
        shft = 0

    if x0 % ddist != 0 or y0 % ddist != 0:
        raise NotImplementedError(
            "rasterio is not correctly aligned for global gridline registered grid"
        )

    ithPixFromLeft = int((x0 - shft) / ddist)
    ithPixFrom90N = int((90 - y0) / ddist)

    gheight = int(180 / ddist)
    gwidth = 2 * gheight

    #construct a global numpy grid (extend the grid to 0,360 and -90,90)
    globrast = rio.read(1,
                        window=((-ithPixFrom90N, gheight - ithPixFrom90N),
                                (-ithPixFromLeft, gwidth - ithPixFromLeft)),
                        boundless=True)
    if shft == -180.0:
        #shift the array to 0 360 (which is what SHExpandDH expects)
        globrast = np.roll(globrast, int(gwidth / 2), axis=1)

    clm = SHExpandDH(globrast, sampling=2, lmax_calc=nmax)

    #convert to Guided Array
    shg = makeGArray(SHGuide(nmax))

    for i, nmt in enumerate(shg.gp[0]):
        n, m, t = nmt
        shg.mat[i] = clm[int(t), n, m]

    return shg
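A possible call, assuming a global gridline-registered raster in EPSG:4326; the file name is illustrative only, and makeGArray/SHGuide come from the same project as the function above:

import rasterio

with rasterio.open("global_grid.tif") as rio:
    shg = rastio2shDH(rio, nmax=100)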
Example #8
def compute_grad(pot_, rho, TRIAL_RHO, BETA, MUEX):
	"computes the gradient of the grand canonical potential."
	
	rho_ = SHExpandDH(rho, sampling=2, norm=4)			# compute distro expansion in spherical harmonics
									#		     v
	grad_id = np.zeros(rho.shape, order='F')			#       define grad_id and grad_ex_
	grad_ex_= np.zeros(rho_.shape, order='F')			#		     v
	
	dfts.compute_grad_id(rho, grad_id, TRIAL_RHO, BETA, MUEX)	# compute grad_id = ln(rho) - ( ln(rho_trial) + beta*muex )
	dfts.convolve(pot_, rho_, grad_ex_, BETA)			# compute grad_ex_ by convolution theorem
	grad_ex = MakeGridDH(grad_ex_, sampling=2, norm=4)		# compute grad_ex from its spherical harmonics coefficients
									#		     v
	return grad_id + grad_ex					# return the total gradient as the sum of the two terms
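dfts.compute_grad_id and dfts.convolve are compiled helpers of this project; judging from the comment above, the ideal-gas part could be written in plain NumPy roughly as (a sketch, not the project's implementation):

import numpy as np

def compute_grad_id_sketch(rho, trial_rho, beta, muex):
    # grad_id = ln(rho) - (ln(rho_trial) + beta*muex), evaluated element-wise on the grid
    return np.log(rho / trial_rho) - beta * muex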
Example #9
def compute_step(pot_, rho, psi, grad, BETA):
	"computes the best step to use."
	
	fieldA = psi*grad						# compute vector of the products of psi and grad
	numerator = integrate(fieldA)					# compute the first element of the step
									#		v
	fieldB = psi*psi/rho						# compute the vector of the products psi^2/rho
	denominator = integrate(fieldB)					# compute the first term of the denominator
									#		v
									#		v	
	psi_ = SHExpandDH(psi, sampling=2, norm=4)			# coefficients of the SH expansion of psi
	denominator += convolve_product(pot_,psi_,psi_,BETA)		# compute the last term of the denominator
									#		v
	return numerator/denominator					# return the best step as num/den
Example #10
def showReYU2d(vertices, facet, coef_num_sqrt=13):
    """
	visualize the reconstruction of y(u) with truncated spherical system 
	para::coef_num_sqrt: the square root of desired number of the dimensions of the 
	    shape of the mesh which is also the number of the truncated coefficients
	"""
    # get the sample value of y(u)
    zi = gridSampleYU(vertices, facet)
    # generate the spherical harmonics coefficients
    coeffs = SHExpandDH(zi, sampling=2)
    # zero all coefficients of degree >= coef_num_sqrt (low-pass truncation)
    coeffs[:, coef_num_sqrt:, :] = 0
    # reconstruct y(u) with truncated coefficients
    zi_filtered = MakeGridDH(coeffs, sampling=2)
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    ax.imshow(zi_filtered, extent=(-180, 180, -90, 90), cmap='viridis')
    plt.show()
Example #11
def forward_model_initial(FM0_grid, r):
    '''
    Expand the grid data to spherical harmonic coefficients, apply Gaussian filtering, then transform the result back to grid data.

    Usage:
    FM0_grid_gau = forward_model_initial(FM0_grid,r)

    Inputs:
    FM0_grid -> [float 2d array] initial grid data
    r -> [float] Gaussian filter radius
    
    Outputs:
    FM0_grid_gau -> [float 2d array] grid data after Gaussian filtering
    '''
    FM0_shc = SHExpandDH(FM0_grid, sampling=2)
    # set deg 0 and deg 1 to zero
    FM0_shc[:, :2, :] = 0
    FM0_shc_gau = filter_gaussian(r, FM0_shc)
    FM0_grid_gau = MakeGridDH(FM0_shc_gau, sampling=2)
    return FM0_grid_gau
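A possible call, assuming an nlat x 2*nlat equiangular grid and a Gaussian radius in whatever units filter_gaussian (an external helper of this project) expects; the random data is purely illustrative:

import numpy as np

FM0_grid = np.random.rand(180, 360)
FM0_grid_gau = forward_model_initial(FM0_grid, 300)
print(FM0_grid_gau.shape)            # (180, 360)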
Example #12
def sh_filter(data, lmax=4):
    from pyshtools.expand import SHExpandDH, MakeGrid2D, MakeGridDH
    import pyshtools as pysh
    import numpy as np
    cilm = SHExpandDH(data, lmax_calc=lmax, sampling=2)  #, lats, lons, lmax)
    #cilm[:, 0,:] = 0
    #cilm[:, 1,:] = 0

    # print(cilm)

    lons2 = np.arange(0, 361, 2.0)
    lats2 = np.arange(90, -91, -2.0)

    x, y = np.meshgrid(lons2, lats2)
    cilm[0, 2, 0] = 0.0
    # cilm2 = np.zeros(cilm.shape)
    # cilm2[0,2,2]
    clm = pysh.SHCoeffs.from_array(cilm)
    grid = clm.expand(lat=y, lon=x)

    #grid.nlat = 80
    #print(grid)
    return lons2, lats2, grid  #MakeGridDH(cilm, sampling=2)
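A possible call on a coarse global field (the random data and the resulting 2-degree output grid are illustrative; sh_filter itself only requires numpy and pyshtools):

import numpy as np

data = np.random.rand(90, 180)               # nlat x 2*nlat equiangular grid
lons, lats, grid = sh_filter(data, lmax=4)
print(grid.shape)                            # (91, 181): one value per (lat, lon) pair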
Example #13
def main():
	#####################INPUT#####################################
	try:
		FILE_PARM = sys.argv[1]
	except IndexError:
		print "A parameter file must be given from the command line."
		sys.exit(1)

	get_parm(FILE_PARM)	#initialize parameters

	#####################INIT######################################

	W_   = get_pot(FILE_POT,L_MAX)			

	rho_ = get_distro(FILE_DISTRO,L_MAX)
	
	MUEX = compute_muex(W_,TRIAL_RHO)
	
	N = L_MAX*2
	
	#####################MINIMIZE##################################

	rho  = MakeGridDH(rho_, sampling=2, norm=4)
	
	show(rho)
	
	rho  = minimize(W_, rho, TRIAL_RHO, BETA, MUEX, EPSILON, EPSILON_CHECK )

	show(rho)
	
	rho_ = SHExpandDH(rho, sampling=2, norm=4)
	
	C = sht.SHCoeffs.from_array(rho_) 

	fig, ax = C.plot_spectrum2d()
	
	write_distro(FILE_OUTPUT_DISTRO, rho_)
Example #14
def spectral_domain(shcs, nodes, research_boundary, r, mode=None, ratio=None):
    '''
    Spectral-domain method used to perform signal leakage correction in GRACE data processing.

    Usage:
    Ms,Ms_std = spectral_domain(shcs,nodes,research_boundary,r)
    Ms,Ms_std = spectral_domain(shcs,nodes,research_boundary,r,'window',ratio)

    Inputs:
    shcs -> [float 4d array] spherical harmonic coefficients before signal leakage correction
    nodes -> [object] grid nodes that represent mascons in the study area. The nodes are described as a set of discrete grid points [[lat0,lon0],..,[latn,lonn]].
    research_boundary -> [float 2d array] study area enclosed by a polygon, defined as a series of points [[lat0,lon0],..,[latn,lonn]].
    r -> [float] Gaussian filter radius

    Parameters:
    mode -> [optional, str, default = None] If None, the global spherical harmonic coefficients are used to fit the mascons. If 'window', the windowed (Slepian) spherical harmonic coefficients are used to fit the mascons.
    ratio -> [optional, float, default = None] Ratio of the study area to the global area, used to roughly estimate the Shannon number. If None, mode must also be None.

    Outputs:
    Ms -> [float 2d array] series of mascons or rates of mascons. The first dimension is time and the second dimension is the mascon value.
    If the size of the first dimension is 1, the values are mascon rates.
    Ms_std -> [float 2d array] standard deviation for the series of mascons or mascon rates.
    '''
    lmax = shcs.shape[-1] - 1
    nlat = (lmax + 1) * 2
    psf_shcs_gau, mas = [], []

    for node in nodes.nodes:

        psf_grid_class = SHGrid.from_cap(0.1, node[0], node[1], lmax)
        psf_grid = psf_grid_class.data

        psf_shc = SHExpandDH(psf_grid, sampling=2)
        psf_shc_gau = filter_gaussian(r, psf_shc)

        psf_shcs_gau.append(SHCilmToVector(psf_shc_gau))
    psf_shcs_gau = np.array(psf_shcs_gau)

    A = psf_shcs_gau.T

    if mode is None:
        for shc in shcs:
            y = SHCilmToVector(shc)
            lamb, ma = L_curve(A, y)
            mas.append(ma)
    else:
        mask_boundary = Curve2Mask(nlat, research_boundary, 0, sampling=2)

        # estimate the Shannon number
        N = np.ceil((lmax + 1)**2 * ratio)  # Rough Shannon number
        slepian = Slepian.from_mask(mask_boundary, lmax, N)
        slepian_tapers = slepian.tapers
        N = np.ceil(slepian.shannon)  # Accurate Shannon Number

        for shc in shcs:
            shc_class = SHCoeffs.from_array(shc)
            slepian_coeffs = slepian.expand(shc_class, N)
            shc_slepian = SlepianCoeffsToSH(slepian_coeffs.falpha,
                                            slepian_tapers, N)

            y = SHCilmToVector(shc_slepian)
            lamb, ma = L_curve(A, y)
            mas.append(ma)
    mas = np.array(mas)

    return mas, np.zeros_like(mas)
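L_curve is an external regularized least-squares helper used in both space_domain and spectral_domain; a rough stand-in based on Tikhonov regularization with an L-curve corner search might look like this (an assumption, not the project's implementation):

import numpy as np

def l_curve_sketch(A, y, lambdas=np.logspace(-10, 2, 50)):
    # Tikhonov solutions over a range of damping parameters; pick the L-curve
    # corner as the point of maximum curvature in log-log space
    sols, res, nrm = [], [], []
    for lam in lambdas:
        m = np.linalg.solve(A.T @ A + lam * np.eye(A.shape[1]), A.T @ y)
        sols.append(m)
        res.append(np.log(np.linalg.norm(A @ m - y)))
        nrm.append(np.log(np.linalg.norm(m)))
    x, z = np.array(res), np.array(nrm)
    dx, dz = np.gradient(x), np.gradient(z)
    ddx, ddz = np.gradient(dx), np.gradient(dz)
    curvature = np.abs(dx * ddz - dz * ddx) / (dx**2 + dz**2)**1.5
    i = int(np.argmax(curvature))
    return lambdas[i], sols[i]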
Example #15
def plot_concentric_shells_spherical_coords(image_by_slices,
                                            base_folder,
                                            idx_slices=None):
    """Plot the concentric shells for a given three-dimensional volumetric shape.

    The volumetric shape is the three-dimensional diffraction intensity, as calculated by
    :py:mod:`ai4materials.descriptors.diffraction3d.Diffraction3D`.


    Parameters:

    image_by_slices: np.ndarray, shape [n_slices, theta_bins_fine, phi_bins_fine]
        Three-dimensional array containing each concentric shell obtained in spherical coordinate, as calculated by
        :py:mod:`ai4materials.descriptors.diffraction3d.Diffraction3D`
        ``n_slices``, ``theta_bins_fine``, ``phi_bins_fine`` are given by the interpolation and the region of the space
        considered. In our case, ``n_slices=52``, ``theta_bins_fine=256``, ``phi_bins_fine=512``, as defined in
        :py:mod:`ai4materials.descriptors.diffraction3d.Diffraction3D` in ``phi_bins_fine`` and ``theta_bins_fine``.

    base_folder: str
        Folder in which to save the generated figures. The figures are saved in the subfolder ``shells_png`` of
        ``base_folder``.

    idx_slices: list of int, optional (default=None)
        List of integers defining which concentric shells to plot.
        If `None`, all concentric shells - in spherical coordinates - are plotted.

    .. codeauthor:: Angelo Ziletti <*****@*****.**>

    """

    if idx_slices is None:
        idx_slices = range(image_by_slices.shape[0])

    # create folder for saving files
    shells_images_folder = os.path.join(base_folder, 'shells_png')
    if not os.path.exists(shells_images_folder):
        os.makedirs(shells_images_folder)

    filename_png_list = []
    for idx_slice in idx_slices:
        filename_png = os.path.join(
            shells_images_folder,
            'desc_sph_coords_slice' + str(idx_slice) + '.png')
        filename_png_list.append(filename_png)

        logger.debug("Slide idx: {}".format(idx_slice))
        logger.debug("Image max: {}".format(image_by_slices[idx_slice].max()))

        coeffs = SHExpandDH(image_by_slices[idx_slice], sampling=2)

        coeffs_filtered = coeffs.copy()

        imgs = [
            MakeGridDH(coeffs_filtered[:, :, :], sampling=2),
            MakeGridDH(coeffs_filtered[:, :16, :], sampling=2),
            MakeGridDH(coeffs_filtered[:, :32, :], sampling=2),
            MakeGridDH(coeffs_filtered[:, :64, :], sampling=2)
        ]

        fig, axes = plt.subplots(nrows=2, ncols=2)
        for idx_ax, ax in enumerate(axes.flat):
            im = ax.imshow(imgs[idx_ax], interpolation='none')

        fig.subplots_adjust(right=0.8)
        cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
        fig.colorbar(im, cax=cbar_ax)

        plt.savefig(filename_png, dpi=100, format="png")
Example #16
#plt.plot((1.0/np.pi)*lon,(1.0/np.pi)*lat,'.')
#plt.xlim([-1.0,+1.0])
#plt.ylim([-0.5,+0.5])
#plt.show()

levs = np.logspace(-8, 4, 200)
print(levs)

xx = np.linspace(-1.0 * np.pi, +1.0 * np.pi, nLon)
yy = np.linspace(-0.5 * np.pi, +0.5 * np.pi, nLat)
XX, YY = np.meshgrid(xx, yy)
#plt.contourf(XX, YY, ke2, 100, locator=ticker.LogLocator())
plt.contourf(XX, YY, ke2, norm=LogNorm(), levels=levs)
plt.colorbar(orientation='horizontal',
             ticks=[1.0e-8, 1.0e-6, 1.0e-4, 1.0e-2, 1.0e+0, 1.0e+2, 1.0e+4])
plt.savefig('kinetic_energy_galewsky_7days.png')
plt.show()

coeffs = SHExpandDH(ke2, sampling=2)
power = spectrum(coeffs, unit='per_l')
nl = coeffs.shape[1] // 2
plt.loglog(np.arange(nl), power[:nl])
#plt.loglog(np.arange(nl-6)+6, 1.0e+7*np.power(np.arange(nl-6)+6,-3.0))
plt.loglog(
    np.arange(nl - 6) + 6, 8.0e+6 * np.power(np.arange(nl - 6) + 6, -3.0))
#plt.loglog([50.0,50.0],[1.0e+5,1.0e-1])
plt.xlabel('$k$')
plt.ylabel('$KE$')
plt.savefig('ke_spectra_galewsky_7days.png')
plt.show()
Example #17
    def calculate(self,
                  structure,
                  min_nb_atoms=20,
                  plot_3d=False,
                  plot_slices=False,
                  plot_slices_sph_coords=False,
                  save_diff_intensity=True,
                  **kwargs):
        """Calculate the descriptor for the given ASE structure.

        Parameters:

        structure: `ase.Atoms` object
            Atomic structure.

        min_nb_atoms: int, optional (default=20)
            If the structure contains fewer than ``min_nb_atoms`` atoms, the descriptor is not calculated and an array of
            zeros is returned as the descriptor. This is because the descriptor is expected to no longer be meaningful for
            such a small number of atoms in the chosen structure.

        """

        if len(structure) > min_nb_atoms - 1:

            atoms = scale_structure(
                structure,
                scaling_type=self.atoms_scaling,
                atoms_scaling_cutoffs=self.atoms_scaling_cutoffs,
                extrinsic_scale_factor=self.extrinsic_scale_factor)

            # Source
            src = condor.Source(**self.param_source)

            # Detector
            # solid_angle_correction are meaningless for 3d diffraction
            det = condor.Detector(solid_angle_correction=False,
                                  **self.param_detector)

            # Atoms
            atomic_numbers = [atom.number + 5 for atom in atoms]
            # atomic_numbers = [82 for atomic_number in atomic_numbers]

            # convert Angstrom to m (CONDOR uses meters)
            atomic_positions = [[atom.x * 1E-10, atom.y * 1E-10, atom.z * 1E-10]
                                for atom in atoms]

            par = condor.ParticleAtoms(atomic_numbers=atomic_numbers,
                                       atomic_positions=atomic_positions)

            s = "particle_atoms"
            condor_exp = condor.Experiment(src, {s: par}, det)
            res = condor_exp.propagate3d()

            # retrieve some physical quantities that might be useful for users
            intensity = res["entry_1"]["data_1"]["data"]
            fourier_space = res["entry_1"]["data_1"]["data_fourier"]
            phases = np.angle(fourier_space) % (2 * np.pi)

            # 3D diffraction calculation
            real_space = np.fft.fftshift(
                np.fft.ifftn(
                    np.fft.fftshift(res["entry_1"]["data_1"]["data_fourier"])))
            window = get_window(self.window, self.n_px)
            tot_density = window * real_space.real
            center_of_mass = ndimage.measurements.center_of_mass(tot_density)
            logger.debug("Tot density data dimensions: {}".format(
                tot_density.shape))
            logger.debug(
                "Center of mass of total density: {}".format(center_of_mass))

            # take the fourier transform of structure in real_space
            fft_coeff = fftpack.fftn(tot_density,
                                     shape=(self.nx_fft, self.ny_fft,
                                            self.nz_fft))

            # now shift the quadrants around so that low spatial frequencies are in
            # the center of the 2D fourier transformed image.
            fft_coeff_shifted = fftpack.fftshift(fft_coeff)

            # calculate a 3D power spectrum
            power_spect = np.abs(fft_coeff_shifted)**2

            if self.use_mask:
                xc = (self.nx_fft - 1.0) / 2.0
                yc = (self.ny_fft - 1.0) / 2.0
                zc = (self.nz_fft - 1.0) / 2.0

                # spherical mask
                a, b, c = xc, yc, zc
                x, y, z = np.ogrid[-a:self.nx_fft - a, -b:self.ny_fft - b,
                                   -c:self.nz_fft - c]

                mask_int = x * x + y * y + z * z <= self.mask_r_min * self.mask_r_min
                mask_out = x * x + y * y + z * z >= self.mask_r_max * self.mask_r_max

                for i in range(self.nx_fft):
                    for j in range(self.ny_fft):
                        for k in range(self.nz_fft):
                            if mask_int[i, j, k]:
                                power_spect[i, j, k] = 0.0
                            if mask_out[i, j, k]:
                                power_spect[i, j, k] = 0.0

            # cut the spectrum and keep only the relevant part for crystal-structure recognition of
            # hexagonal closed packed (spacegroup=194)
            # simple cubic (spacegroup=221)
            # face centered cubic (spacegroup=225)
            # diamond (spacegroup=227)
            # body centered cubic (spacegroup=229)
            # this interval (20:108) might need to be varied if other classes are added
            power_spect_cut = power_spect[20:108, 20:108, 20:108]
            # zoom by two times using spline interpolation
            power_spect = ndimage.zoom(power_spect_cut, (2, 2, 2))

            if save_diff_intensity:
                np.save(
                    '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/power_spect.npy',
                    power_spect)

            # power_spect.shape = 176, 176, 176
            if plot_3d:
                plot_3d_volume(power_spect)

            vox = np.copy(power_spect)
            logger.debug("nan in data: {}".format(
                np.count_nonzero(~np.isnan(vox))))

            # optimized
            # these specifications are valid for a power_spect = power_spect[20:108, 20:108, 20:108]
            # and a magnification of 2
            xyz_indices_r = get_slice_volume_indices(
                vox,
                min_r=32.0,
                dr=1.0,
                max_r=83.,
                phi_bins=self.phi_bins,
                theta_bins=self.theta_bins)

            # slow - only for benchmarking the fast implementation below (shells_to_sph, interp_theta_phi_surfaces)
            # (vox_by_slices, theta_phi_by_slices) = _slice_3d_volume_slow(vox)

            # convert 3d shells
            (vox_by_slices, theta_phi_by_slices) = get_shells_from_indices(
                xyz_indices_r, vox)
            if plot_slices:
                plot_concentric_shells(
                    vox_by_slices,
                    base_folder=self.configs['io']['main_folder'],
                    idx_slices=None,
                    create_animation=False)

            image_by_slices = interp_theta_phi_surfaces(
                theta_phi_by_slices,
                theta_bins=self.theta_bins_fine,
                phi_bins=self.phi_bins_fine)

            if plot_slices_sph_coords:
                plot_concentric_shells_spherical_coords(
                    image_by_slices,
                    base_folder=self.configs['io']['main_folder'],
                    idx_slices=None)

            coeffs_list = []
            nl_list = []
            ls_list = []

            for idx_slice in range(image_by_slices.shape[0]):
                logger.debug("img #{} max: {}".format(
                    idx_slice, image_by_slices[idx_slice].max()))

                # set to zero the spherical harmonics coefficients above self.sph_l_cutoff
                coeffs = SHExpandDH(image_by_slices[idx_slice], sampling=2)
                coeffs_filtered = coeffs.copy()
                coeffs_filtered[:, self.sph_l_cutoff:, :] = 0.
                coeffs = coeffs_filtered.copy()

                nl = coeffs.shape[0]
                ls = np.arange(nl)
                coeffs_list.append(coeffs)
                nl_list.append(nl)
                ls_list.append(ls)

            coeffs = np.asarray(coeffs_list).reshape(image_by_slices.shape[0],
                                                     coeffs.shape[0],
                                                     coeffs.shape[1],
                                                     coeffs.shape[2])

            sh_coeffs_list = []

            for idx_slice in range(coeffs.shape[0]):
                sh_coeffs = SHCoeffs.from_array(coeffs[idx_slice])
                sh_coeffs_list.append(sh_coeffs)

            sh_spectrum_list = []
            for sh_coeff in sh_coeffs_list:
                sh_spectrum = sh_coeff.spectrum(convention='l2norm')
                sh_spectrum_list.append(sh_spectrum)

            sh_spectra = np.asarray(sh_spectrum_list).reshape(
                coeffs.shape[0], -1)

            # cut the spherical harmonics expansion to sph_l_cutoff order
            logger.debug(
                'Spherical harmonics spectra maximum before normalization: {}'.
                format(sh_spectra.max()))
            sh_spectra = sh_spectra[:, :self.sph_l_cutoff]
            sh_spectra = (sh_spectra - sh_spectra.min()) / (sh_spectra.max() -
                                                            sh_spectra.min())

            # add results in ASE structure info
            descriptor_data = dict(descriptor_name=self.name,
                                   descriptor_info=str(self),
                                   diffraction_3d_sh_spectrum=sh_spectra)

        else:
            # return array with zeros for structures with less than min_nb_atoms
            sh_spectra = np.zeros((52, int(self.sph_l_cutoff)))
            descriptor_data = dict(descriptor_name=self.name,
                                   descriptor_info=str(self),
                                   diffraction_3d_sh_spectrum=sh_spectra)

        structure.info['descriptor'] = descriptor_data

        return structure