Example #1
    def set_data(self, data: DataType, **kwargs):
        original_data = normalize_to_spectrum(data)
        self.original_data = original_data

        if len(original_data.dims) > 2:
            # Integrate a thin window about the Fermi level to produce a
            # constant-energy (Fermi surface) cut for conversion.
            assert 'eV' in original_data.dims
            data = original_data.sel(eV=slice(-0.05, 0.05)).sum('eV', keep_attrs=True)
            data.coords['eV'] = 0
        else:
            data = original_data

        if 'eV' in data.dims:
            data = data.S.transpose_to_back('eV')

        self.data = data.copy(deep=True)

        if not kwargs:
            # Default conversion ranges: low photon energies reach smaller
            # momenta, so shrink the k-space extent accordingly.
            rng_mul = 1
            if data.coords['hv'] < 12:
                rng_mul = 0.5
            if data.coords['hv'] < 7:
                rng_mul = 0.25

            if 'eV' in self.data.dims:
                kwargs = {
                    'kp': np.linspace(-2, 2, 400) * rng_mul,
                }
            else:
                kwargs = {
                    'kx': np.linspace(-3, 3, 300) * rng_mul,
                    'ky': np.linspace(-3, 3, 300) * rng_mul,
                }

        self.conversion_kwargs = kwargs
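A minimal, self-contained sketch of the Fermi-surface extraction step above, using plain xarray on synthetic data (the array and its coordinates here are illustrative, not part of the codebase):

import numpy as np
import xarray as xr

# Synthetic 3D cube: two angles plus binding energy
eV = np.linspace(-0.5, 0.1, 121)
phi = np.linspace(-0.3, 0.3, 61)
beta = np.linspace(-0.2, 0.2, 41)
spectrum = xr.DataArray(
    np.random.poisson(10, (len(beta), len(phi), len(eV))).astype(float),
    coords={'beta': beta, 'phi': phi, 'eV': eV},
    dims=['beta', 'phi', 'eV'])

# Integrate a +/-50 meV window about the Fermi level, as set_data does
fermi_surface = spectrum.sel(eV=slice(-0.05, 0.05)).sum('eV', keep_attrs=True)
fermi_surface.coords['eV'] = 0
print(fermi_surface.dims)  # ('beta', 'phi')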
Example #2
def normalize_sarpes_photocurrent(data: DataType):
    """
    Normalizes the down channel so that it matches the up channel in terms of mean photocurrent. Destroys the integrity
    of "count" data.

    :param data:
    :return:
    """

    copied = data.copy(deep=True)
    copied.down.values = (
        copied.down *
        (copied.photocurrent_up / copied.photocurrent_down)).values
    return copied
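The same normalization can be exercised on a synthetic SARPES-like Dataset; the variable names below mirror the ones the function assumes (`up`, `down`, `photocurrent_up`, `photocurrent_down`), but the data itself is made up:

import numpy as np
import xarray as xr

n = 200
ds = xr.Dataset({
    'up': ('eV', np.random.poisson(100, n).astype(float)),
    'down': ('eV', np.random.poisson(80, n).astype(float)),
    'photocurrent_up': ('eV', np.full(n, 1.0)),
    'photocurrent_down': ('eV', np.full(n, 0.8)),
}, coords={'eV': np.linspace(-1, 0.2, n)})

normalized = ds.copy(deep=True)
normalized.down.values = (
    normalized.down *
    (normalized.photocurrent_up / normalized.photocurrent_down)).values

# The down channel is scaled by the photocurrent ratio (here 1/0.8 = 1.25)
print(float(normalized.down.mean() / ds.down.mean()))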
Example #3
def apply_mask(data: DataType, mask, replace=np.nan, radius=None, invert=False):
    """
    Applies a logical mask, i.e. one given in terms of polygons, to a specific
    piece of data. This can be used to set values outside or inside a series of
    polygon masks to a given value or to NaN.

    Expanding or contracting the masked region can be accomplished with the
    radius argument, but by default strict inclusion is used.

    Some masks include a `fermi` parameter which allows for clipping the detector
    boundaries in a semi-automated fashion. If this is included, only data up to
    200 meV above the Fermi level will be retained in the returned data. This helps
    to prevent very large and undesirable regions filled with only the replacement
    value, which can complicate automated analyses that rely on masking.

    :param data: Data to mask.
    :param mask: Logical definition of the mask, appropriate for passing to `polys_to_mask`
    :param replace: The value to substitute for masked pixels.
    :param radius: Radius by which to expand the masked area.
    :param invert: Allows logical inversion of the masked parts of the data. By default,
                   the area inside the polygon sequence is replaced by `replace`.
    :return:
    """
    data = normalize_to_spectrum(data)
    fermi = None

    if isinstance(mask, dict):
        # Only dict-style masks carry extra options like 'fermi' and 'dims'
        fermi = mask.get('fermi')
        dims = mask.get('dims', data.dims)
        mask = polys_to_mask(
            mask, data.coords,
            [s for i, s in enumerate(data.shape) if data.dims[i] in dims],
            radius=radius, invert=invert)

    masked_data = data.copy(deep=True)
    masked_data.values = masked_data.values * 1.0
    masked_data.values[mask] = replace

    if fermi is not None:
        return masked_data.sel(eV=slice(None, fermi + 0.2))

    return masked_data
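`polys_to_mask` is not shown here, but the polygon containment test it presumably performs can be approximated standalone with `matplotlib.path.Path`; a sketch of masking a polygonal region to NaN (the polygon and grid are illustrative):

import numpy as np
import matplotlib.path

# Grid of sample points
xs, ys = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
pts = np.column_stack([xs.ravel(), ys.ravel()])

# A triangular polygon; contains_points also accepts a radius= kwarg,
# which mirrors the radius argument of apply_mask above
poly = matplotlib.path.Path([(0.2, 0.2), (0.8, 0.2), (0.5, 0.8)])
mask = poly.contains_points(pts).reshape(xs.shape)

data = np.random.rand(*xs.shape)
masked = data.copy()
masked[mask] = np.nan  # replace masked pixels, as apply_mask does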
Example #4
def _shift_energy_interpolate(data: DataType, shift=None):
    """
    Shifts the energy axis by `shift`, linearly interpolating between
    neighboring values for the sub-stride part of the shift. By default the
    shift is chosen so that the coordinate closest to eV=0 lands exactly at
    zero. (Arbitrary shifts were originally guarded by a NotImplementedError;
    the guard was disabled.)
    """

    data = normalize_to_spectrum(data).S.transpose_to_front('eV')

    new_data = data.copy(deep=True)
    new_axis = new_data.coords['eV']
    new_values = new_data.values * 0

    if shift is None:
        closest_to_zero = data.coords['eV'].sel(eV=0, method='nearest')
        shift = -closest_to_zero

    stride = data.T.stride('eV', generic_dim_names=False)

    if np.abs(shift) >= stride:
        # Handle whole strides by moving the axis itself; only the
        # sub-stride remainder needs interpolation.
        n_strides = int(shift / stride)
        new_axis = new_axis + n_strides * stride

        shift = shift - stride * n_strides

    new_axis = new_axis + shift

    # Fractional interpolation weight; use the magnitude so that the blend
    # below is an interpolation for either sign of the shift.
    weight = float(np.abs(shift) / stride)

    new_values = new_values + data.values * (1 - weight)
    if shift > 0:
        new_values[1:] = new_values[1:] + data.values[:-1] * weight
    if shift < 0:
        new_values[:-1] = new_values[:-1] + data.values[1:] * weight

    new_data.coords['eV'] = new_axis
    new_data.values = new_values

    return new_data
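The interpolation above amounts to a whole-stride shift of the axis plus a fractional linear blend of neighboring samples. A standalone numpy sketch of the same idea (the helper name is illustrative):

import numpy as np

def shift_interpolate_1d(values, axis, shift):
    """Shift a uniformly sampled 1D signal by `shift` along its axis."""
    stride = axis[1] - axis[0]
    n_strides = int(shift / stride)    # whole strides: move the axis itself
    frac = shift - n_strides * stride  # sub-stride remainder to interpolate
    new_axis = axis + n_strides * stride + frac

    weight = abs(frac) / stride
    new_values = values * (1 - weight)
    if frac > 0:
        new_values[1:] += values[:-1] * weight
    elif frac < 0:
        new_values[:-1] += values[1:] * weight
    return new_axis, new_values

axis = np.linspace(-1, 1, 201)
shifted_axis, shifted = shift_interpolate_1d(np.exp(-axis ** 2), axis, 0.013)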
Example #5
def convert_coordinates_to_kspace_forward(arr: DataType, **kwargs):
    """
    Forward converts all the individual coordinates of the data array
    :param arr:
    :param kwargs:
    :return:
    """

    arr = arr.copy(deep=True)

    skip = {'eV', 'cycle', 'delay', 'T'}
    keep = {'eV'}

    all_indexes = {k: v for k, v in arr.indexes.items() if k not in skip}
    kept = {k: v for k, v in arr.indexes.items() if k in keep}

    old_dims = sorted(all_indexes.keys())

    if not old_dims:
        return None

    # Which momentum coordinates to produce, keyed by the sorted tuple of
    # angular/scan dimensions present in the data.
    dest_coords = {
        ('phi',): ['kp', 'kz'],
        ('theta',): ['kp', 'kz'],
        ('beta',): ['kp', 'kz'],
        ('phi', 'theta'): ['kx', 'ky', 'kz'],
        ('beta', 'phi'): ['kx', 'ky', 'kz'],
        ('hv', 'phi'): ['kx', 'ky', 'kz'],
        ('hv',): ['kp', 'kz'],
        ('beta', 'hv', 'phi'): ['kx', 'ky', 'kz'],
        ('hv', 'phi', 'theta'): ['kx', 'ky', 'kz'],
        ('hv', 'phi', 'psi'): ['kx', 'ky', 'kz'],
        ('chi', 'hv', 'phi'): ['kx', 'ky', 'kz'],
    }.get(tuple(old_dims))

    if dest_coords is None:
        # Unrecognized combination of angular dimensions
        return None

    full_old_dims = old_dims + list(kept.keys())
    projection_vectors = np.ndarray(
        shape=tuple(len(arr.coords[d]) for d in full_old_dims), dtype=object)

    # These are a little special: depending on the scan type we might not have
    # a phi coordinate. That aspect is broken for now, but it does not matter here.
    def broadcast_by_dim_location(data, target_shape, dim_location=None):
        if isinstance(data, xr.DataArray):
            if not data.dims:
                data = data.item()

        if isinstance(data, (int, float)):
            return np.ones(target_shape) * data

        # else we are dealing with an actual array
        the_slice = [None] * len(target_shape)
        the_slice[dim_location] = slice(None, None, None)

        # NumPy requires a tuple, not a list, for this kind of slicing
        return np.asarray(data)[tuple(the_slice)]

    raw_coords = {
        'phi': arr.coords['phi'].values - arr.S.phi_offset,
        'beta': (0 if arr.coords['beta'] is None
                 else arr.coords['beta'].values) - arr.S.beta_offset,
        'theta': (0 if arr.coords['theta'] is None
                  else arr.coords['theta'].values) - arr.S.theta_offset,
        'hv': arr.coords['hv'],
    }

    raw_coords = {
        k: broadcast_by_dim_location(
            v, projection_vectors.shape,
            full_old_dims.index(k) if k in full_old_dims else None)
        for k, v in raw_coords.items()
    }

    # fill in the vectors
    binding_energy = broadcast_by_dim_location(
        arr.coords['eV'] - arr.S.work_function, projection_vectors.shape,
        full_old_dims.index('eV') if 'eV' in full_old_dims else None)
    photon_energy = broadcast_by_dim_location(
        arr.coords['hv'], projection_vectors.shape,
        full_old_dims.index('hv') if 'hv' in full_old_dims else None)
    kinetic_energy = binding_energy + photon_energy

    inner_potential = arr.S.inner_potential

    # some notes on angle conversion:
    # BL4 conventions
    # angle conventions are standard:
    # phi = analyzer acceptance
    # polar = perpendicular scan angle
    # theta = parallel to analyzer slit rotation angle

    # [ 1  0          0          ]   [  cos(polar) 0 sin(polar) ]   [ 0          ]
    # [ 0  cos(theta) sin(theta) ] * [  0          1 0          ] * [ k sin(phi) ]
    # [ 0 -sin(theta) cos(theta) ]   [ -sin(polar) 0 cos(polar) ]   [ k cos(phi) ]
    #
    # =
    #
    # [ 1  0          0          ]     [ sin(polar) * cos(phi) ]
    # [ 0  cos(theta) sin(theta) ] * k [ sin(phi) ]
    # [ 0 -sin(theta) cos(theta) ]     [ cos(polar) * cos(phi) ]
    #
    # =
    #
    # k ( sin(polar) * cos(phi),
    #     cos(theta)*sin(phi) + cos(polar) * cos(phi) * sin(theta),
    #     -sin(theta) * sin(phi) + cos(theta) * cos(polar) * cos(phi),
    #   )
    #
    # main chamber conventions, with no analyzer rotation (referred to as the alpha angle in the Igor code)
    # angle conventions are standard:
    # phi = analyzer acceptance
    # polar = perpendicular scan angle
    # theta = parallel to analyzer slit rotation angle

    # [ 1 0 0                    ]     [ sin(phi + theta) ]
    # [ 0 cos(polar) sin(polar)  ] * k [ 0                ]
    # [ 0 -sin(polar) cos(polar) ]     [ cos(phi + theta) ]
    #
    # =
    #
    # k (sin(phi + theta), cos(phi + theta) * sin(polar), cos(phi + theta) cos(polar), )
    #

    # for now we are setting the theta angle to zero, this only has an effect for vertical slit analyzers,
    # and then only when the tilt angle is very large

    # TODO check me
    raw_translated = {
        'kx': euler_to_kx(kinetic_energy, raw_coords['phi'], raw_coords['beta'],
                          theta=0, slit_is_vertical=arr.S.is_slit_vertical),
        'ky': euler_to_ky(kinetic_energy, raw_coords['phi'], raw_coords['beta'],
                          theta=0, slit_is_vertical=arr.S.is_slit_vertical),
        'kz': euler_to_kz(kinetic_energy, raw_coords['phi'], raw_coords['beta'],
                          theta=0, slit_is_vertical=arr.S.is_slit_vertical,
                          inner_potential=inner_potential),
    }

    if 'kp' in dest_coords:
        # Give kp the sign of the dominant in-plane component so that it is a
        # signed coordinate rather than a bare magnitude.
        if np.sum(raw_translated['kx']**2) > np.sum(raw_translated['ky']**2):
            sign = raw_translated['kx'] / np.sqrt(raw_translated['kx']**2 + 1e-8)
        else:
            sign = raw_translated['ky'] / np.sqrt(raw_translated['ky']**2 + 1e-8)

        raw_translated['kp'] = np.sqrt(raw_translated['kx']**2 +
                                       raw_translated['ky']**2) * sign

    data_vars = {}
    for dest_coord in dest_coords:
        data_vars[dest_coord] = (full_old_dims,
                                 np.squeeze(raw_translated[dest_coord]))

    return xr.Dataset(data_vars, coords=arr.indexes)
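Underneath `euler_to_kx`/`euler_to_ky`/`euler_to_kz` sits the free-electron relation k_par [1/A] = 0.5123 * sqrt(E_kin [eV]) * sin(phi). A minimal standalone sketch for a single acceptance angle (the helper name is illustrative, not part of the codebase):

import numpy as np

K_FACTOR = 0.5123  # sqrt(2 m_e) / hbar, in inverse angstroms per sqrt(eV)

def kp_from_angle(kinetic_energy, phi):
    """In-plane momentum for an analyzer acceptance angle phi, in radians."""
    return K_FACTOR * np.sqrt(kinetic_energy) * np.sin(phi)

phi = np.linspace(-0.25, 0.25, 101)  # acceptance angles, radians
kinetic_energy = 5.7                 # eV + hv - work function, in eV
kp = kp_from_angle(kinetic_energy, phi)

Example #6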
def magnify_circular_regions_plot(data: DataType,
                                  magnified_points,
                                  mag=10,
                                  radius=0.05,
                                  cmap='viridis',
                                  color=None,
                                  edgecolor='red',
                                  out=None,
                                  ax=None,
                                  **kwargs):
    data = normalize_to_spectrum(data)
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=kwargs.get('figsize', (7, 5)))

    mesh = data.plot(ax=ax, cmap=cmap)
    clim = list(mesh.get_clim())
    clim[1] = clim[1] / mag

    mask = np.zeros(shape=(len(data.values.ravel()),), dtype=bool)
    pts = np.zeros(shape=(len(data.values.ravel()), 2))

    raveled = data.T.ravel()
    pts[:, 0] = raveled[data.dims[0]]
    pts[:, 1] = raveled[data.dims[1]]

    x0, y0 = ax.transAxes.transform((0, 0))  # lower left in pixels
    x1, y1 = ax.transAxes.transform((1, 1))  # upper right in pixels
    dx = x1 - x0
    dy = y1 - y0
    maxd = max(dx, dy)
    xlim, ylim = ax.get_xlim(), ax.get_ylim()

    width = radius * maxd / dx * (xlim[1] - xlim[0])
    height = radius * maxd / dy * (ylim[1] - ylim[0])

    if not isinstance(edgecolor, list):
        edgecolor = [edgecolor for _ in range(len(magnified_points))]

    if not isinstance(color, list):
        color = [color for _ in range(len(magnified_points))]

    # Normalize the point coordinates to the axis ranges for hit testing
    pts[:, 1] = (pts[:, 1]) / (xlim[1] - xlim[0])
    pts[:, 0] = (pts[:, 0]) / (ylim[1] - ylim[0])

    for c, ec, point in zip(color, edgecolor, magnified_points):
        patch = matplotlib.patches.Ellipse(point,
                                           width,
                                           height,
                                           color=c,
                                           edgecolor=ec,
                                           fill=False,
                                           linewidth=2,
                                           zorder=4)
        patchfake = matplotlib.patches.Ellipse([point[1], point[0]], radius,
                                               radius)
        ax.add_patch(patch)
        mask = np.logical_or(mask, patchfake.contains_points(pts))

    data_masked = data.copy(deep=True)
    data_masked.values = np.array(data_masked.values, dtype=np.float32)

    # Use the requested colormap, rendering masked (NaN) pixels as transparent
    cm = matplotlib.cm.get_cmap(name=cmap)
    cm.set_bad(color=(1, 1, 1, 0))
    data_masked.values[np.swapaxes(
        np.logical_not(mask.reshape(data.values.shape[::-1])), 0, 1)] = np.nan

    aspect = ax.get_aspect()
    extent = [xlim[0], xlim[1], ylim[0], ylim[1]]
    ax.imshow(data_masked.values,
              cmap=cm,
              extent=extent,
              zorder=3,
              clim=clim,
              origin='lower')
    ax.set_aspect(aspect)

    for spine in ['left', 'top', 'right', 'bottom']:
        ax.spines[spine].set_zorder(5)

    if out is not None:
        plt.savefig(path_for_plot(out), dpi=400)
        return path_for_plot(out)

    return fig, ax
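The hit test at the heart of the masking step is `contains_points` on a patch that is never drawn, as with `patchfake` above. A standalone sketch (the points and ellipse are illustrative):

import numpy as np
import matplotlib.patches

pts = np.random.rand(1000, 2)  # scattered sample points in the unit square

# An undrawn patch used purely for containment tests
ellipse = matplotlib.patches.Ellipse((0.5, 0.5), width=0.3, height=0.2)
inside = ellipse.contains_points(pts)
print(inside.sum(), 'of', len(pts), 'points fall in the magnified region')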
Example #7
def calculate_shirley_background_full_range(xps: DataType,
                                            eps=1e-7,
                                            max_iters=50,
                                            n_samples=5):
    """
    Calculates a shirley background in the range of `energy_slice` according to:

    S(E) = I(E_right) + k * (A_right(E)) / (A_left(E) + A_right(E))

    Typically

    k := I(E_right) - I(E_left)

    The iterative method is continued so long as the total background is not converged to relative error
    `eps`.

    The method continues for a maximum number of iterations `max_iters`.

    In practice, what we can do is to calculate the cumulative sum of the data along the energy axis of
    both the data and the current estimate of the background
    :param xps:
    :param eps:
    :return:
    """

    xps = normalize_to_spectrum(xps)
    background = xps.copy(deep=True)
    cumulative_xps = np.cumsum(xps.values)
    total_xps = np.sum(xps.values)

    rel_error = np.inf

    i_left = np.mean(xps.values[:n_samples])
    i_right = np.mean(xps.values[-n_samples:])

    iter_count = 0

    k = i_left - i_right
    for iter_count in range(max_iters):
        cumulative_background = np.cumsum(background.values)
        total_background = np.sum(background.values)

        new_bkg = background.copy(deep=True)

        for i in range(len(new_bkg)):
            # A_right(E_i) is the background-subtracted area to the right of
            # E_i; the denominator is the total background-subtracted area.
            new_bkg.values[i] = i_right + k * (
                (total_xps - cumulative_xps[i] -
                 (total_background - cumulative_background[i])) /
                (total_xps - total_background + 1e-5))

        rel_error = np.abs(np.sum(new_bkg.values) -
                           total_background) / (total_background)

        background = new_bkg

        if rel_error < eps:
            break

    if rel_error >= eps:
        warnings.warn('Shirley background calculation did not converge ' +
                      'after {} steps with relative error {}!'.format(
                          max_iters, rel_error))

    return background
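The same iteration can be written compactly against a bare numpy array. A self-contained sketch on a synthetic spectrum with a step-like background (all names illustrative):

import numpy as np

def shirley_background(y, eps=1e-7, max_iters=50, n_samples=5):
    """Iterative Shirley background for a 1D spectrum."""
    i_left = y[:n_samples].mean()
    i_right = y[-n_samples:].mean()
    k = i_left - i_right

    background = np.zeros_like(y, dtype=float)
    total = y.sum()
    for _ in range(max_iters):
        cum_bkg = np.cumsum(background)
        total_bkg = background.sum()
        # A_right(E_i) / (A_left + A_right), computed via cumulative sums
        a_right = (total - np.cumsum(y)) - (total_bkg - cum_bkg)
        new_bkg = i_right + k * a_right / (total - total_bkg + 1e-5)
        if abs(new_bkg.sum() - total_bkg) / max(abs(total_bkg), 1e-5) < eps:
            return new_bkg
        background = new_bkg
    return background

x = np.linspace(0, 10, 500)
y = np.exp(-(x - 5) ** 2) + 0.6 * (x < 5) + 0.1  # peak on a step background
bkg = shirley_background(y)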
Example #8
def broadcast_model(model_cls: Union[type, TypeIterable],
                    data: DataType,
                    broadcast_dims,
                    params=None,
                    progress=True,
                    dataset=True,
                    weights=None,
                    safe=False,
                    prefixes=None,
                    window=None,
                    multithread=False):
    """
    Perform a fit across a number of dimensions. Allows composite models as well as models
    defined and compiled through strings.
    :param model_cls:
    :param data:
    :param broadcast_dims:
    :param params:
    :param progress:
    :param dataset:
    :param weights:
    :param safe:
    :param window:
    :return:
    """
    if params is None:
        params = {}

    if isinstance(broadcast_dims, str):
        broadcast_dims = [broadcast_dims]

    data = normalize_to_spectrum(data)
    cs = {}
    for dim in broadcast_dims:
        cs[dim] = data.coords[dim]

    other_axes = set(data.dims).difference(set(broadcast_dims))
    template = data.sum(list(other_axes))
    template.values = np.ndarray(template.shape, dtype=object)

    residual = data.copy(deep=True)
    residual.values = np.zeros(residual.shape)

    model = compile_model(parse_model(model_cls),
                          params=params,
                          prefixes=prefixes)
    if isinstance(params, (list, tuple)):
        params = {}

    new_params = model.make_params()

    n_fits = np.prod(np.array(list(template.S.dshape.values())))

    identity = lambda x, *args, **kwargs: x
    wrap_progress = identity
    if progress:
        wrap_progress = tqdm_notebook

    _fit_func = functools.partial(_perform_fit,
                                  data=data,
                                  model=model,
                                  params=params,
                                  safe=safe,
                                  weights=weights,
                                  window=window)

    if multithread:
        with ProcessPoolExecutor() as executor:
            for fit_result, fit_residual, coords in executor.map(
                    _fit_func, template.T.iter_coords()):
                template.loc[coords] = fit_result
                residual.loc[coords] = fit_residual
    else:
        for indices, cut_coords in wrap_progress(
                template.T.enumerate_iter_coords(), desc='Fitting',
                total=n_fits):
            fit_result, fit_residual, _ = _fit_func(cut_coords)
            template.loc[cut_coords] = fit_result
            residual.loc[cut_coords] = fit_residual

    if dataset:
        return xr.Dataset(
            {
                'results': template,
                'data': data,
                'residual': residual,
                'norm_residual': residual / data,
            }, residual.coords)

    template.attrs['original_data'] = data
    return template
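Stripped of the bookkeeping, `broadcast_model` runs one fit per coordinate along the broadcast dimensions and collects the results. A hand-rolled sketch of that pattern with lmfit's GaussianModel (assuming lmfit is available; the synthetic data and names are illustrative):

import numpy as np
import xarray as xr
from lmfit.models import GaussianModel

# Synthetic data: a Gaussian whose center drifts along a 'T' axis
eV = np.linspace(-1, 1, 201)
T = np.linspace(10, 100, 10)
centers = np.linspace(-0.2, 0.2, len(T))
data = xr.DataArray(
    np.stack([np.exp(-(eV - c) ** 2 / 0.01) for c in centers]),
    coords={'T': T, 'eV': eV}, dims=['T', 'eV'])

# One fit per T coordinate, i.e. broadcast_dims=['T']
model = GaussianModel()
results = []
for t in data.coords['T'].values:
    cut = data.sel(T=t).values
    params = model.guess(cut, x=eV)
    results.append(model.fit(cut, params, x=eV))

# The fitted centers recover the drift
print([round(r.params['center'].value, 3) for r in results])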