Example 1
    def _partition_batches(self, max_batch: int):
        if self._batch_partition == 'lines':
            super()._partition_batches(max_batch)
            return

        if max_batch == 1:
            self._batches = [[i] for i in range(len(self))]
            return

        # Tile the 2-D scan grid into roughly square blocks of at most
        # max_batch positions each.
        max_batch_x = int(np.floor(np.sqrt(max_batch)))
        max_batch_y = int(np.floor(np.sqrt(max_batch)))

        Nx = subdivide_into_batches(
            self.gpts[0],
            (self.gpts[0] + (-self.gpts[0] % max_batch_x)) // max_batch_x)
        Ny = subdivide_into_batches(
            self.gpts[1],
            (self.gpts[1] + (-self.gpts[1] % max_batch_y)) // max_batch_y)

        self._batches = []
        Sx = np.concatenate(([0], np.cumsum(Nx)))
        Sy = np.concatenate(([0], np.cumsum(Ny)))

        for i, nx in enumerate(Nx):
            for j, ny in enumerate(Ny):
                x = np.arange(Sx[i], Sx[i] + nx, dtype=int)
                y = np.arange(Sy[j], Sy[j] + ny, dtype=int)
                # Flatten the (x, y) block into row-major indices of the full scan grid.
                self._batches.append(
                    (y[None] + x[:, None] * self.gpts[1]).ravel())
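
All five examples rely on subdivide_into_batches, whose implementation is not shown here. The sketch below only captures the behavior the callers assume (splitting n_items into n_batches of near-equal sizes that sum to n_items); the library's actual code may differ.

import numpy as np

def subdivide_into_batches(n_items: int, n_batches: int):
    # Near-even split: batch sizes differ by at most one and sum to n_items.
    if n_batches > n_items:
        raise RuntimeError('number of batches cannot exceed number of items')
    sizes = np.full(n_batches, n_items // n_batches, dtype=int)
    sizes[:n_items % n_batches] += 1  # spread the remainder over the first batches
    return sizes.tolist()

# subdivide_into_batches(10, 3) -> [4, 3, 3]
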
Example 2
    def __init__(self,
                 calculator,
                 gpts: Union[int, Sequence[int]] = None,
                 sampling: Union[float, Sequence[float]] = None,
                 # origin: Union[float, Sequence[float]] = None,
                 slice_thickness=.5,
                 core_size=.005,
                 storage='cpu',
                 precalculate=True):

        self._calculator = calculator
        self._core_size = core_size

        # Height of the cell and number of fine-grid voxels along z.
        thickness = calculator.atoms.cell[2, 2]
        nz = calculator.hamiltonian.finegd.N_c[2]

        # Number of potential slices such that each slice spans a whole number
        # of voxels and is no thicker than slice_thickness.
        num_slices = int(np.ceil(nz / np.floor(slice_thickness / (thickness / nz))))

        self._voxel_height = thickness / nz
        self._slice_vertical_voxels = subdivide_into_batches(nz, num_slices)

        # TODO: implement support for non-periodic extent

        self._origin = (0., 0.)
        extent = np.diag(orthogonalize_cell(calculator.atoms.copy()).cell)[:2]

        self._grid = Grid(extent=extent, gpts=gpts, sampling=sampling, lock_extent=True)

        super().__init__(precalculate=precalculate, storage=storage)
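
A small worked example of the slicing arithmetic above, with illustrative numbers (the cell height, grid size and slice_thickness are assumptions, not values from the source):

import numpy as np

thickness = 4.0   # assumed cell height along z, in Angstrom
nz = 160          # assumed number of fine-grid voxels along z
slice_thickness = 0.5

voxel_height = thickness / nz                                 # 0.025 Angstrom
voxels_per_slice = np.floor(slice_thickness / voxel_height)   # 20.0 voxels
num_slices = int(np.ceil(nz / voxels_per_slice))              # 8 slices
# subdivide_into_batches(160, 8) then yields eight groups of 20 voxels,
# i.e. every slice is exactly 0.5 Angstrom thick for these numbers.
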
Example 3
    def partition_scan(self, partitions: Sequence[int]) -> List['GridScan']:
        """
        Partition the scan into smaller grid scans

        Parameters
        ----------
        partitions : two int
            The number of partitions to create in x and y.

        Returns
        -------
        List of GridScan objects
        """
        Nx = subdivide_into_batches(self.gpts[0], partitions[0])
        Ny = subdivide_into_batches(self.gpts[1], partitions[1])
        Sx = np.concatenate(([0], np.cumsum(Nx)))
        Sy = np.concatenate(([0], np.cumsum(Ny)))

        scans = []
        for i, nx in enumerate(Nx):
            for j, ny in enumerate(Ny):
                start = [Sx[i] * self.sampling[0], Sy[j] * self.sampling[1]]
                end = [
                    start[0] + nx * self.sampling[0],
                    start[1] + ny * self.sampling[1]
                ]
                endpoint = [False, False]

                if i + 1 == partitions[0]:
                    endpoint[0] = self.grid.endpoint[0]
                    if endpoint[0]:
                        end[0] -= self.sampling[0]

                if j + 1 == partitions[1]:
                    endpoint[1] = self.grid.endpoint[1]
                    if endpoint[1]:
                        end[1] -= self.sampling[1]

                scan = self.__class__(start,
                                      end,
                                      gpts=(nx, ny),
                                      endpoint=endpoint,
                                      batch_partition='squares',
                                      measurement_shift=(Sx[i], Sy[j]))

                scans.append(scan)
        return scans
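
A minimal usage sketch of partition_scan, assuming the usual GridScan constructor taking start, end and sampling (the concrete numbers are illustrative):

scan = GridScan(start=(0., 0.), end=(10., 10.), sampling=(.1, .1))
subscans = scan.partition_scan((2, 2))  # a 2 x 2 grid of sub-scans

# The partitions tile the original grid without overlap, so the total number
# of probe positions is preserved.
assert sum(s.gpts[0] * s.gpts[1] for s in subscans) == scan.gpts[0] * scan.gpts[1]
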
Example 4
    def _partition_batches(self, max_batch):
        # Ceil-divide the positions into batches of at most max_batch indices.
        n = len(self)
        n_batches = (n + (-n % max_batch)) // max_batch
        batch_sizes = subdivide_into_batches(n, n_batches)

        self._batches = []

        start = 0
        for batch_size in batch_sizes:
            end = start + batch_size
            indices = np.arange(start, end, dtype=int)
            start += batch_size
            self._batches.append(indices)
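
The expression (n + (-n % max_batch)) // max_batch is an integer ceiling division. A quick check with assumed numbers, reusing the subdivide_into_batches sketch from Example 1:

n, max_batch = 10, 4
n_batches = (n + (-n % max_batch)) // max_batch    # ceil(10 / 4) = 3
print(subdivide_into_batches(n, n_batches))        # [4, 3, 3]; every batch <= max_batch
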
Example 5
    def __init__(self,
                 calculator,
                 gpts: Union[int, Sequence[int]] = None,
                 sampling: Union[float, Sequence[float]] = None,
                 origin: Union[float, Sequence[float]] = None,
                 orthogonal_cell: Sequence[float] = None,
                 periodic_z: bool = True,
                 slice_thickness=.5,
                 core_size=.005,
                 plane='xy',
                 storage='cpu',
                 precalculate=True):

        self._calculator = calculator
        self._core_size = core_size
        self._plane = plane

        if orthogonal_cell is None:
            atoms = rotate_atoms_to_plane(calculator.atoms, plane)
            thickness = atoms.cell[2, 2]
            nz = calculator.hamiltonian.finegd.N_c[plane_to_axes(plane)[-1]]
            extent = np.diag(orthogonalize_cell(atoms.copy()).cell)[:2]
        else:
            if plane != 'xy':
                raise NotImplementedError()

            thickness = orthogonal_cell[2]
            # Estimate the number of z-voxels from the fine-grid density of the
            # original cell, scaled to the requested orthogonal thickness.
            nz = (calculator.hamiltonian.finegd.N_c /
                  np.linalg.norm(calculator.atoms.cell, axis=0) * orthogonal_cell[2])
            nz = int(np.ceil(np.max(nz)))
            extent = orthogonal_cell[:2]

        num_slices = int(np.ceil(nz / np.floor(slice_thickness / (thickness / nz))))
        self._orthogonal_cell = orthogonal_cell
        self._voxel_height = thickness / nz
        self._slice_vertical_voxels = subdivide_into_batches(nz, num_slices)
        self._origin = (0., 0., 0.)
        self._periodic_z = periodic_z

        self._grid = Grid(extent=extent, gpts=gpts, sampling=sampling, lock_extent=True)

        super().__init__(precalculate=precalculate, storage=storage)
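
For the orthogonal_cell branch above, nz is estimated from the fine-grid density of the original cell. A worked example with assumed numbers (grid shape, cell lengths and thickness are illustrative, not values from the source):

import numpy as np

N_c = np.array([120, 120, 160])            # assumed fine-grid shape (finegd.N_c)
cell_lengths = np.array([3.0, 3.0, 4.0])   # assumed cell-vector lengths, in Angstrom
orthogonal_thickness = 5.0                 # requested orthogonal_cell[2]

# Voxels per Angstrom along each direction, scaled to the new thickness;
# taking the maximum keeps the densest direction fully resolved.
nz = int(np.ceil(np.max(N_c / cell_lengths * orthogonal_thickness)))   # -> 200
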