Example #1
    def __init__(self,
                 X,
                 ntrees,
                 sample_size,
                 limit=None,
                 ExtensionLevel=0,
                 seed=None,
                 n_jobs=1):
        """ iForest(X, ntrees,  sample_size, limit=None, ExtensionLevel=0)
        Initialize a forest by passing in training data, number of trees to be
        used and the subsample size.

        Parameters
        ----------
        X : list of list of floats
        Training data. List of [x1,x2,...,xn] coordinate points.
        ntrees : int
        Number of trees to be used.
        sample_size : int
        The size of the subsample to be used in creation of each tree.
        Must be smaller than |X|.
        limit : int
        The maximum allowed tree depth. This is by default set to average
        length of unsucessful search in a binary tree.
        ExtensionLevel : int
        Specifies degree of freedom in choosing the hyperplanes for
        dividing up data. Must be smaller than the dimension n of the
        dataset.
        """
        self.ntrees = ntrees
        self.X = X
        self.nobjs = len(X)
        self.sample = sample_size
        self.limit = limit
        self.exlevel = ExtensionLevel
        # Extension Level check. See def for explanation.
        self._checkExtensionLevel()
        if limit is None:
            # Set limit to the default as specified by the paper (average depth
            # of unsuccessful search through a binary tree).
            self.limit = int(_np.ceil(_np.log2(self.sample)))
        self.c = c_factor(self.sample)
        # This loop builds an ensemble of iTrees (the forest).
        sp = _split_n(ntrees, _effective_n_jobs(n_jobs))
        _rn.seed(seed)
        _np.random.seed(seed)
        tr = _Parallel(backend='multiprocessing',
                       n_jobs=n_jobs)(_delayed(_create_tree)(
                           X, sp[i], self.sample, self.limit, self.exlevel)
                                      for i in range(len(sp)))
        self.Trees = [j for i in tr for j in i]
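
In `__init__` above, the default depth limit is `ceil(log2(sample_size))` and `c_factor(sample_size)` normalizes observed depths by the average path length of an unsuccessful binary-search-tree lookup, as in the isolation forest paper. A minimal sketch of that normalization under the standard formula (`c_factor` itself is not shown in this example, so the exact implementation may differ):

import numpy as np

def c_factor_sketch(n):
    # Average path length of an unsuccessful search in a binary search tree
    # built from n points; used to normalize tree depths into anomaly scores.
    return 2.0 * (np.log(n - 1) + 0.5772156649) - 2.0 * (n - 1) / n

sample_size = 256
depth_limit = int(np.ceil(np.log2(sample_size)))  # default limit, as in __init__
print(depth_limit)                                # 8
print(round(c_factor_sketch(sample_size), 2))     # ~10.24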
Example #2
    def compute_paths(self, X_in=None, n_jobs=1):
        """
        compute_paths(X_in=None)
        Compute anomaly scores for all data points in a dataset X_in.

        Parameters
        ----------
        X_in : list of list of floats
            Data to be scored. iForest.Trees are used for computing the depth
            reached in each tree by each data point.
        n_jobs : int, optional
            Number of parallel processes used to score the data.

        Returns
        -------
        numpy.ndarray
            Anomaly score for each data point in X_in.
        """
        if X_in is None:
            X_in = self.X
        sp = _np.array_split(X_in, _effective_n_jobs(n_jobs))
        S = _Parallel(backend='multiprocessing', n_jobs=n_jobs)(
            _delayed(_get_anom_score)(sp[i], self.ntrees, self.c, self.Trees)
            for i in range(len(sp)))
        return _np.array([j for i in S for j in i])
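
The values returned by `_get_anom_score` follow the standard isolation-forest convention: the mean depth a point reaches across the trees is normalized by `self.c` (computed in Example #1) and mapped through 2^(-E[h(x)]/c). A hedged sketch of that per-point formula (the helper itself is not shown in this example, so this is an assumption about its output):

import numpy as np

def anomaly_score_sketch(mean_depth, c):
    # Standard isolation-forest score: s = 2 ** (-E[h(x)] / c(sample_size)).
    # Scores near 1 flag anomalies; scores well below 0.5 suggest normal points.
    return 2.0 ** (-mean_depth / c)

c = 10.24  # c_factor for a subsample of 256 points (see Example #1)
print(round(anomaly_score_sketch(4.0, c), 2))   # 0.76 -> likely anomaly
print(round(anomaly_score_sketch(12.0, c), 2))  # 0.44 -> likely normal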
Example #3
    def expm(self,
             psi_0,
             H_time_eval=0.0,
             iterate=False,
             n_jobs=1,
             block_diag=False,
             a=-1j,
             start=None,
             stop=None,
             endpoint=None,
             num=None,
             shift=None):
        """Creates symmetry blocks of the Hamiltonian and then uses them to run `_expm_multiply()` in parallel.
		
		**Arguments NOT described below can be found in the documentation for the `exp_op` class.**

		Examples
		--------

		The example below builds on the code snippet shown in the description of the `block_ops` class.

		.. literalinclude:: ../../doc_examples/block_ops-example.py
			:linenos:
			:language: python
			:lines: 60-67

		Parameters
		-----------
		psi_0 : numpy.ndarray, list, tuple
			Quantum state defined on the full Hilbert space of the problem.
			Does not need to obey any sort of symmetry.
		H_time_eval : numpy.ndarray, list
			Times to evaluate the Hamiltonians at when doing the matrix exponentiation. 
		iterate : bool, optional
			Flag to return a generator when set to `True`. Otherwise the output is an array of states.
			Default is `False`.
		n_jobs : int, optional
			Number of processes requested for computing the time evolution dynamics.

			NOTE: one of those processes is used to gather results. For best performance, all blocks
			should be approximately the same size and `n_jobs-1` must be a common divisor of the number of
			blocks, such that there is roughly an equal workload for each process. Otherwise the computation
			will be as slow as the slowest process.
		block_diag : bool, optional 
			When set to `True`, this flag puts the Hamiltonian matrices for the separate symmetry blocks
			into a list and then loops over it to do time evolution. When set to `False`, it puts all
			blocks in a single giant sparse block diagonal matrix. Default is `False`.

			This flag is useful if there are a lot of smaller-sized blocks.

		Returns
		--------
		obj
			if `iterate = True`, returns a generator which yields the time-dependent state in the
			full H-space basis.

			if `iterate = False`, returns a `numpy.ndarray` which has the time-dependent states in the
			full H-space basis in its rows.

		Raises
		------
		ValueError
			Various `ValueError`s of `exp_op` class.
		RuntimeError
			Terminates when the initial state has no projection onto the specified symmetry blocks.

		"""
        from ..operators import hamiltonian

        if iterate:
            if start is None and stop is None:
                raise ValueError(
                    "'iterate' can only be True with time discretization. must specify 'start' and 'stop' points."
                )

            if num is not None:
                if type(num) is not int:
                    raise ValueError("expecting integer for 'num'.")
            else:
                num = 50

            if endpoint is not None:
                if type(endpoint) is not bool:
                    raise ValueError("expecting bool for 'endpoint'.")
            else:
                endpoint = True

        else:
            if start is None and stop is None:
                if num is not None:
                    raise ValueError("unexpected argument 'num'.")
                if endpoint is not None:
                    raise ValueError("unexpected argument 'endpoint'.")
            else:
                if not (_np.isscalar(start) and _np.isscalar(stop)):
                    raise ValueError(
                        "expecting scalar values for 'start' and 'stop'")

                if not (_np.isreal(start) and _np.isreal(stop)):
                    raise ValueError(
                        "expecting real values for 'start' and 'stop'")

                if num is not None:
                    if type(num) is not int:
                        raise ValueError("expecting integer for 'num'.")
                else:
                    num = 50

                if endpoint is not None:
                    if type(endpoint) is not bool:
                        raise ValueError("expecting bool for 'endpoint'.")
                else:
                    endpoint = True

        P = []
        H_list = []
        psi_blocks = []
        for key, b in _iteritems(self._basis_dict):
            p = self._get_P(key)

            if _sp.issparse(psi_0):
                psi = p.H.dot(psi_0).toarray()
            else:
                psi = p.H.dot(psi_0)

            psi = psi.ravel()
            if _np.linalg.norm(psi) > 1000 * _np.finfo(self.dtype).eps:
                psi_blocks.append(psi)
                P.append(p.tocoo())
                H = self._get_H(key)
                H = H(H_time_eval) * a
                if shift is not None:
                    H += a * shift * _sp.identity(b.Ns, dtype=self.dtype)

                H_list.append(H)

        if block_diag and H_list:
            N_H = len(H_list)
            n_pp = N_H // n_jobs
            n_left = n_pp + N_H % n_jobs

            H_list_prime = []
            psi_blocks_prime = []

            psi_block = _np.hstack(psi_blocks[:n_left])
            H_block = _sp.block_diag(H_list[:n_left], format="csr")

            H_list_prime.append(H_block)
            psi_blocks_prime.append(psi_block)

            for i in range(n_jobs - 1):
                i1 = n_left + i * n_pp
                i2 = n_left + (i + 1) * n_pp
                psi_block = _np.hstack(psi_blocks[i1:i2])
                H_block = _sp.block_diag(H_list[i1:i2], format="csr")

                H_list_prime.append(H_block)
                psi_blocks_prime.append(psi_block)

            H_list = H_list_prime
            psi_blocks = psi_blocks_prime

        H_is_complex = _np.iscomplexobj(
            [_np.float32(1.0).astype(H.dtype) for H in H_list])

        if H_list:
            P = _sp.hstack(P, format="csr")
            if iterate:
                return _block_expm_iter(psi_blocks, H_list, P, start, stop,
                                        num, endpoint, n_jobs)
            else:
                ver = [int(v) for v in _scipy.__version__.split(".")]
                if H_is_complex and (start, stop, num, endpoint) != (
                        None, None, None, None) and ver[1] < 19:
                    mats = _block_expm_iter(psi_blocks, H_list, P, start, stop,
                                            num, endpoint, n_jobs)
                    return _np.array([mat for mat in mats]).T
                else:
                    psi_t = _Parallel(n_jobs=n_jobs)(
                        _delayed(_expm_multiply)(H,
                                                 psi,
                                                 start=start,
                                                 stop=stop,
                                                 num=num,
                                                 endpoint=endpoint)
                        for psi, H in _izip(psi_blocks, H_list))
                    psi_t = _np.hstack(psi_t).T
                    psi_t = P.dot(psi_t)
                    return psi_t
        else:
            raise RuntimeError(
                "initial state has no projection on to specified blocks.")
Example #4
    def evolve(self,
               psi_0,
               t0,
               times,
               iterate=False,
               n_jobs=1,
               block_diag=False,
               stack_state=False,
               imag_time=False,
               solver_name="dop853",
               **solver_args):
        """Creates symmetry blocks of the Hamiltonian and then uses them to run `hamiltonian.evolve()` in parallel.
		
		**Arguments NOT described below can be found in the documentation for the `hamiltonian.evolve()` method.**

		Examples
		--------

		The example below builds on the code snippet shown in the description of the `block_ops` class.

		.. literalinclude:: ../../doc_examples/block_ops-example.py
			:linenos:
			:language: python
			:lines: 69-

		Parameters
		-----------
		psi_0 : numpy.ndarray, list, tuple
			Quantum state defined on the full Hilbert space of the problem.
			Does not need to obey any sort of symmetry.
		t0 : float
			Initial time to start the evolution at.
		times : numpy.ndarray, list
			Contains the times to compute the solution at. Must be an iterable object.
		iterate : bool, optional
			Flag to return a generator when set to `True`. Otherwise the output is an array of states.
			Default is `False`.
		n_jobs : int, optional
			Number of processes requested for computing the time evolution dynamics.

			NOTE: one of those processes is used to gather results. For best performance, all blocks
			should be approximately the same size and `n_jobs-1` must be a common divisor of the number of
			blocks, such that there is roughly an equal workload for each process. Otherwise the computation
			will be as slow as the slowest process.
		block_diag : bool, optional
			When set to `True`, this flag puts the Hamiltonian matrices for the separate symmetry blocks
			into a list and then loops over it to do time evolution. When set to `False`, it puts all
			blocks in a single giant sparse block diagonal matrix. Default is `False`.

			This flag is useful if there are a lot of smaller-sized blocks.

		Returns
		--------
		obj
			if `iterate = True`, returns a generator which yields the time-dependent state in the
			full H-space basis.

			if `iterate = False`, returns a `numpy.ndarray` which has the time-dependent states in the
			full H-space basis in its rows.
		
		Raises
		------
		ValueError
			The `imag_time=True` option of the `hamiltonian.evolve()` method is not supported.
		ValueError
			`iterate=True` requires `times` to be an array or a list.
		RuntimeError
			Terminates when the initial state has no projection onto the specified symmetry blocks.

		"""

        if imag_time:
            raise ValueError(
                "imaginary time not supported for block evolution.")
        P = []
        H_list = []
        psi_blocks = []
        for key, b in _iteritems(self._basis_dict):
            p = self._get_P(key)

            if _sp.issparse(psi_0):
                psi = p.H.dot(psi_0).toarray()
            else:
                psi = p.H.dot(psi_0)

            psi = _np.asarray(psi).ravel()

            if _np.linalg.norm(psi) > 1000 * _np.finfo(self.dtype).eps:
                psi_blocks.append(psi)
                P.append(p.tocoo())
                H_list.append(self._get_H(key))

        if block_diag and H_list:
            N_H = len(H_list)
            n_pp = N_H // n_jobs
            n_left = n_pp + N_H % n_jobs

            H_list_prime = []
            psi_blocks_prime = []
            if n_left != 0:
                H_list_prime.append(
                    block_diag_hamiltonian(H_list[:n_left],
                                           None,
                                           None,
                                           None,
                                           None,
                                           self._dtype,
                                           get_proj=False,
                                           **self._no_checks))
                psi_blocks_prime.append(_np.hstack(psi_blocks[:n_left]))

            for i in range(n_jobs - 1):
                i1 = n_left + i * n_pp
                i2 = n_left + (i + 1) * n_pp
                H_list_prime.append(
                    block_diag_hamiltonian(H_list[i1:i2],
                                           None,
                                           None,
                                           None,
                                           None,
                                           self._dtype,
                                           get_proj=False,
                                           **self._no_checks))
                psi_blocks_prime.append(_np.hstack(psi_blocks[i1:i2]))

            H_list = H_list_prime
            psi_blocks = psi_blocks_prime

        if len(H_list) > 0:
            P = _sp.hstack(P, format="csr")

            if iterate:
                if _np.isscalar(times):
                    raise ValueError(
                        "If iterate=True times must be a list/array.")
                return _block_evolve_iter(psi_blocks, H_list, P, t0, times,
                                          stack_state, imag_time, solver_name,
                                          solver_args, n_jobs)
            else:
                psi_t = _Parallel(n_jobs=n_jobs)(
                    _delayed(_block_evolve_helper)(
                        H, psi, t0, times, stack_state, imag_time, solver_name,
                        solver_args) for psi, H in _izip(psi_blocks, H_list))
                psi_t = _np.vstack(psi_t)
                psi_t = P.dot(psi_t)
                return psi_t
        else:
            raise RuntimeError(
                "initial state has no projection on to specified blocks.")