Example #1
    def _forward_and_backward(self):
        """
        Performs a number of VMC optimization steps.

        Args:
            n_steps (int): Number of steps to perform.
        """

        self._sampler.reset()

        # Burn-in phase
        self._sampler.generate_samples(self._n_discard)

        # Generate samples and store them
        self._samples = self._sampler.generate_samples(self._n_samples_node,
                                                       samples=self._samples)

        # Compute the local energy estimator and the average energy
        eloc, self._loss_stats = self._get_mc_stats(self._ham)

        # Center the local energy
        eloc -= _mean(eloc)

        samples_r = self._samples.reshape((-1, self._samples.shape[-1]))
        eloc_r = eloc.reshape(-1, 1)

        # Perform update
        if self._sr:
            # When using SR (natural gradient), we need the full Jacobian
            self._grads, self._jac = self._machine.vector_jacobian_prod(
                samples_r,
                eloc_r / self._n_samples,
                self._grads,
                return_jacobian=True)

            self._grads = tree_map(_sum_inplace, self._grads)

            self._dp = self._sr.compute_update(self._jac, self._grads,
                                               self._dp)

        else:
            # Computing updates using the simple gradient
            self._grads = self._machine.vector_jacobian_prod(
                samples_r, eloc_r / self._n_samples, self._grads)

            self._grads = tree_map(_sum_inplace, self._grads)

            # If the parameters are real but the gradient is complex, take only the real part.
            # This is not necessary for SR, since SR already does it.
            if not self._machine.has_complex_parameters:
                self._dp = tree_map(lambda x: x.real, self._grads)
            else:
                self._dp = self._grads

        return self._dp
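
Outside this method (not shown in the snippet), the driver presumably applies the returned update through its optimizer. A minimal sketch of such a loop, assuming a hypothetical `update_parameters` helper:

    def advance(self, n_steps=1):
        # Sketch only: `update_parameters` is an assumed helper, not part of the snippet above.
        for _ in range(n_steps):
            dp = self._forward_and_backward()  # compute the parameter update
            self.update_parameters(dp)         # apply it through the optimizer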
Example #2
def estimate_expectations(ops,
                          sampler,
                          n_samples,
                          n_discard=None,
                          compute_gradients=False):
    """
    For a sequence of linear operators, computes a statistical estimate of the
    respective expectation values, variances, and optionally gradients of the
    expectation values with respect to the variational parameters.

    The estimate is based on `n_samples` configurations
    obtained from `sampler`.

    Args:
        ops: pytree of linear operators
        sampler: A NetKet sampler
        n_samples: Number of MC samples used to estimate expectation values
        n_discard: Number of MC samples dropped from the start of the
            chain (burn-in). Defaults to `n_samples // 10`.
        compute_gradients: Whether to compute the gradients of the
            observables.

    Returns:
        Either `stats` or, if `compute_gradients` is True, a tuple of `stats` and `grad`:
            stats: A sequence of Stats objects containing the mean, variance,
                and MC diagnostics for each operator in `ops`.
            grad: A sequence of gradients of the expectation value of `op`,
                  as an ndarray of shape `(psi.n_par,)`, for each `op` in `ops`.
    """

    from netket.operator import local_values as _local_values
    from ._C_netket import stats as nst

    psi = sampler.machine

    if n_discard is None:
        n_discard = n_samples // 10

    # Burn-in phase
    sampler.generate_samples(n_discard)
    # Generate samples
    samples = sampler.generate_samples(n_samples)

    if compute_gradients:
        der_logs = psi.der_log(samples)

    def estimate(op):
        lvs = _local_values(op, psi, samples)
        stats = nst.statistics(lvs)

        if compute_gradients:
            grad = nst.covariance_sv(lvs, der_logs)
            return stats, grad
        else:
            return stats

    return tree_map(estimate, ops)
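
A hedged usage sketch follows; the sampler and the operators are assumptions, not objects defined in this snippet.

# Sketch only: `sampler` wraps the variational machine; `ham` and `sz` are assumed operators.
stats = estimate_expectations({"energy": ham, "sz": sz},
                              sampler, n_samples=1000, n_discard=100)
print(stats["energy"].mean, stats["sz"].mean)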
Example #3
    def estimate(self, observables):
        """
        Return MCMC statistics for the expectation value of observables in the
        current state of the driver.

        Args:
            observables: A pytree of operators for which statistics should be computed.

        Returns:
            A pytree of the same structure as the input, containing MCMC statistics
            for the corresponding operators as leaves.
        """
        return tree_map(self._estimate_stats, observables)
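
A hedged usage sketch; the driver instance `vmc` and the operator `sz` are assumptions.

# Sketch only: `vmc` is a driver instance and `sz` an operator, both assumed here.
obs_stats = vmc.estimate({"Sz": sz})
print(obs_stats["Sz"].mean)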
Example #4
def test_tree_map():
    # Structure: name -> (input, expected)
    test_trees = {
        "None": (None, None),
        "leaf": (0, 1),
        "list": ([0, 1], [1, 2]),
        "tuple1": ((0,), (1,)),
        "tuple3": ((0, 1, 0), (1, 2, 1)),
        "list of list": ([[0, 1], [1], [0, 1, 0]], [[1, 2], [2], [1, 2, 1]]),
        "dict": ({"a": 0, "b": 1}, {"a": 1, "b": 2}),
        "mixed list": ([(0, 1), [1], 1, {"a": 0}], [(1, 2), [2], 2, {"a": 1}]),
    }

    for name, (inp, expected) in test_trees.items():
        info = "{}, input={}".format(name, inp)
        assert tree_map(lambda x: x + 1, inp) == expected, info
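
For reference, a minimal `tree_map` that satisfies the test above could look like the sketch below; this is an illustration, not NetKet's actual implementation.

def tree_map(fun, tree):
    # None is treated as an empty subtree and returned unchanged
    if tree is None:
        return None
    # Lists and tuples are mapped recursively, preserving their type
    if isinstance(tree, (list, tuple)):
        return type(tree)(tree_map(fun, leaf) for leaf in tree)
    # Dictionaries are mapped over their values
    if isinstance(tree, dict):
        return {key: tree_map(fun, value) for key, value in tree.items()}
    # Anything else is a leaf
    return fun(tree)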
Example #5
    def run(
        self,
        output_prefix,
        n_iter,
        obs=None,
        save_params_every=50,
        write_every=50,
        step_size=1,
        show_progress=True,
    ):
        """
        Executes the Monte Carlo Variational optimization, updating the weights of the network
        stored in this driver for `n_iter` steps and dumping values of the observables `obs`
        in the output. The output is a json file at `output_prefix`, overwriting files with
        the same prefix.

        Args:
            :output_prefix: The prefix at which json output should be stored (ignored if logger
              is provided).
            :n_iter: the total number of iterations
            :obs: An iterable containing all observables that should be computed
            :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
            :write_every: Every how many steps the json data should be flushed to disk (ignored if
            logger is provided)
            :step_size: Every how many steps should observables be logged to disk (default=1)
            :show_progress: If true displays a progress bar (default=True)
        """
        if obs is None:
            # TODO remove the first case after deprecation of self._obs in 3.0
            if len(self._obs) != 0:
                obs = self._obs
            else:
                obs = {}

        logger = _JsonLog(output_prefix, save_params_every, write_every)

        # Don't log on non-root nodes
        if self._mynode != 0:
            logger = None

        with tqdm(
            self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
        ) as itr:
            for step in itr:
                # if the cost-function is defined then report it in the progress bar
                if self._loss_stats is not None:
                    itr.set_postfix_str(self._loss_name + "=" + str(self._loss_stats))

                obs_data = self.estimate(obs)

                if self._loss_stats is not None:
                    obs_data[self._loss_name] = self._loss_stats

                log_data = tree_map(_obs_stat_to_dict, obs_data)

                if logger is not None:
                    logger(step, log_data, self.machine)

        # flush at the end of the evolution so that final values are saved to
        # file
        if logger is not None:
            logger.flush(self.machine)
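
A hedged usage sketch for this signature (output prefix first, then the number of iterations); `vmc` and `sz` are assumptions.

# Sketch only: `vmc` is a driver instance and `sz` an operator, both assumed here.
vmc.run("vmc_output", 300, obs={"Sz": sz}, save_params_every=10)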
Example #6
    def run(
            self,
            n_iter,
            out=None,
            obs=None,
            show_progress=True,
            save_params_every=50,  # for default logger
            write_every=50,  # for default logger
            step_size=1,  # for default logger
            output_prefix=None,  # TODO: deprecated
    ):
        """
        Executes the Monte Carlo Variational optimization, updating the weights of the network
        stored in this driver for `n_iter` steps and dumping values of the observables `obs`
        in the output `logger`. If no logger is specified, creates a json file at `output_prefix`,
        overwriting files with the same prefix.

        !! Compatibility v2.1
            Before v2.1 the order of the first two arguments, `n_iter` and `output_prefix` was
            reversed. The reversed ordering will still be supported until v3.0, but is deprecated.

        Args:
            :n_iter: the total number of iterations
            :out: A logger object to be used to store simulation log and data.
                If this argument is a string, it will be used as output prefix for the standard JSON logger.
            :obs: An iterable containing all observables that should be computed
            :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
            :write_every: Every how many steps the json data should be flushed to disk (ignored if
            logger is provided)
            :step_size: Every how many steps should observables be logged to disk (default=1)
            :show_progress: If true displays a progress bar (default=True)
            :output_prefix: (Deprecated) The prefix at which json output should be stored (ignored if out
              is provided).
        """

        # TODO Remove this deprecated code in v3.0
        # Handle the deprecated call where argument names are not specified and the
        # prefix is passed as the first positional argument, with the number of
        # iterations as the second argument.
        if type(n_iter) is str and type(out) is int:
            n_iter, out = out, n_iter
            warn_deprecation(
                "The positional syntax run(output_prefix, n_iter, **args) is deprecated, use run(n_iter, output_prefix, **args) instead."
            )

        if obs is None:
            # TODO remove the first case after deprecation of self._obs in 3.0
            if len(self._obs) != 0:
                obs = self._obs
            else:
                obs = {}

        # output_prefix is deprecated; out should be used and takes precedence.
        # Error out if both are passed.
        # TODO: remove in v3.0
        if out is not None and output_prefix is not None:
            raise ValueError(
                "Invalid out and output_prefix arguments. Only one of the two can be passed. Note that output_prefix is deprecated and you should use out."
            )
        elif output_prefix is not None:
            warn_deprecation(
                "The output_prefix argument is deprecated. Use out instead.")
            out = output_prefix

        # Only log on the root node
        if self._mynode == 0:
            # if out is a path, create an overwriting Json Log for output
            if isinstance(out, str):
                logger = _JsonLog(out, "w", save_params_every, write_every)
            else:
                logger = out
        else:
            logger = None

        with tqdm(self.iter(n_iter, step_size),
                  total=n_iter,
                  disable=not show_progress) as itr:
            for step in itr:
                # if the cost-function is defined then report it in the progress bar
                if self._loss_stats is not None:
                    itr.set_postfix_str(self._loss_name + "=" +
                                        str(self._loss_stats))

                obs_data = self.estimate(obs)

                if self._loss_stats is not None:
                    obs_data[self._loss_name] = self._loss_stats

                log_data = tree_map(_obs_stat_to_dict, obs_data)

                if logger is not None:
                    logger(step, log_data, self.machine)

        # flush at the end of the evolution so that final values are saved to
        # file
        if logger is not None:
            logger.flush(self.machine)
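
A hedged usage sketch for the v2.1 signature (`n_iter` first, `out` either a prefix string or a logger object); `vmc` and `sz` are assumptions.

# Sketch only: `vmc` and `sz` are assumed; `out` may be a prefix string or a logger object.
vmc.run(300, out="vmc_output", obs={"Sz": sz})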
Example #7
    def _forward_and_backward(self):
        """
        Performs a number of VMC optimization steps.

        Args:
            n_steps (int): Number of steps to perform.
        """

        self._sampler.reset()
        self._obs_samples_valid = False

        # Burn-in phase
        self._sampler.generate_samples(self._n_discard)

        # Generate samples and store them
        self._samples = self._sampler.generate_samples(self._n_samples_node,
                                                       samples=self._samples)

        # Estimate C^[loc] (local energy) and LdagL
        self._lloc, self._loss_stats = self._get_mc_superop_stats(self._lind)

        # Flatten chain dimension
        samples_r = self._samples.reshape((-1, self._samples.shape[-1]))
        lloc_r = self._lloc.reshape(-1, 1)

        # Compute Log derivatives
        self._der_logs = self._machine.der_log(samples_r)
        # Compute statistical average (also across nodes)
        self._der_logs_ave = tree_map(_mean, self._der_logs, axis=0)

        # Compute \nabla C^[Loc]
        self._der_loc_vals = _der_local_values(self._lind,
                                               self._machine,
                                               samples_r,
                                               center_derivative=False)

        # The function below is applied to every leaf of the gradient pytree.
        n_samples_node = lloc_r.shape[0]

        def gradfun(der_loc_vals, der_logs_ave):
            par_dims = der_loc_vals.ndim - 1

            _lloc_r = lloc_r.reshape((n_samples_node, ) +
                                     tuple(1 for i in range(par_dims)))

            grad = _mean(der_loc_vals.conjugate() * _lloc_r, axis=0) - (
                der_logs_ave.conjugate() * self._loss_stats.mean)
            return grad

        self._grads = trees2_map(gradfun, self._der_loc_vals,
                                 self._der_logs_ave)

        # Perform update
        if self._sr:
            # Center the log derivatives
            self._jac = trees2_map(lambda x, y: x - y, self._der_logs,
                                   self._der_logs_ave)

            self._dp = self._sr.compute_update(self._jac, self._grads,
                                               self._dp)

        else:
            # If the parameters are real but the gradient is complex, take only the real part.
            # This is not necessary for SR, since SR already does it.
            if not self._machine.has_complex_parameters:
                self._dp = tree_map(lambda x: x.real, self._grads)
            else:
                self._dp = self._grads

        return self._dp
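
A hedged reading of `gradfun` above (this interpretation is an assumption, not stated in the snippet): writing $O_k = \partial_k \log\psi$ for the log-derivatives and $C^{\mathrm{loc}}$ for the local estimator, each gradient leaf is estimated as

$$\mathrm{grad}_k \;\approx\; \big\langle\, \overline{\partial_k C^{\mathrm{loc}}}\, C^{\mathrm{loc}} \,\big\rangle \;-\; \overline{\langle O_k \rangle}\, \big\langle C^{\mathrm{loc}} \big\rangle,$$

i.e. a sample average in the first term minus a product of averages in the second, matching the two terms of the return statement.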