def run(
    self,
    output_prefix,
    n_iter,
    obs=None,
    save_params_every=50,
    write_every=50,
    step_size=1,
    show_progress=True,
):
    """
    Executes the time evolution for `n_iter` steps of `dt`, writing values
    of the observables `obs` to the output. The output are JSON files at
    `output_prefix.{log,state}`, overwriting files with the same prefix.

    Args:
        :output_prefix: The prefix at which JSON output should be stored.
        :n_iter: the total number of iterations
        :obs: An iterable containing all observables that should be computed
        :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
        :write_every: Every how many steps the json data should be
            flushed to disk (ignored if logger is provided)
        :step_size: Every how many steps should observables be logged to disk (default=1)
        :show_progress: If true displays a progress bar (default=True)
    """
    if obs is None:
        obs = {}

    logger = _JsonLog(output_prefix, save_params_every, write_every)

    # Don't log on non-root nodes
    if self._mpi_nodes != 0:
        logger = None

    with tqdm(
        self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
    ) as itr:
        for step in itr:
            # if the cost-function is defined then report it in the progress bar
            energy = self.estimate(self._h(self.t))
            itr.set_postfix_str(
                "t={:.2e}, Energy={:.6e}".format(self.t, energy.mean.real)
            )

            obs_data = self.estimate(obs)
            obs_data["Energy"] = energy
            if self._loss_stats is not None:
                obs_data[self._loss_name] = self._loss_stats

            # BUGFIX: the estimated observables were previously dropped —
            # an empty dict containing only "Time" was handed to the logger.
            # Log the observables together with the current time.
            log_data = obs_data
            log_data["Time"] = self.t

            if logger is not None:
                logger(step, log_data, _MockMachine(self.state))

    # flush at the end of the evolution so that final values are saved to
    # file
    if logger is not None:
        logger.flush(_MockMachine(self.state))
def run(
    self,
    output_prefix,
    n_iter,
    obs=None,
    save_params_every=50,
    write_every=50,
    step_size=1,
    show_progress=True,
):
    """
    Runs the Monte Carlo variational optimization, updating the weights of
    the network held by this driver for `n_iter` steps and recording the
    values of the observables `obs` in the output. The output is a json
    file at `output_prefix`; files sharing that prefix are overwritten.

    Args:
        :output_prefix: The prefix at which json output should be stored
            (ignored if logger is provided).
        :n_iter: the total number of iterations
        :obs: An iterable containing all observables that should be computed
        :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
        :write_every: Every how many steps the json data should be
            flushed to disk (ignored if logger is provided)
        :step_size: Every how many steps should observables be logged to disk (default=1)
        :show_progress: If true displays a progress bar (default=True)
    """
    if obs is None:
        # TODO remove the first case after deprecation of self._obs in 3.0
        obs = self._obs if len(self._obs) != 0 else {}

    # Only the root node writes output; everyone else runs silently.
    logger = None
    if self._mynode == 0:
        logger = _JsonLog(output_prefix, save_params_every, write_every)

    with tqdm(
        self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
    ) as progress:
        for step in progress:
            # when a cost function is available, surface it on the bar
            if self._loss_stats is not None:
                progress.set_postfix_str(
                    self._loss_name + "=" + str(self._loss_stats)
                )

            estimates = self.estimate(obs)
            if self._loss_stats is not None:
                estimates[self._loss_name] = self._loss_stats

            serializable = tree_map(_obs_stat_to_dict, estimates)
            if logger is not None:
                logger(step, serializable, self.machine)

    # one final flush so the last iteration's values reach disk
    if logger is not None:
        logger.flush(self.machine)
def run(
    self,
    n_iter,
    out=None,
    obs=None,
    show_progress=True,
    save_params_every=50,  # for default logger
    write_every=50,  # for default logger
    step_size=1,  # for default logger
    callback=lambda *x: True,
):
    """
    Executes the Monte Carlo Variational optimization, updating the weights of the network
    stored in this driver for `n_iter` steps and dumping values of the observables `obs`
    in the output `logger`. If no logger is specified, creates a json file at `out`,
    overwriting files with the same prefix.

    Args:
        :n_iter: the total number of iterations
        :out: A logger object, or an iterable of loggers, to be used to store simulation log and data.
            If this argument is a string, it will be used as output prefix for the standard JSON logger.
        :obs: An iterable containing all observables that should be computed
        :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
        :write_every: Every how many steps the json data should be
            flushed to disk (ignored if logger is provided)
        :step_size: Every how many steps should observables be logged to disk (default=1)
        :show_progress: If true displays a progress bar (default=True)
        :callback: Callable or list of callable callback functions to stop training given a condition
    """
    if not isinstance(n_iter, numbers.Number):
        raise ValueError(
            "n_iter, the first positional argument to `run`, must be a number!"
        )

    if obs is None:
        obs = {}

    if out is None:
        out = tuple()
        print(
            "No output specified (out=[apath|nk.logging.JsonLogger(...)])."
            "Running the optimization but not saving the output."
        )

    # Log only on the root node (the original comment said the opposite).
    if self._mynode == 0:
        # if out is a path, create an overwriting Json Log for output
        if isinstance(out, str):
            loggers = (_JsonLog(out, "w", save_params_every, write_every),)
        else:
            loggers = _to_iterable(out)
    else:
        loggers = tuple()
        show_progress = False

    callbacks = _to_iterable(callback)
    callback_stop = False

    with tqdm(
        self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
    ) as itr:
        for step in itr:
            log_data = self.estimate(obs)

            # if the cost-function is defined then report it in the progress bar
            if self._loss_stats is not None:
                itr.set_postfix_str(self._loss_name + "=" + str(self._loss_stats))
                log_data[self._loss_name] = self._loss_stats

            for logger in loggers:
                logger(self.step_count, log_data, self.machine)

            # BUGFIX: the loop variable used to be named `callback`, shadowing
            # (and clobbering) the `callback` parameter after the first step.
            for cb in callbacks:
                if not cb(step, log_data, self):
                    callback_stop = True
            if callback_stop:
                break

    # flush at the end of the evolution so that final values are saved to
    # file
    for logger in loggers:
        logger.flush(self.machine)
def run(
    self,
    n_iter,
    out=None,
    obs=None,
    show_progress=True,
    save_params_every=50,  # for default logger
    write_every=50,  # for default logger
    step_size=1,  # for default logger
    output_prefix=None,  # TODO: deprecated
):
    """
    Executes the Monte Carlo Variational optimization, updating the weights of the network
    stored in this driver for `n_iter` steps and dumping values of the observables `obs`
    in the output `logger`. If no logger is specified, creates a json file at `output_prefix`,
    overwriting files with the same prefix.

    !! Compatibility v2.1
        Before v2.1 the order of the first two arguments, `n_iter` and `output_prefix`
        was reversed. The reversed ordering will still be supported until v3.0, but
        is deprecated.

    Args:
        :n_iter: the total number of iterations
        :out: A logger object to be used to store simulation log and data.
            If this argument is a string, it will be used as output prefix for the standard JSON logger.
        :obs: An iterable containing all observables that should be computed
        :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
        :write_every: Every how many steps the json data should be
            flushed to disk (ignored if logger is provided)
        :step_size: Every how many steps should observables be logged to disk (default=1)
        :show_progress: If true displays a progress bar (default=True)
        :output_prefix: (Deprecated) The prefix at which json output should be
            stored (ignored if out is provided).
    """
    # TODO Remove this deprecated code in v3.0
    # manage deprecated where argument names are not specified, and
    # prefix is passed as the first positional argument and the number
    # of iterations as a second argument.
    if isinstance(n_iter, str) and isinstance(out, int):
        n_iter, out = out, n_iter
        warn_deprecation(
            "The positional syntax run(output_prefix, n_iter, **args) is deprecated, use run(n_iter, output_prefix, **args) instead."
        )

    if obs is None:
        # TODO remove the first case after deprecation of self._obs in 3.0
        if len(self._obs) != 0:
            obs = self._obs
        else:
            obs = {}

    # output_prefix is deprecated. out should be used and takes over
    # error out if both are passed
    # TODO: remove in v3.0
    if out is not None and output_prefix is not None:
        raise ValueError(
            "Invalid out and output_prefix arguments. Only one of the two can be passed. Note that output_prefix is deprecated and you should use out."
        )
    elif output_prefix is not None:
        # BUGFIX: this branch used to trigger on `out is None`, so the
        # deprecation warning fired even when output_prefix was never passed.
        warn_deprecation("The output_prefix argument is deprecated. Use out instead.")
        out = output_prefix

    # Log only on the root node (non-root nodes get no logger).
    if self._mynode == 0:
        # if out is a path, create an overwriting Json Log for output
        if isinstance(out, str):
            logger = _JsonLog(out, "w", save_params_every, write_every)
        else:
            logger = out
    else:
        logger = None

    with tqdm(
        self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
    ) as itr:
        for step in itr:
            # if the cost-function is defined then report it in the progress bar
            if self._loss_stats is not None:
                itr.set_postfix_str(self._loss_name + "=" + str(self._loss_stats))

            obs_data = self.estimate(obs)
            if self._loss_stats is not None:
                obs_data[self._loss_name] = self._loss_stats

            log_data = tree_map(_obs_stat_to_dict, obs_data)
            if logger is not None:
                logger(step, log_data, self.machine)

    # flush at the end of the evolution so that final values are saved to
    # file
    if logger is not None:
        logger.flush(self.machine)
def run(
    self,
    n_iter,
    out=None,
    obs=None,
    show_progress=True,
    save_params_every=50,  # for default logger
    write_every=50,  # for default logger
    step_size=1,  # for default logger
):
    """
    Executes the Monte Carlo Variational optimization, updating the weights of the network
    stored in this driver for `n_iter` steps and dumping values of the observables `obs`
    in the output `logger`. If no logger is specified, creates a json file at `out`,
    overwriting files with the same prefix.

    !! Compatibility v2.1
        Before v2.1 the order of the first two arguments, `n_iter` and `out`
        was reversed. The reversed ordering will still be supported until v3.0, but
        is deprecated.

    Args:
        :n_iter: the total number of iterations
        :out: A logger object, or an iterable of loggers, to be used to store simulation log and data.
            If this argument is a string, it will be used as output prefix for the standard JSON logger.
        :obs: An iterable containing all observables that should be computed
        :save_params_every: Every how many steps the parameters of the network should be
            serialized to disk (ignored if logger is provided)
        :write_every: Every how many steps the json data should be
            flushed to disk (ignored if logger is provided)
        :step_size: Every how many steps should observables be logged to disk (default=1)
        :show_progress: If true displays a progress bar (default=True)
    """
    # TODO Remove this deprecated code in v3.0
    # manage deprecated where argument names are not specified, and
    # prefix is passed as the first positional argument and the number
    # of iterations as a second argument.
    # (idiom fix: isinstance instead of `type(x) is T`)
    if isinstance(n_iter, str) and isinstance(out, int):
        n_iter, out = out, n_iter
        warn_deprecation(
            "The positional syntax run(out, n_iter, **args) is deprecated, use run(n_iter, out, **args) instead."
        )

    if obs is None:
        # TODO remove the first case after deprecation of self._obs in 3.0
        if len(self._obs) != 0:
            obs = self._obs
        else:
            obs = {}

    if out is None:
        out = tuple()
        print(
            "No output specified (out=[apath|nk.logging.JsonLogger(...)])."
            "Running the optimization but not saving the output."
        )

    # Log only on the root node (the original comment said the opposite).
    if self._mynode == 0:
        # if out is a path, create an overwriting Json Log for output
        if isinstance(out, str):
            loggers = (_JsonLog(out, "w", save_params_every, write_every),)
        elif hasattr(out, "__iter__"):
            loggers = out
        else:
            loggers = (out,)
    else:
        loggers = tuple()
        show_progress = False

    with tqdm(
        self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
    ) as itr:
        for step in itr:
            log_data = self.estimate(obs)

            # if the cost-function is defined then report it in the progress bar
            if self._loss_stats is not None:
                itr.set_postfix_str(self._loss_name + "=" + str(self._loss_stats))
                log_data[self._loss_name] = self._loss_stats

            for logger in loggers:
                logger(self.step_count, log_data, self.machine)

    # flush at the end of the evolution so that final values are saved to
    # file
    for logger in loggers:
        logger.flush(self.machine)