def test_diagnose_divergences(self): exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION) sampler_args = SamplerArgs() cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1], output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=1) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'diagnose-good', 'corr_gauss_depth8-1.csv') ] fit = CmdStanMCMC(runset) # TODO - use cmdstan test files instead expected = '\n'.join([ 'Checking sampler transitions treedepth.', '424 of 1000 (42%) transitions hit the maximum ' 'treedepth limit of 8, or 2^8 leapfrog steps.', 'Trajectories that are prematurely terminated ' 'due to this limit will result in slow exploration.', 'For optimal performance, increase this limit.', ]) self.assertIn(expected, fit.diagnose().replace('\r\n', '\n'))
def test_variables(self): # construct fit using existing sampler output exe = os.path.join(DATAFILES_PATH, 'lotka-volterra' + EXTENSION) jdata = os.path.join(DATAFILES_PATH, 'lotka-volterra.data.json') sampler_args = SamplerArgs(iter_sampling=20) cmdstan_args = CmdStanArgs( model_name='lotka-volterra', model_exe=exe, chain_ids=[1], seed=12345, data=jdata, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=1) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'lotka-volterra.csv') ] runset._set_retcode(0, 0) fit = CmdStanMCMC(runset) self.assertEqual(20, fit.num_draws) self.assertEqual(8, len(fit._stan_variable_dims)) self.assertTrue('z' in fit._stan_variable_dims) self.assertEqual(fit._stan_variable_dims['z'], (20, 2)) vars = fit.stan_variables() self.assertEqual(len(vars), len(fit._stan_variable_dims)) self.assertTrue('z' in vars) self.assertEqual(vars['z'].shape, (20, 20, 2)) self.assertTrue('theta' in vars) self.assertEqual(vars['theta'].shape, (20, 4))
def test_validate_good_run(self): # construct fit using existing sampler output exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION) jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json') sampler_args = SamplerArgs(iter_sampling=100, max_treedepth=11, adapt_delta=0.95) cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=4) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-good', 'bern-1.csv'), os.path.join(DATAFILES_PATH, 'runset-good', 'bern-2.csv'), os.path.join(DATAFILES_PATH, 'runset-good', 'bern-3.csv'), os.path.join(DATAFILES_PATH, 'runset-good', 'bern-4.csv'), ] self.assertEqual(4, runset.chains) retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) self.assertTrue(runset._check_retcodes()) fit = CmdStanMCMC(runset) self.assertEqual(100, fit.num_draws) self.assertEqual(len(BERNOULLI_COLS), len(fit.column_names)) self.assertEqual('lp__', fit.column_names[0]) drawset = fit.get_drawset() self.assertEqual( drawset.shape, (fit.runset.chains * fit.num_draws, len(fit.column_names)), ) _ = fit.summary() self.assertTrue(True) # TODO - use cmdstan test files instead expected = '\n'.join([ 'Checking sampler transitions treedepth.', 'Treedepth satisfactory for all transitions.', '\nChecking sampler transitions for divergences.', 'No divergent transitions found.', '\nChecking E-BFMI - sampler transitions HMC potential energy.', 'E-BFMI satisfactory for all transitions.', '\nEffective sample size satisfactory.', ]) self.assertIn(expected, fit.diagnose().replace('\r\n', '\n'))
def test_validate_big_run(self): exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION) sampler_args = SamplerArgs(iter_warmup=1500, iter_sampling=1000) cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2], seed=12345, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=2) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-big', 'output_icar_nyc-1.csv'), os.path.join(DATAFILES_PATH, 'runset-big', 'output_icar_nyc-1.csv'), ] fit = CmdStanMCMC(runset) phis = ['phi[{}]'.format(str(x + 1)) for x in range(2095)] column_names = SAMPLER_STATE + phis self.assertEqual(fit.num_draws_sampling, 1000) self.assertEqual(fit.column_names, tuple(column_names)) self.assertEqual(fit.metric_type, 'diag_e') self.assertEqual(fit.step_size.shape, (2, )) self.assertEqual(fit.metric.shape, (2, 2095)) self.assertEqual((1000, 2, 2102), fit.draws().shape) phis = fit.draws_pd(params=['phi']) self.assertEqual((2000, 2095), phis.shape) with self.assertRaisesRegex(ValueError, r'unknown parameter: gamma'): fit.draws_pd(params=['gamma'])
def test_validate_big_run(self): exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION) sampler_args = SamplerArgs(iter_warmup=1500, iter_sampling=1000) cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2], seed=12345, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=2) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-big', 'output_icar_nyc-1.csv'), os.path.join(DATAFILES_PATH, 'runset-big', 'output_icar_nyc-1.csv'), ] fit = CmdStanMCMC(runset) phis = ['phi.{}'.format(str(x + 1)) for x in range(2095)] column_names = SAMPLER_STATE + phis self.assertEqual(fit.num_draws, 1000) self.assertEqual(fit.column_names, tuple(column_names)) self.assertEqual(fit.metric_type, 'diag_e') self.assertEqual(fit.stepsize.shape, (2, )) self.assertEqual(fit.metric.shape, (2, 2095)) self.assertEqual((1000, 2, 2102), fit.sample.shape) phis = fit.get_drawset(params=['phi']) self.assertEqual((2000, 2095), phis.shape) phi1 = fit.get_drawset(params=['phi.1']) self.assertEqual((2000, 1), phi1.shape) mo_phis = fit.get_drawset(params=['phi.1', 'phi.10', 'phi.100']) self.assertEqual((2000, 3), mo_phis.shape) phi2095 = fit.get_drawset(params=['phi.2095']) self.assertEqual((2000, 1), phi2095.shape) with self.assertRaises(Exception): fit.get_drawset(params=['phi.2096']) with self.assertRaises(Exception): fit.get_drawset(params=['ph'])
def test_validate_summary_sig_figs(self): # construct CmdStanMCMC from logistic model output, config exe = os.path.join(DATAFILES_PATH, 'logistic' + EXTENSION) rdata = os.path.join(DATAFILES_PATH, 'logistic.data.R') sampler_args = SamplerArgs(iter_sampling=100) cmdstan_args = CmdStanArgs( model_name='logistic', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=rdata, output_dir=DATAFILES_PATH, sig_figs=17, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'logistic_output_1.csv'), os.path.join(DATAFILES_PATH, 'logistic_output_2.csv'), os.path.join(DATAFILES_PATH, 'logistic_output_3.csv'), os.path.join(DATAFILES_PATH, 'logistic_output_4.csv'), ] retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) fit = CmdStanMCMC(runset) sum_default = fit.summary() beta1_default = format(sum_default.iloc[1, 0], '.18g') self.assertTrue(beta1_default.startswith('1.3')) if cmdstan_version_at(2, 25): sum_17 = fit.summary(sig_figs=17) beta1_17 = format(sum_17.iloc[1, 0], '.18g') self.assertTrue(beta1_17.startswith('1.345767078273')) sum_10 = fit.summary(sig_figs=10) beta1_10 = format(sum_10.iloc[1, 0], '.18g') self.assertTrue(beta1_10.startswith('1.34576707')) with self.assertRaises(ValueError): fit.summary(sig_figs=20) with self.assertRaises(ValueError): fit.summary(sig_figs=-1)
def test_variables_3d(self): # construct fit using existing sampler output exe = os.path.join(DATAFILES_PATH, 'multidim_vars' + EXTENSION) jdata = os.path.join(DATAFILES_PATH, 'logistic.data.R') sampler_args = SamplerArgs(iter_sampling=20) cmdstan_args = CmdStanArgs( model_name='multidim_vars', model_exe=exe, chain_ids=[1], seed=12345, data=jdata, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=1) runset._csv_files = [os.path.join(DATAFILES_PATH, 'multidim_vars.csv')] runset._set_retcode(0, 0) fit = CmdStanMCMC(runset) self.assertEqual(20, fit.num_draws_sampling) self.assertEqual(3, len(fit.stan_vars_dims)) self.assertTrue('y_rep' in fit.stan_vars_dims) self.assertEqual(fit.stan_vars_dims['y_rep'], (5, 4, 3)) var_y_rep = fit.stan_variable(name='y_rep') self.assertEqual(var_y_rep.shape, (20, 5, 4, 3)) var_beta = fit.stan_variable(name='beta') self.assertEqual(var_beta.shape, (20, 2)) var_frac_60 = fit.stan_variable(name='frac_60') self.assertEqual(var_frac_60.shape, (20, )) vars = fit.stan_variables() self.assertEqual(len(vars), len(fit.stan_vars_dims)) self.assertTrue('y_rep' in vars) self.assertEqual(vars['y_rep'].shape, (20, 5, 4, 3)) self.assertTrue('beta' in vars) self.assertEqual(vars['beta'].shape, (20, 2)) self.assertTrue('frac_60' in vars) self.assertEqual(vars['frac_60'].shape, (20, ))
def test_validate_good_run(self): # construct fit using existing sampler output exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION) jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json') sampler_args = SamplerArgs(iter_sampling=100, max_treedepth=11, adapt_delta=0.95) cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-good', 'bern-1.csv'), os.path.join(DATAFILES_PATH, 'runset-good', 'bern-2.csv'), os.path.join(DATAFILES_PATH, 'runset-good', 'bern-3.csv'), os.path.join(DATAFILES_PATH, 'runset-good', 'bern-4.csv'), ] self.assertEqual(4, runset.chains) retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) self.assertTrue(runset._check_retcodes()) fit = CmdStanMCMC(runset) self.assertEqual(100, fit.num_draws) self.assertEqual(len(BERNOULLI_COLS), len(fit.column_names)) self.assertEqual('lp__', fit.column_names[0]) drawset = fit.get_drawset() self.assertEqual( drawset.shape, (fit.runset.chains * fit.num_draws, len(fit.column_names)), ) summary = fit.summary() self.assertIn('5%', list(summary.columns)) self.assertIn('50%', list(summary.columns)) self.assertIn('95%', list(summary.columns)) self.assertNotIn('1%', list(summary.columns)) self.assertNotIn('99%', list(summary.columns)) summary = fit.summary(percentiles=[1, 45, 99]) self.assertIn('1%', list(summary.columns)) self.assertIn('45%', list(summary.columns)) self.assertIn('99%', list(summary.columns)) self.assertNotIn('5%', list(summary.columns)) self.assertNotIn('50%', list(summary.columns)) self.assertNotIn('95%', list(summary.columns)) with self.assertRaises(ValueError): fit.summary(percentiles=[]) with self.assertRaises(ValueError): fit.summary(percentiles=[-1]) diagnostics = fit.diagnose() self.assertIn('Treedepth satisfactory for all transitions.', diagnostics) self.assertIn('No divergent transitions found.', diagnostics) self.assertIn('E-BFMI satisfactory for all transitions.', diagnostics) self.assertIn('Effective sample size satisfactory.', diagnostics)
def generate_quantities( self, data: Union[Dict, str] = None, mcmc_sample: Union[CmdStanMCMC, List[str]] = None, seed: int = None, gq_output_dir: str = None, ) -> CmdStanGQ: """ Run CmdStan's generate_quantities method which runs the generated quantities block of a model given an existing sample. This function takes a CmdStanMCMC object and the dataset used to generate that sample and calls the CmdStan ``generate_quantities`` method to generate additional quantities of interest. The ``CmdStanGQ`` object records the command, the return code, and the paths to the generate method output csv and console files. The output files are written either to a specified output directory or to a temporary directory which is deleted upon session exit. Output filenames correspond to the template '<model_name>-<YYYYMMDDHHMM>-<chain_id>' plus the file suffix which is either '.csv' for the CmdStan output or '.txt' for the console messages, e.g. 'bernoulli-201912081451-1.csv'. Output files written to the temporary directory contain an additional 8-character random string, e.g. 'bernoulli-201912081451-1-5nm6as7u.csv'. :param data: Values for all data variables in the model, specified either as a dictionary with entries matching the data variables, or as the path of a data file in JSON or Rdump format. :param mcmc_sample: Can be either a ``CmdStanMCMC`` object returned by the ``sample`` method or a list of stan-csv files generated by fitting the model to the data using any Stan interface. :param seed: The seed for the random number generator. Must be an integer between 0 and 2^32 - 1. If unspecified, ``numpy.random.RandomState()`` is used to generate a seed which will be used for all chains. *NOTE: Specifying the seed will guarantee the same result for multiple invocations of this method with the same inputs. However this will not reproduce results from the sample method given the same inputs because the RNG will be in a different state.* :param gq_output_dir: Name of the directory in which the CmdStan output files are saved. If unspecified, files will be written to a temporary directory which is deleted upon session exit.
:return: CmdStanGQ object """ sample_csv_files = [] sample_drawset = None chains = 0 if isinstance(mcmc_sample, CmdStanMCMC): sample_csv_files = mcmc_sample.runset.csv_files sample_drawset = mcmc_sample.draws_pd() chains = mcmc_sample.chains chain_ids = mcmc_sample.chain_ids elif isinstance(mcmc_sample, list): if len(mcmc_sample) < 1: raise ValueError('MCMC sample cannot be empty list') sample_csv_files = mcmc_sample chains = len(sample_csv_files) chain_ids = [x + 1 for x in range(chains)] else: raise ValueError('MCMC sample must be either CmdStanMCMC object' ' or list of paths to sample csv_files.') try: if sample_drawset is None: # assemble sample from csv files config = {} # scan 1st csv file to get config try: config = scan_sampler_csv(sample_csv_files[0]) except ValueError: config = scan_sampler_csv(sample_csv_files[0], True) conf_iter_sampling = None if 'num_samples' in config: conf_iter_sampling = int(config['num_samples']) conf_iter_warmup = None if 'num_warmup' in config: conf_iter_warmup = int(config['num_warmup']) conf_thin = None if 'thin' in config: conf_thin = int(config['thin']) sampler_args = SamplerArgs( iter_sampling=conf_iter_sampling, iter_warmup=conf_iter_warmup, thin=conf_thin, ) args = CmdStanArgs( self._name, self._exe_file, chain_ids=chain_ids, method_args=sampler_args, ) runset = RunSet(args=args, chains=chains, chain_ids=chain_ids) runset._csv_files = sample_csv_files sample_fit = CmdStanMCMC(runset) sample_drawset = sample_fit.draws_pd() except ValueError as exc: raise ValueError('Invalid mcmc_sample, error:\n\t{}\n\t' ' while processing files\n\t{}'.format( repr(exc), '\n\t'.join(sample_csv_files))) from exc generate_quantities_args = GenerateQuantitiesArgs( csv_files=sample_csv_files) generate_quantities_args.validate(chains) with MaybeDictToFilePath(data, None) as (_data, _inits): args = CmdStanArgs( self._name, self._exe_file, chain_ids=chain_ids, data=_data, seed=seed, output_dir=gq_output_dir, method_args=generate_quantities_args, ) runset = RunSet(args=args, chains=chains, chain_ids=chain_ids) parallel_chains_avail = cpu_count() parallel_chains = max(min(parallel_chains_avail - 2, chains), 1) with ThreadPoolExecutor(max_workers=parallel_chains) as executor: for i in range(chains): executor.submit(self._run_cmdstan, runset, i) if not runset._check_retcodes(): msg = 'Error during generate_quantities.\n{}'.format( runset.get_err_msgs()) raise RuntimeError(msg) quantities = CmdStanGQ(runset=runset, mcmc_sample=sample_drawset) return quantities
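# A minimal usage sketch for generate_quantities() as documented above, not taken
# from the source: it assumes a main model 'bernoulli.stan', a companion model
# 'bernoulli_ppc.stan' whose generated quantities block produces new quantities,
# and a data file 'bernoulli.data.json'; all of these names are illustrative
# assumptions only, and the attribute read at the end is hedged likewise.
from cmdstanpy import CmdStanModel

bern_model = CmdStanModel(stan_file='bernoulli.stan')
bern_fit = bern_model.sample(data='bernoulli.data.json', chains=4, seed=12345)

ppc_model = CmdStanModel(stan_file='bernoulli_ppc.stan')
# re-run only the generated quantities block over the existing posterior sample
new_quantities = ppc_model.generate_quantities(
    data='bernoulli.data.json',
    mcmc_sample=bern_fit,
    seed=12345,
)
# one row per posterior draw, one column per generated quantity
# (attribute name assumed for this version of CmdStanGQ)
print(new_quantities.generated_quantities.shape)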
def sample( self, data: Union[Dict, str] = None, chains: Union[int, None] = None, parallel_chains: Union[int, None] = None, threads_per_chain: Union[int, None] = None, seed: Union[int, List[int]] = None, chain_ids: Union[int, List[int]] = None, inits: Union[Dict, float, str, List[str]] = None, iter_warmup: int = None, iter_sampling: int = None, save_warmup: bool = False, thin: int = None, max_treedepth: float = None, metric: Union[str, List[str]] = None, step_size: Union[float, List[float]] = None, adapt_engaged: bool = True, adapt_delta: float = None, adapt_init_phase: int = None, adapt_metric_window: int = None, adapt_step_size: int = None, fixed_param: bool = False, output_dir: str = None, save_diagnostics: bool = False, show_progress: Union[bool, str] = False, validate_csv: bool = True, ) -> CmdStanMCMC: """ Run one or more chains of the NUTS sampler to produce a set of draws from the posterior distribution of a model conditioned on some data. This function validates the specified configuration, composes a call to the CmdStan ``sample`` method and spawns one subprocess per chain to run the sampler and waits for all chains to run to completion. Unspecified arguments are not included in the call to CmdStan, i.e., those arguments will have CmdStan default values. For each chain, the ``CmdStanMCMC`` object records the command, the return code, the sampler output file paths, and the corresponding console outputs, if any. The output files are written either to a specified output directory or to a temporary directory which is deleted upon session exit. Output filenames correspond to the template '<model_name>-<YYYYMMDDHHMM>-<chain_id>' plus the file suffix which is either '.csv' for the CmdStan output or '.txt' for the console messages, e.g. 'bernoulli-201912081451-1.csv'. Output files written to the temporary directory contain an additional 8-character random string, e.g. 'bernoulli-201912081451-1-5nm6as7u.csv'. :param data: Values for all data variables in the model, specified either as a dictionary with entries matching the data variables, or as the path of a data file in JSON or Rdump format. :param chains: Number of sampler chains, must be a positive integer. :param parallel_chains: Number of processes to run in parallel. Must be a positive integer. Defaults to ``multiprocessing.cpu_count()``. :param threads_per_chain: The number of threads to use in parallelized sections within an MCMC chain (e.g., when using the Stan functions ``reduce_sum()`` or ``map_rect()``). This will only have an effect if the model was compiled with threading support. The total number of threads used will be ``parallel_chains * threads_per_chain``. :param seed: The seed for the random number generator. Must be an integer between 0 and 2^32 - 1. If unspecified, ``numpy.random.RandomState()`` is used to generate a seed which will be used for all chains. When the same seed is used across all chains, the chain-id is used to advance the RNG to avoid dependent samples. :param chain_ids: The offset for the random number generator, either an integer or a list of unique per-chain offsets. If unspecified, chain ids are numbered sequentially starting from 1. :param inits: Specifies how the sampler initializes parameter values. Initialization is either uniform random on a range centered on 0, exactly 0, or a dictionary or file of initial values for some or all parameters in the model.
The default initialization behavior will initialize all parameter values on range [-2, 2] on the *unconstrained* support. If the expected parameter values are too far from this range, this option may improve adaptation. The following value types are allowed: * Single number n > 0 - initialization range is [-n, n]. * 0 - all parameters are initialized to 0. * dictionary - pairs parameter name : initial value. * string - pathname to a JSON or Rdump data file. * list of strings - per-chain pathname to data file. :param iter_warmup: Number of warmup iterations for each chain. :param iter_sampling: Number of draws from the posterior for each chain. :param save_warmup: When ``True``, sampler saves warmup draws as part of the Stan csv output file. :param thin: Period between saved samples. :param max_treedepth: Maximum depth of trees evaluated by NUTS sampler per iteration. :param metric: Specification of the mass matrix, either as a vector consisting of the diagonal elements of the covariance matrix ('diag' or 'diag_e') or the full covariance matrix ('dense' or 'dense_e'). If the value of the metric argument is a string other than 'diag', 'diag_e', 'dense', or 'dense_e', it must be a valid filepath to a JSON or Rdump file which contains an entry 'inv_metric' whose value is either the diagonal vector or the full covariance matrix. If the value of the metric argument is a list of paths, its length must match the number of chains and all paths must be unique. :param step_size: Initial stepsize for HMC sampler. The value is either a single number or a list of numbers which will be used as the global or per-chain initial step size, respectively. The length of the list of step sizes must match the number of chains. :param adapt_engaged: When True, adapt stepsize and metric. :param adapt_delta: Adaptation target Metropolis acceptance rate. The default value is 0.8. Increasing this value, which must be strictly less than 1, causes adaptation to use smaller step sizes which improves the effective sample size, but may increase the time per iteration. :param adapt_init_phase: Iterations for initial phase of adaptation during which step size is adjusted so that the chain converges towards the typical set. :param adapt_metric_window: The second phase of adaptation tunes the metric and stepsize in a series of intervals. This parameter specifies the number of iterations used for the first tuning interval; window size increases for each subsequent interval. :param adapt_step_size: Number of iterations given over to adjusting the step size given the tuned metric during the final phase of adaptation. :param fixed_param: When ``True``, call CmdStan with argument ``algorithm=fixed_param`` which runs the sampler without updating the Markov Chain, thus the values of all parameters and transformed parameters are constant across all draws and only those values in the generated quantities block that are produced by RNG functions may change. This provides a way to use Stan programs to generate simulated data via the generated quantities block. This option must be used when the parameters block is empty. Default value is ``False``. :param output_dir: Name of the directory to which CmdStan output files are written. If unspecified, output files will be written to a temporary directory which is deleted upon session exit. :param save_diagnostics: Whether or not to save diagnostics. If True, csv output files are written to an output file with filename template '<model_name>-<YYYYMMDDHHMM>-diagnostic-<chain_id>', e.g. 
'bernoulli-201912081451-diagnostic-1.csv'. :param show_progress: Use tqdm progress bar to show sampling progress. If show_progress=='notebook' use tqdm_notebook (needs nodejs for jupyter). :param validate_csv: If ``False``, skip scan of sample csv output file. When sample is large or disk i/o is slow, will speed up processing. Default is ``True`` - sample csv files are scanned for completeness and consistency. :return: CmdStanMCMC object """ if chains is None: if fixed_param: chains = 1 else: chains = 4 if chains < 1: raise ValueError( 'Chains must be a positive integer value, found {}.'.format( chains)) if chain_ids is None: chain_ids = [x + 1 for x in range(chains)] else: if isinstance(chain_ids, int): if chain_ids < 1: raise ValueError( 'Chain_id must be a positive integer value,' ' found {}.'.format(chain_ids)) chain_ids = [chain_ids + i for i in range(chains)] else: if not len(chain_ids) == chains: raise ValueError( 'Chain_ids must correspond to number of chains' ' specified {} chains, found {} chain_ids.'.format( chains, len(chain_ids))) for chain_id in chain_ids: if chain_id < 0: raise ValueError( 'Chain_id must be a non-negative integer value,' ' found {}.'.format(chain_id)) if parallel_chains is None: parallel_chains = max(min(cpu_count(), chains), 1) elif parallel_chains > chains: self._logger.info( 'Requesting %u parallel_chains for %u chains,' ' running all chains in parallel.', parallel_chains, chains, ) parallel_chains = chains elif parallel_chains < 1: raise ValueError( 'Argument parallel_chains must be a positive integer value, ' 'found {}.'.format(parallel_chains)) if threads_per_chain is None: threads_per_chain = 1 if threads_per_chain < 1: raise ValueError( 'Argument threads_per_chain must be a positive integer value, ' 'found {}.'.format(threads_per_chain)) self._logger.debug('total threads: %u', parallel_chains * threads_per_chain) os.environ['STAN_NUM_THREADS'] = str(threads_per_chain) refresh = None if show_progress: try: import tqdm self._logger.propagate = False except ImportError: self._logger.warning( ('Package tqdm not installed, cannot show progress ' 'information. 
Please install tqdm with ' "'pip install tqdm'")) show_progress = False # TODO: issue 49: inits can be initialization function sampler_args = SamplerArgs( iter_warmup=iter_warmup, iter_sampling=iter_sampling, save_warmup=save_warmup, thin=thin, max_treedepth=max_treedepth, metric=metric, step_size=step_size, adapt_engaged=adapt_engaged, adapt_delta=adapt_delta, adapt_init_phase=adapt_init_phase, adapt_metric_window=adapt_metric_window, adapt_step_size=adapt_step_size, fixed_param=fixed_param, ) with MaybeDictToFilePath(data, inits) as (_data, _inits): args = CmdStanArgs( self._name, self._exe_file, chain_ids=chain_ids, data=_data, seed=seed, inits=_inits, output_dir=output_dir, save_diagnostics=save_diagnostics, method_args=sampler_args, refresh=refresh, logger=self._logger, ) runset = RunSet(args=args, chains=chains, chain_ids=chain_ids) pbar = None all_pbars = [] with ThreadPoolExecutor(max_workers=parallel_chains) as executor: for i in range(chains): if show_progress: if (isinstance(show_progress, str) and show_progress.lower() == 'notebook'): try: tqdm_pbar = tqdm.tqdm_notebook except ImportError: msg = ( 'Cannot import tqdm.tqdm_notebook.\n' 'Functionality is only supported on the ' 'Jupyter Notebook and compatible platforms' '.\nPlease follow the instructions in ' 'https://github.com/tqdm/tqdm/issues/394#' 'issuecomment-384743637 and remember to ' 'stop & start your jupyter server.') self._logger.warning(msg) tqdm_pbar = tqdm.tqdm else: tqdm_pbar = tqdm.tqdm # enable dynamic_ncols for advanced users # currently hidden feature dynamic_ncols = os.environ.get('TQDM_DYNAMIC_NCOLS', 'False') if dynamic_ncols.lower() in ['0', 'false']: dynamic_ncols = False else: dynamic_ncols = True pbar = tqdm_pbar( desc='Chain {} - warmup'.format(i + 1), position=i, total=1, # Will set total from Stan's output dynamic_ncols=dynamic_ncols, ) all_pbars.append(pbar) executor.submit(self._run_cmdstan, runset, i, pbar) # Closing all progress bars for pbar in all_pbars: pbar.close() if show_progress: # re-enable logger for console self._logger.propagate = True if not runset._check_retcodes(): msg = 'Error during sampling.\n{}'.format( runset.get_err_msgs()) raise RuntimeError(msg) mcmc = CmdStanMCMC(runset, validate_csv, logger=self._logger) return mcmc
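# A hedged usage sketch for sample() as documented above (not from the source).
# The data dict, the file name 'bernoulli.stan', and the parameter name 'theta'
# are illustrative assumptions; argument names follow the signature shown above.
from cmdstanpy import CmdStanModel

model = CmdStanModel(stan_file='bernoulli.stan')
fit = model.sample(
    data={'N': 10, 'y': [0, 1, 0, 0, 0, 0, 0, 0, 0, 1]},
    chains=4,
    parallel_chains=2,
    seed=12345,
    iter_warmup=500,
    iter_sampling=1000,
    inits={'theta': 0.5},   # dict of initial values; a JSON/Rdump path also works
    adapt_delta=0.95,
    max_treedepth=12,
)
print(fit.draws().shape)    # expected (iter_sampling, chains, num columns)
print(fit.summary().head())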
def sample( self, data: Union[Dict, str] = None, chains: Union[int, None] = None, cores: Union[int, None] = None, seed: Union[int, List[int]] = None, chain_ids: Union[int, List[int]] = None, inits: Union[Dict, float, str, List[str]] = None, warmup_iters: int = None, sampling_iters: int = None, save_warmup: bool = False, thin: int = None, max_treedepth: float = None, metric: Union[str, List[str]] = None, step_size: Union[float, List[float]] = None, adapt_engaged: bool = True, adapt_delta: float = None, fixed_param: bool = False, output_dir: str = None, save_diagnostics: bool = False, show_progress: Union[bool, str] = False ) -> CmdStanMCMC: """ Run one or more chains of the NUTS sampler to produce a set of draws from the posterior distribution of a model conditioned on some data. This function validates the specified configuration, composes a call to the CmdStan ``sample`` method and spawns one subprocess per chain to run the sampler and waits for all chains to run to completion. Unspecified arguments are not included in the call to CmdStan, i.e., those arguments will have CmdStan default values. For each chain, the ``CmdStanMCMC`` object records the command, the return code, the sampler output file paths, and the corresponding console outputs, if any. The output files are written either to a specified output directory or to a temporary directory which is deleted upon session exit. The output filenames are composed of the model name, a timestamp in the form YYYYMMDDhhmm and the chain id, plus the corresponding filetype suffix, either '.csv' for the CmdStan output or '.txt' for the console messages, e.g. `bernoulli-201912081451-1.csv`. Output files written to the temporary directory contain an additional 8-character random string, e.g. `bernoulli-201912081451-1-5nm6as7u.csv`. :param data: Values for all data variables in the model, specified either as a dictionary with entries matching the data variables, or as the path of a data file in JSON or Rdump format. :param chains: Number of sampler chains, should be > 1. :param cores: Number of processes to run in parallel. Must be an integer between 1 and the number of CPUs in the system. If None then set automatically to `chains` but no more than `total_cpu_count - 2`. :param seed: The seed for the random number generator. Must be an integer between ``0`` and ``2^32 - 1``. If unspecified, ``numpy.random.RandomState()`` is used to generate a seed which will be used for all chains. When the same seed is used across all chains, the chain-id is used to advance the RNG to avoid dependent samples. :param chain_ids: The offset for the random number generator, either an integer or a list of unique per-chain offsets. If unspecified, chain ids are numbered sequentially starting from 1. :param inits: Specifies how the sampler initializes parameter values. Initialization is either uniform random on a range centered on 0, exactly 0, or a dictionary or file of initial values for some or all parameters in the model. The default initialization behavior will initialize all parameter values on range [-2, 2] on the _unconstrained_ support. If the expected parameter values are too far from this range, this option may improve adaptation. The following value types are allowed: * Single number ``n > 0`` - initialization range is [-n, n]. * ``0`` - all parameters are initialized to 0. * dictionary - pairs parameter name : initial value. * string - pathname to a JSON or Rdump data file. * list of strings - per-chain pathname to data file.
:param warmup_iters: Number of warmup iterations for each chain. :param sampling_iters: Number of draws from the posterior for each chain. :param save_warmup: When True, sampler saves warmup draws as part of the Stan csv output file. :param thin: Period between saved samples. :param max_treedepth: Maximum depth of trees evaluated by NUTS sampler per iteration. :param metric: Specification of the mass matrix, either as a vector consisting of the diagonal elements of the covariance matrix (``diag`` or ``diag_e``) or the full covariance matrix (``dense`` or ``dense_e``). If the value of the metric argument is a string other than ``diag``, ``diag_e``, ``dense``, or ``dense_e``, it must be a valid filepath to a JSON or Rdump file which contains an entry ``inv_metric`` whose value is either the diagonal vector or the full covariance matrix. If the value of the metric argument is a list of paths, its length must match the number of chains and all paths must be unique. :param step_size: Initial stepsize for HMC sampler. The value is either a single number or a list of numbers which will be used as the global or per-chain initial step_size, respectively. The length of the list of step sizes must match the number of chains. :param adapt_engaged: When True, adapt stepsize and metric. *Note: If True, ``warmup_iters`` must be > 0.* :param adapt_delta: Adaptation target Metropolis acceptance rate. The default value is 0.8. Increasing this value, which must be strictly less than 1, causes adaptation to use smaller step sizes. It improves the effective sample size, but may increase the time per iteration. :param fixed_param: When True, call CmdStan with argument "algorithm=fixed_param" which runs the sampler without updating the Markov Chain, thus the values of all parameters and transformed parameters are constant across all draws and only those values in the generated quantities block that are produced by RNG functions may change. This provides a way to use Stan programs to generate simulated data via the generated quantities block. This option must be used when the parameters block is empty. Default value is False. :param output_dir: Name of the directory to which the CmdStan output files are written. If unspecified, output files will be written to a temporary directory which is deleted upon session exit. :param save_diagnostics: Whether or not to save diagnostics. If True, csv output files are written to ``<basename>-diagnostic-<chain_id>.csv``, where ``<basename>`` is set with ``csv_basename``. :param show_progress: Use tqdm progress bar to show sampling progress. If show_progress=='notebook' use tqdm_notebook (needs nodejs for jupyter).
:return: CmdStanMCMC object """ if chains is None: if fixed_param: chains = 1 else: chains = 4 if chains < 1: raise ValueError( 'chains must be a positive integer value, found {}'.format( chains ) ) if chain_ids is None: chain_ids = [x + 1 for x in range(chains)] else: if isinstance(chain_ids, int): if chain_ids < 1: raise ValueError( 'chain_id must be a positive integer value,' ' found {}'.format(chain_ids) ) offset = chain_ids chain_ids = [x + offset + 1 for x in range(chains)] else: if not len(chain_ids) == chains: raise ValueError( 'chain_ids must correspond to number of chains' ' specified {} chains, found {} chain_ids'.format( chains, len(chain_ids) ) ) for i in range(len(chain_ids)): if chain_ids[i] < 1: raise ValueError( 'chain_id must be a positive integer value,' ' found {}'.format(chain_ids[i]) ) cores_avail = cpu_count() if cores is None: cores = max(min(cores_avail - 2, chains), 1) if cores < 1: raise ValueError( 'cores must be a positive integer value, found {}'.format(cores) ) if cores > cores_avail: self._logger.warning( 'requested %u cores, only %u available', cores, cpu_count() ) cores = cores_avail refresh = None if show_progress: try: import tqdm self._logger.propagate = False except ImportError: self._logger.warning( ( 'tqdm not installed, progress information is not ' 'shown. Please install tqdm with ' "'pip install tqdm'" ) ) show_progress = False # TODO: issue 49: inits can be initialization function sampler_args = SamplerArgs( warmup_iters=warmup_iters, sampling_iters=sampling_iters, save_warmup=save_warmup, thin=thin, max_treedepth=max_treedepth, metric=metric, step_size=step_size, adapt_engaged=adapt_engaged, adapt_delta=adapt_delta, fixed_param=fixed_param, ) with MaybeDictToFilePath(data, inits) as (_data, _inits): args = CmdStanArgs( self._name, self._exe_file, chain_ids=chain_ids, data=_data, seed=seed, inits=_inits, output_dir=output_dir, save_diagnostics=save_diagnostics, method_args=sampler_args, refresh=refresh, ) runset = RunSet(args=args, chains=chains) pbar = None all_pbars = [] with ThreadPoolExecutor(max_workers=cores) as executor: for i in range(chains): if show_progress: if ( isinstance(show_progress, str) and show_progress.lower() == 'notebook' ): try: tqdm_pbar = tqdm.tqdm_notebook except ImportError: msg = ( 'Cannot import tqdm.tqdm_notebook.\n' 'Functionality is only supported on the ' 'Jupyter Notebook and compatible platforms' '.\nPlease follow the instructions in ' 'https://github.com/tqdm/tqdm/issues/394#' 'issuecomment-384743637 and remember to ' 'stop & start your jupyter server.' ) self._logger.warning(msg) tqdm_pbar = tqdm.tqdm else: tqdm_pbar = tqdm.tqdm # enable dynamic_ncols for advanced users # currently hidden feature dynamic_ncols = os.environ.get( 'TQDM_DYNAMIC_NCOLS', 'False' ) if dynamic_ncols.lower() in ['0', 'false']: dynamic_ncols = False else: dynamic_ncols = True pbar = tqdm_pbar( desc='Chain {} - warmup'.format(i + 1), position=i, total=1, # Will set total from Stan's output dynamic_ncols=dynamic_ncols, ) all_pbars.append(pbar) executor.submit(self._run_cmdstan, runset, i, pbar) # Closing all progress bars for pbar in all_pbars: pbar.close() if show_progress: # re-enable logger for console self._logger.propagate = True if not runset._check_retcodes(): msg = 'Error during sampling' for i in range(chains): if runset._retcode(i) != 0: msg = '{}, chain {} returned error code {}'.format( msg, i, runset._retcode(i) ) raise RuntimeError(msg) mcmc = CmdStanMCMC(runset, fixed_param) mcmc._validate_csv_files() return mcmc
def test_validate_big_run(self): exe = os.path.join(datafiles_path, 'bernoulli' + EXTENSION) output = os.path.join(datafiles_path, 'runset-big', 'output_icar_nyc') sampler_args = SamplerArgs() cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2], seed=12345, output_basename=output, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=2) fit = CmdStanMCMC(runset) fit._validate_csv_files() sampler_state = [ 'lp__', 'accept_stat__', 'stepsize__', 'treedepth__', 'n_leapfrog__', 'divergent__', 'energy__', ] phis = ['phi.{}'.format(str(x + 1)) for x in range(2095)] column_names = sampler_state + phis self.assertEqual(fit.columns, len(column_names)) self.assertEqual(fit.column_names, tuple(column_names)) self.assertEqual(fit.metric_type, 'diag_e') self.assertEqual(fit.stepsize.shape, (2, )) self.assertEqual(fit.metric.shape, (2, 2095)) self.assertEqual((1000, 2, 2102), fit.sample.shape) phis = fit.get_drawset(params=['phi']) self.assertEqual((2000, 2095), phis.shape) phi1 = fit.get_drawset(params=['phi.1']) self.assertEqual((2000, 1), phi1.shape) mo_phis = fit.get_drawset(params=['phi.1', 'phi.10', 'phi.100']) self.assertEqual((2000, 3), mo_phis.shape) phi2095 = fit.get_drawset(params=['phi.2095']) self.assertEqual((2000, 1), phi2095.shape) with self.assertRaises(Exception): fit.get_drawset(params=['phi.2096']) with self.assertRaises(Exception): fit.get_drawset(params=['ph'])
def test_metadata(self): # construct CmdStanMCMC from logistic model output, config exe = os.path.join(DATAFILES_PATH, 'logistic' + EXTENSION) rdata = os.path.join(DATAFILES_PATH, 'logistic.data.R') sampler_args = SamplerArgs(iter_sampling=100) cmdstan_args = CmdStanArgs( model_name='logistic', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=rdata, output_dir=DATAFILES_PATH, sig_figs=17, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args) runset._csv_files = [ os.path.join(DATAFILES_PATH, 'logistic_output_1.csv'), os.path.join(DATAFILES_PATH, 'logistic_output_2.csv'), os.path.join(DATAFILES_PATH, 'logistic_output_3.csv'), os.path.join(DATAFILES_PATH, 'logistic_output_4.csv'), ] retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) fit = CmdStanMCMC(runset) col_names = tuple([ 'lp__', 'accept_stat__', 'stepsize__', 'treedepth__', 'n_leapfrog__', 'divergent__', 'energy__', 'beta[1]', 'beta[2]', ]) self.assertEqual(fit.chains, 4) self.assertEqual(fit.chain_ids, [1, 2, 3, 4]) self.assertEqual(fit.num_draws_warmup, 1000) self.assertEqual(fit.num_draws_sampling, 100) self.assertEqual(fit.column_names, col_names) self.assertEqual(fit.num_unconstrained_params, 2) self.assertEqual(fit.metric_type, 'diag_e') self.assertEqual(fit.sampler_config['num_samples'], 100) self.assertEqual(fit.sampler_config['thin'], 1) self.assertEqual(fit.sampler_config['algorithm'], 'hmc') self.assertEqual(fit.sampler_config['metric'], 'diag_e') self.assertAlmostEqual(fit.sampler_config['delta'], 0.80) self.assertTrue('n_leapfrog__' in fit.sampler_vars_cols) self.assertTrue('energy__' in fit.sampler_vars_cols) self.assertTrue('beta' not in fit.sampler_vars_cols) self.assertTrue('energy__' not in fit.stan_vars_dims) self.assertTrue('beta' in fit.stan_vars_dims) self.assertTrue('beta' in fit.stan_vars_cols) self.assertEqual(fit.stan_vars_dims['beta'], tuple([2])) self.assertEqual(fit.stan_vars_cols['beta'], tuple([7, 8]))
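# Hedged illustration of the column metadata exercised in test_metadata above
# (not part of the test suite). Assuming 'fit' is the CmdStanMCMC object built
# there, stan_vars_cols maps a Stan variable name to its column indices in the
# draws array, which lets you slice that variable out by hand; the variable
# name 'beta' and the reshape follow the assertions above.
beta_cols = list(fit.stan_vars_cols['beta'])    # e.g. [7, 8]
beta_dims = fit.stan_vars_dims['beta']          # e.g. (2,)
draws = fit.draws()                             # shape (num draws, chains, columns)
beta_draws = draws[:, :, beta_cols]             # pick out the beta[1], beta[2] columns
beta_flat = beta_draws.reshape(-1, *beta_dims)  # collapse chains: (draws * chains, 2)
print(beta_flat.mean(axis=0))                   # posterior means of beta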
def generate_quantities( self, data: Union[Dict, str] = None, mcmc_sample: Union[CmdStanMCMC, List[str]] = None, seed: int = None, gq_output_dir: str = None, ) -> CmdStanGQ: """ Run CmdStan's generate_quantities method which runs the generated quantities block of a model given an existing sample. This function takes a CmdStanMCMC object and the dataset used to generate that sample and calls to the CmdStan ``generate_quantities`` method to generate additional quantities of interest. The ``CmdStanGQ`` object records the command, the return code, and the paths to the generate method output csv and console files. The output files are written either to a specified output directory or to a temporary directory which is deleted upon session exit. Output filenames are composed of the model name, a timestamp in the form YYYYMMDDhhmm and the chain id, plus the corresponding filetype suffix, either '.csv' for the CmdStan output or '.txt' for the console messages, e.g. `bernoulli_ppc-201912081451-1.csv`. Output files written to the temporary directory contain an additional 8-character random string, e.g. `bernoulli_ppc-201912081451-1-5nm6as7u.csv`. :param data: Values for all data variables in the model, specified either as a dictionary with entries matching the data variables, or as the path of a data file in JSON or Rdump format. :param mcmc_sample: Can be either a CmdStanMCMC object returned by CmdStanPy's `sample` method or a list of stan-csv files generated by fitting the model to the data using any Stan interface. :param seed: The seed for random number generator. Must be an integer between ``0`` and ``2^32 - 1``. If unspecified, ``numpy.random.RandomState()`` is used to generate a seed which will be used for all chains. *NOTE: Specifying the seed will guarantee the same result for multiple invocations of this method with the same inputs. However this will not reproduce results from the sample method given the same inputs because the RNG will be in a different state.* :param gq_output_dir: Name of the directory in which the CmdStan output files are saved. If unspecified, files will be written to a temporary directory which is deleted upon session exit. 
:return: CmdStanGQ object """ sample_csv_files = [] sample_drawset = None chains = 0 if isinstance(mcmc_sample, CmdStanMCMC): sample_csv_files = mcmc_sample.runset.csv_files sample_drawset = mcmc_sample.get_drawset() chains = mcmc_sample.chains elif isinstance(mcmc_sample, list): sample_csv_files = mcmc_sample else: raise ValueError( 'mcmc_sample must be either CmdStanMCMC object' ' or list of paths to sample csv_files' ) try: chains = len(sample_csv_files) if sample_drawset is None: # assemble sample from csv files sampler_args = SamplerArgs() args = CmdStanArgs( self._name, self._exe_file, chain_ids=[x + 1 for x in range(chains)], method_args=sampler_args, ) runset = RunSet(args=args, chains=chains) runset._csv_files = sample_csv_files sample_fit = CmdStanMCMC(runset) sample_fit._validate_csv_files() sample_drawset = sample_fit.get_drawset() except ValueError as e: raise ValueError( 'Invalid mcmc_sample, error:\n\t{}\n\t' ' while processing files\n\t{}'.format( repr(e), '\n\t'.join(sample_csv_files) ) ) generate_quantities_args = GenerateQuantitiesArgs( csv_files=sample_csv_files ) generate_quantities_args.validate(chains) with MaybeDictToFilePath(data, None) as (_data, _inits): args = CmdStanArgs( self._name, self._exe_file, chain_ids=[x + 1 for x in range(chains)], data=_data, seed=seed, output_dir=gq_output_dir, method_args=generate_quantities_args, ) runset = RunSet(args=args, chains=chains) cores_avail = cpu_count() cores = max(min(cores_avail - 2, chains), 1) with ThreadPoolExecutor(max_workers=cores) as executor: for i in range(chains): executor.submit(self._run_cmdstan, runset, i) if not runset._check_retcodes(): msg = 'Error during generate_quantities' for i in range(chains): if runset._retcode(i) != 0: msg = '{}, chain {} returned error code {}'.format( msg, i, runset._retcode(i) ) raise RuntimeError(msg) quantities = CmdStanGQ(runset=runset, mcmc_sample=sample_drawset) quantities._set_attrs_gq_csv_files(sample_csv_files[0]) return quantities
def test_validate_bad_run(self): exe = os.path.join(datafiles_path, 'bernoulli' + EXTENSION) jdata = os.path.join(datafiles_path, 'bernoulli.data.json') sampler_args = SamplerArgs(sampling_iters=100, max_treedepth=11, adapt_delta=0.95) # some chains had errors output = os.path.join(badfiles_path, 'bad-transcript-bern') cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_basename=output, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=4) with self.assertRaisesRegex(Exception, 'Exception'): runset._check_console_msgs() # csv file headers inconsistent output = os.path.join(badfiles_path, 'bad-hdr-bern') cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_basename=output, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=4) retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) self.assertTrue(runset._check_retcodes()) fit = CmdStanMCMC(runset) with self.assertRaisesRegex(ValueError, 'header mismatch'): fit._validate_csv_files() # bad draws output = os.path.join(badfiles_path, 'bad-draws-bern') cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_basename=output, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=4) retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) self.assertTrue(runset._check_retcodes()) fit = CmdStanMCMC(runset) with self.assertRaisesRegex(ValueError, 'draws'): fit._validate_csv_files() # mismatch - column headers, draws output = os.path.join(badfiles_path, 'bad-cols-bern') cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_basename=output, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=4) retcodes = runset._retcodes for i in range(len(retcodes)): runset._set_retcode(i, 0) self.assertTrue(runset._check_retcodes()) fit = CmdStanMCMC(runset) with self.assertRaisesRegex(ValueError, 'bad draw'): fit._validate_csv_files()
def test_validate_bad_run(self): exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION) jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json') sampler_args = SamplerArgs(max_treedepth=11, adapt_delta=0.95) # some chains had errors cmdstan_args = CmdStanArgs( model_name='bernoulli', model_exe=exe, chain_ids=[1, 2, 3, 4], seed=12345, data=jdata, output_dir=DATAFILES_PATH, method_args=sampler_args, ) runset = RunSet(args=cmdstan_args, chains=4) for i in range(4): runset._set_retcode(i, 0) self.assertTrue(runset._check_retcodes()) # errors reported runset._stderr_files = [ os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-1.txt'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-2.txt'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-3.txt'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-4.txt'), ] self.assertEqual(len(runset._get_err_msgs()), 4) # csv file headers inconsistent runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-1.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-2.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-3.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-4.csv'), ] with self.assertRaisesRegex(ValueError, 'header mismatch'): CmdStanMCMC(runset) # bad draws runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-1.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-2.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-3.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-4.csv'), ] with self.assertRaisesRegex(ValueError, 'draws'): CmdStanMCMC(runset) # mismatch - column headers, draws runset._csv_files = [ os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-1.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-2.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-3.csv'), os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-4.csv'), ] with self.assertRaisesRegex(ValueError, 'bad draw, expecting 9 items, found 8'): CmdStanMCMC(runset)
def generate_quantities( self, data: Union[Dict, str] = None, mcmc_sample: Union[CmdStanMCMC, List[str]] = None, seed: int = None, gq_csv_basename: str = None, ) -> CmdStanGQ: """ Wrapper for generated quantities call. Given a CmdStanMCMC object containing a sample from the fitted model, along with the corresponding dataset for that fit, run just the generated quantities block of the model in order to get additional quantities of interest. :param data: Values for all data variables in the model, specified either as a dictionary with entries matching the data variables, or as the path of a data file in JSON or Rdump format. :param mcmc_sample: Can be either a CmdStanMCMC object returned by CmdStanPy's `sample` method or a list of stan-csv files generated by fitting the model to the data using any Stan interface. :param seed: The seed for random number generator. Must be an integer between ``0`` and ``2^32 - 1``. If unspecified, ``numpy.random.RandomState()`` is used to generate a seed which will be used for all chains. *NOTE: Specifying the seed will guarantee the same result for multiple invocations of this method with the same inputs. However this will not reproduce results from the sample method given the same inputs because the RNG will be in a different state.* :param gq_csv_basename: A path or file name which will be used as the basename for the sampler output files. The csv output files for each chain are written to file ``<basename>-<chain_id>.csv`` and the console output and error messages are written to file ``<basename>-<chain_id>.txt``. :return: CmdStanGQ object """ sample_csv_files = [] sample_drawset = None chains = 0 if isinstance(mcmc_sample, CmdStanMCMC): sample_csv_files = mcmc_sample.runset.csv_files sample_drawset = mcmc_sample.get_drawset() chains = mcmc_sample.chains elif isinstance(mcmc_sample, list): sample_csv_files = mcmc_sample else: raise ValueError( 'mcmc_sample must be either CmdStanMCMC object' ' or list of paths to sample csv_files' ) try: chains = len(sample_csv_files) if sample_drawset is None: # assemble sample from csv files sampler_args = SamplerArgs() args = CmdStanArgs( self._name, self._exe_file, chain_ids=[x + 1 for x in range(chains)], method_args=sampler_args, ) runset = RunSet(args=args, chains=chains) runset._csv_files = sample_csv_files sample_fit = CmdStanMCMC(runset) sample_fit._validate_csv_files() sample_drawset = sample_fit.get_drawset() except ValueError as e: raise ValueError( 'Invalid mcmc_sample, error:\n\t{}\n\t' ' while processing files\n\t{}'.format( repr(e), '\n\t'.join(sample_csv_files)) ) generate_quantities_args = GenerateQuantitiesArgs( csv_files=sample_csv_files ) generate_quantities_args.validate(chains) with MaybeDictToFilePath(data, None) as (_data, _inits): args = CmdStanArgs( self._name, self._exe_file, chain_ids=[x + 1 for x in range(chains)], data=_data, seed=seed, output_basename=gq_csv_basename, method_args=generate_quantities_args, ) runset = RunSet(args=args, chains=chains) cores_avail = cpu_count() cores = max(min(cores_avail - 2, chains), 1) with ThreadPoolExecutor(max_workers=cores) as executor: for i in range(chains): executor.submit(self._run_cmdstan, runset, i) if not runset._check_retcodes(): msg = 'Error during generate_quantities' for i in range(chains): if runset._retcode(i) != 0: msg = '{}, chain {} returned error code {}'.format( msg, i, runset._retcode(i) ) raise RuntimeError(msg) quantities = CmdStanGQ(runset=runset, mcmc_sample=sample_drawset) quantities._set_attrs_gq_csv_files(sample_csv_files[0]) 
return quantities