def load_xval(log_dir):
    """Load a merged cross-validation result set from a previous run directory.

    Args:
        log_dir: Directory containing the cached run (the YAML spec located by
            ``locate_yml`` plus the stored results).

    Returns:
        An ``XvalMerge`` instance with the stored results loaded.
    """
    # BUG FIX: the original immediately overwrote the log_dir parameter with a
    # hardcoded '.\\.vihds_cache', making the argument dead. The caller's
    # directory is now honored.
    # Load a spec (YAML)
    parser = create_parser(True)
    yaml = locate_yml(log_dir)
    args = parser.parse_args([yaml])
    settings = Config(args)
    settings.trainer = Trainer(args, log_dir=log_dir)
    res = XvalMerge(args, settings)
    res.load()
    return res
def load_cache(yaml):
    """Build an XvalMerge holding a single cached fold loaded from *yaml*.

    Args:
        yaml: Path to the YAML spec file describing the run.

    Returns:
        A finalized ``XvalMerge`` containing the cached fold's results.
    """
    arg_parser = create_parser(True)
    parsed = arg_parser.parse_args([yaml])
    cfg = Config(parsed)
    # Rebuild the dataset pair exactly as the original run saw it
    datasets = build_datasets(parsed, cfg, cfg.data.load)
    cfg.trainer = Trainer(parsed, log_dir=".")
    # Pull the stored per-fold results back off disk
    fold_results = Results()
    fold_results.load()
    fold_results.elbo_list = [fold_results.elbo]
    # Merge the single fold into an XvalMerge container
    merged = XvalMerge(parsed, cfg)
    merged.add(1, datasets, fold_results)
    merged.finalize()
    return merged
def process_node_args(name, yamlargs, graph_name):
    """Translate a graph node's YAML settings into parsed command-line args.

    Args:
        name: Node name, used in error messages.
        yamlargs: YAML mapping for the node (supports ``in`` and attribute
            access, e.g. ``yamlargs.spec``).
        graph_name: Graph name, prefixed onto the node's experiment path.

    Returns:
        The argparse ``Namespace`` produced by ``rxval``'s parser.

    Raises:
        ValueError: If the node lacks a 'spec' or 'experiment' property.
    """
    split = False
    argarr = []
    # Should this also see if any one of those 3 arguments are in the command line?
    if "split" in yamlargs:
        # BUG FIX: the original had a bare no-op `True` expression here, so the
        # split flag was never actually set before create_parser(split) below.
        split = True
        argarr.append("--split=" + str(yamlargs.split))
    # 'spec' and 'experiment' are mandatory node properties
    if "spec" in yamlargs:
        argarr.append(yamlargs.spec)
    else:
        raise ValueError("Node " + name + " missing spec property")
    if "experiment" in yamlargs:
        argarr.append("--experiment=" + graph_name + "/" + yamlargs.experiment)
    else:
        raise ValueError("Node " + name + " missing experiment property")
    # Optional settings are forwarded only when present in the YAML node
    for opt in ("seed", "train_samples", "test_samples", "epochs",
                "test_epoch", "plot_epoch", "gpu"):
        if opt in yamlargs:
            argarr.append("--" + opt + "=" + str(getattr(yamlargs, opt)))
    # Should probably add other arguments as well...
    parser = rxval.create_parser(split)
    args = parser.parse_args(argarr)
    return args
def test_shapes():
    """Check dataset sizes, batch tensor shapes, and encoder output shape."""
    # yml = 'specs/dr_constant_one.yaml'  # Single files
    spec = "specs/dr_constant_icml.yaml"  # Multiple files
    arg_parser = create_parser(True)
    parsed = arg_parser.parse_args([spec])
    cfg = Config(parsed)
    data = build_datasets(parsed, cfg)
    params = Parameters(cfg.params)
    model = build_model(parsed, cfg, data, params)

    # Test dataset size: 312 examples split across the requested folds
    folds = parsed.folds
    assert data.n_train == 312 * (folds - 1) / folds, "Training set the correct size"
    assert data.n_test == 312 / folds, "Test set the correct size"

    # Test batch loader
    batch_size = 36
    loader = DataLoader(dataset=data.train, batch_size=batch_size, shuffle=True)
    batch = batch_to_device(data.train.dataset.times, cfg.device, next(iter(loader)))
    assert batch.devices.shape == torch.Size([batch_size]), "Batch has right shape for 'devices'"
    assert batch.dev_1hot.shape == torch.Size(
        [batch_size, cfg.data.device_depth]
    ), "Batch has right shape for 'dev_1hot'"
    assert batch.inputs.shape == torch.Size(
        [batch_size, len(cfg.data.conditions)]
    ), "Batch has right shape for 'inputs'"
    assert batch.observations.shape == torch.Size(
        [batch_size, 4, 86]
    ), "Batch has right shape for 'observations'"

    # Test conditional encoder shape on first differences of the observations
    delta_obs = batch.observations[:, :, 1:] - batch.observations[:, :, :-1]
    q = model.encoder.conditional(delta_obs)
    print("q:", q.shape)
    assert q.shape == (batch_size, cfg.params.n_hidden), "Shape of encoder output"
def run(yml):
    """Train briefly on *yml* and assert no NaN gradients appear in q.

    Args:
        yml: Path to the YAML spec file to train against.
    """
    # Redirect all inference output into a throwaway temp directory
    results_dir = tempfile.mkdtemp()
    os.environ["INFERENCE_RESULTS_DIR"] = results_dir

    n_samples = 20
    arg_parser = create_parser(True)
    parsed = arg_parser.parse_args([
        "--train_samples=%d" % n_samples,
        "--test_samples=%d" % n_samples,
        "--test_epoch=5",
        "--plot_epoch=0",
        "--epochs=5",
        "--seed=0",
        yml,
    ])
    cfg = Config(parsed)
    cfg.trainer = Trainer(parsed, add_timestamp=True)
    data = build_datasets(parsed, cfg)
    params = Parameters(cfg.params)
    model = build_model(parsed, cfg, data, params)
    training = Training(parsed, cfg, data, params, model)
    training.model.train()

    # Evaluate the encoder to produce a q, then backprop one ELBO step
    batch = batch_to_device(data.train.dataset.times, cfg.device, training.train_data)
    batch_results, theta, q, p = training.model(batch, parsed.train_samples)
    elbo = training.cost(batch, batch_results, theta, q, p).elbo
    elbo.backward()

    # Collect every distribution parameter whose gradient contains a NaN
    nans = [
        "%s.%s" % (dist_name, pname)
        for dist_name, dist in q.distributions.items()
        for pname in dist.param_names
        if getattr(dist, pname).grad is not None
        and torch.isnan(getattr(dist, pname).grad).any()
    ]
    assert len(nans) == 0, "NaN gradients for %s" % (", ".join(nans))
def main():
    """Entry point: parse command-line args, build config, run execute()."""
    arg_parser = create_parser(False)
    parsed = arg_parser.parse_args()
    cfg = Config(parsed)
    # Timestamped trainer so each invocation writes to a fresh log directory
    cfg.trainer = Trainer(parsed, add_timestamp=True)
    execute(parsed, cfg)
from vihds.run_xval import create_parser, run_on_split
from vihds.config import Config, Trainer

# Profiling driver: sets up a short blackbox training run under cProfile.
# NOTE(review): tempfile, os, and cProfile are used below but not imported in
# this chunk — presumably imported earlier in the file; verify.
if __name__ == "__main__":
    # Send inference output to a throwaway temp directory
    results_dir = tempfile.mkdtemp()
    os.environ["INFERENCE_RESULTS_DIR"] = results_dir
    # yml = 'specs\\dr_constant_one.yaml'  # Single files
    # yml = 'specs\\dr_constant_icml.yaml'  # Multiple files
    # yml = 'specs\\dr_constant_precisions.yaml'  # Multiple files
    yml = "specs\\dr_blackbox_icml.yaml"  # Multiple files
    samples = 20
    # NOTE(review): `epochs` is unused — "--epochs=5" below is hardcoded
    # rather than formatted from it; consider "--epochs=%d" % epochs.
    epochs = 5
    parser = create_parser(True)
    args = parser.parse_args(
        [
            "--train_samples=%d" % samples,
            "--test_samples=%d" % samples,
            "--test_epoch=5",
            "--plot_epoch=0",
            "--epochs=5",
            "--seed=0",
            yml,
        ]
    )
    settings = Config(args)
    settings.trainer = Trainer(args, add_timestamp=True)
    # Profiler is created here; the profiled run continues past this chunk.
    pr = cProfile.Profile()