from hpman.m import _
from torch import nn

# EnsureFloat, ConvBNReLU and GlobalAveragePooling are helper modules
# defined elsewhere in this example.


def get_model():
    base_channel = _("base_channel", 32)  # <-- hyperparameter
    in_channels = 1  # _('input_channels', 1)
    return nn.Sequential(
        EnsureFloat(),
        ConvBNReLU(in_channels, base_channel, 3, stride=2, padding=1),
        ConvBNReLU(base_channel, base_channel * 2, 3, stride=2, padding=1),
        ConvBNReLU(base_channel * 2, base_channel * 4, 3, stride=2, padding=1),
        GlobalAveragePooling(),
        nn.Linear(base_channel * 4, 10),
    )
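A quick shape check of the model above, as a sketch: it assumes the EnsureFloat, ConvBNReLU and GlobalAveragePooling helpers from this example are importable and that GlobalAveragePooling reduces feature maps to shape (N, C). With the default base_channel of 32, a 1x28x28 MNIST minibatch should map to 10 logits.

import torch

net = get_model()
dummy = torch.zeros(4, 1, 28, 28)  # a fake MNIST minibatch
print(net(dummy).shape)  # expected: torch.Size([4, 10])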
from hpman.m import _

a = _("a", {"key": 1})
b = _("b", [1, 2, 3])
c = _("c", 23)
from hpman.m import _

a = _("a", {"key": 1})
b = _("b", [1, 2, 3])
from hpman.m import _

_("num_channels", 128)
_("num_layers", 50)
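Hyperparameters defined this way can then be collected by static parsing. A minimal sketch, assuming the snippet above is saved as lib.py in the current directory:

from hpman.m import _

_.parse_file("lib.py")  # statically scan lib.py for _(...) calls
print(_.get_values())  # -> {'num_channels': 128, 'num_layers': 50}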
import argparse

from hpman.hpm_db import L
from hpman.m import _

_("optimizer", "adam", choices=["adam", "sgd"])

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    _.parse_file(__file__)

    occurrences = _.db.select(lambda row: row.name == "optimizer")
    oc = [oc for oc in occurrences if oc["hints"] is not None][0]
    choices = oc["hints"]["choices"]
    value = oc["value"]

    parser.add_argument("--optimizer", default=value, choices=choices)

    args = parser.parse_args()
    print("optimizer: {}".format(args.optimizer))
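For reference, a sketch of the same hint lookup driven in-process rather than from the command line; it assumes the script above is saved as hint.py next to this one:

import argparse

from hpman.m import _

_.parse_file("hint.py")  # collect the hyperparameter and its hints
occurrences = _.db.select(lambda row: row.name == "optimizer")
oc = [o for o in occurrences if o["hints"] is not None][0]

parser = argparse.ArgumentParser()
parser.add_argument("--optimizer", default=oc["value"], choices=oc["hints"]["choices"])
print(parser.parse_args(["--optimizer", "sgd"]).optimizer)  # sgd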
def mult():
    return _("a") * _("b")
def add():
    return _("a", 1) + _("b", 2)
def add():
    return _("a", 0) + _("b", 0)
import argparse
import functools
import os

import hpargparse
import numpy as np
import torch
import yaml
from hpman.m import _
from torch import optim
from tqdm import tqdm

# directory to statically parse for hyperparameters
BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def main():
    parser = argparse.ArgumentParser()
    _.parse_file(BASE_DIR)
    hpargparse.bind(parser, _)
    parser.parse_args()  # no need to keep the returned args here

    # print all hyperparameters
    print("-" * 10 + " Hyperparameters " + "-" * 10)
    print(yaml.dump(_.get_values()))

    optimizer_cls = {
        "adam": optim.Adam,
        "sgd": functools.partial(optim.SGD, momentum=0.9),
    }[
        _("optimizer", "adam")  # <-- hyperparameter
    ]

    import model

    net = model.get_model()
    if torch.cuda.is_available():
        net.cuda()

    optimizer = optimizer_cls(
        net.parameters(),
        lr=_("learning_rate", 1e-3),  # <-- hyperparameter
        weight_decay=_("weight_decay", 1e-5),  # <-- hyperparameter
    )

    import dataset

    train_ds = dataset.get_data_and_labels("train")
    test_ds = dataset.get_data_and_labels("test")

    if torch.cuda.is_available():
        # since mnist is a small dataset, we keep the entire test set in
        # gpu memory
        test_ds = {k: v.cuda() for k, v in test_ds.items()}

    rng = np.random.RandomState(_("seed", 42))  # <-- hyperparameter

    for epoch in range(_("num_epochs", 30)):  # <-- hyperparameter
        net.train()
        tq = tqdm(
            enumerate(
                dataset.iter_dataset_batch(
                    rng,
                    train_ds,
                    _("batch_size", 256),  # <-- hyperparameter
                    cuda=torch.cuda.is_available(),
                )
            )
        )
        for step, minibatch in tq:
            optimizer.zero_grad()
            Y_pred = net(minibatch["data"])
            loss = model.compute_loss(Y_pred, minibatch["labels"])
            loss.backward()
            optimizer.step()

            metrics = model.compute_metrics(Y_pred, minibatch["labels"])
            metrics["loss"] = loss.detach().cpu().numpy()
            tq.desc = "e:{} s:{} {}".format(
                epoch,
                step,
                " ".join(
                    ["{}:{}".format(k, v) for k, v in sorted(metrics.items())]
                ),
            )

        net.eval()
        # since mnist is a small dataset, we predict the whole test set at once
        Y_pred = net(test_ds["data"])
        metrics = model.compute_metrics(Y_pred, test_ds["labels"])
        print("eval: {}".format(
            " ".join(["{}:{}".format(k, v) for k, v in sorted(metrics.items())])
        ))
def add():
    # define hyperparameters on the fly, with default values
    return _("a", 1) + _("b", 2)
def mult():
    # reuse pre-defined hyperparameters
    return _("a") * _("b")
from hpman.m import _


def func():
    pass


hpx = _("1-hpx", 123)
hpp = _("2-hpp", func())
xxx = _("3-xxx", {"a": 1, "b": 2})
bbb = _("4-bbb", ["a", 1, 4])
ccc = _("5-ccc", ["a", 1, 4])
xxa = _("6-xxa", 1.24)
fff = _("7-fff", 1e-5)
ggg = _("8-ggg", 1 // 2)
hhh = _("9-hhh", print)
def func():
    weight_decay = _("weight_decay", 1e-5)
    print("weight decay is {}".format(weight_decay))
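A small driver sketch, not part of the original snippet, showing how a runtime override propagates into func(); it assumes func is defined in this same file:

from hpman.m import _

_.parse_file(__file__)  # register weight_decay and its default
_.set_value("weight_decay", 1e-4)  # override before first use
func()  # prints: weight decay is 0.0001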
from hpman.m import _

_("str_from", "source_code")
from hpman.m import _

# forward static parsing
_.parse_file(__file__)
print(_.get_value("learning_rate"))  # 0.001, the default defined below

# define hyperparameters
learning_rate = _("learning_rate", 1e-3)

# override default value
_.set_value("learning_rate", 1e-2)
print(_.get_value("learning_rate"))  # 0.01