Example #1
0
def main():
    """Bind every hyperparameter found in this file to a CLI flag, then run."""
    arg_parser = argparse.ArgumentParser()

    # Scan this very file for _(...) hyperparameter occurrences.
    _.parse_file(__file__)

    # Expose each discovered hyperparameter as a command-line option.
    hpargparse.bind(arg_parser, _)
    arg_parser.parse_args()

    func()
Example #2
0
File: main.py  Project: zilunzhang/hpman
def main():
    """Wire hpman-discovered hyperparameters to explicit argparse flags."""
    # Directory containing this script; hpman scans it for _(...) calls.
    here = os.path.dirname(os.path.realpath(__file__))
    _.parse_file(here)

    cli = argparse.ArgumentParser()
    cli.add_argument("-a", default=_.get_value("a"), type=int)
    cli.add_argument("-b", default=_.get_value("b"), type=int)
    options = cli.parse_args()

    # Push the (possibly overridden) CLI values back into the store.
    _.set_value("a", options.a)
    _.set_value("b", options.b)

    print("a = {}".format(_.get_value("a")))
    print("b = {}".format(_.get_value("b")))
    print("lib.add() = {}".format(lib.add()))
    print("lib.mult() = {}".format(lib.mult()))
Example #3
0
def main():
    """Demonstrate hpargparse binding on top of an ordinary argparse parser."""
    cli = argparse.ArgumentParser()

    # Ordinary argparse usage still works alongside hpargparse.
    cli.add_argument(dest="predefined_arg")

    # Scan the whole project directory for hyperparameters.
    _.parse_file(BASE_DIR)  # <-- IMPORTANT

    # bind monkey-patches parse_args so flags override hyperparameter values.
    hpargparse.bind(cli, _)  # <-- IMPORTANT

    # Parse the command line and write values back into the store.
    parsed = cli.parse_args()

    # Anything imported afterwards sees the final hyperparameter values.
    import lib

    print("a = {}".format(_.get_value("a")))
    print("b = {}".format(_.get_value("b")))
    print("lib.add() = {}".format(lib.add()))
    print("lib.mult() = {}".format(lib.mult()))
Example #4
0
import argparse

from hpman.hpm_db import L
from hpman.m import _

# Declare the hyperparameter inline; its hints (the choices) are recovered
# below by re-parsing this file with hpman.
_("optimizer", "adam", choices=["adam", "sgd"])

if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    _.parse_file(__file__)

    # Find every recorded occurrence of "optimizer" and keep the first one
    # that actually carries hints.
    matches = _.db.select(lambda row: row.name == "optimizer")
    hinted = [entry for entry in matches if entry["hints"] is not None]
    record = hinted[0]

    cli.add_argument(
        "--optimizer",
        default=record["value"],
        choices=record["hints"]["choices"],
    )
    options = cli.parse_args()

    print("optimizer: {}".format(options.optimizer))
Example #5
0
def main():
    """Train and evaluate a model on MNIST with hpman-managed hyperparameters.

    Hyperparameters are declared inline via ``_("name", default)`` calls and
    exposed as command-line flags by ``hpargparse.bind``.

    NOTE(review): relies on module-level names (``_``, ``hpargparse``,
    ``BASE_DIR``, ``yaml``, ``optim``, ``functools``, ``torch``, ``np``,
    ``tqdm``) imported elsewhere in this file.
    """
    parser = argparse.ArgumentParser()
    # Scan the project directory for _(...) hyperparameter occurrences.
    _.parse_file(BASE_DIR)
    # Monkey-patch parser.parse_args so CLI flags can override the defaults.
    hpargparse.bind(parser, _)
    parser.parse_args()  # parsed values land in _; the Namespace is unused

    # print all hyperparameters
    print("-" * 10 + " Hyperparameters " + "-" * 10)
    print(yaml.dump(_.get_values()))

    # Map the "optimizer" hyperparameter onto its torch constructor.
    optimizer_cls = {
        "adam": optim.Adam,
        "sgd": functools.partial(optim.SGD, momentum=0.9),
    }[_("optimizer", "adam")  # <-- hyperparameter
      ]

    # NOTE(review): imported inside main, presumably so model.py's own _(...)
    # calls run after the CLI overrides above — confirm against model.py.
    import model

    net = model.get_model()
    if torch.cuda.is_available():
        net.cuda()

    optimizer = optimizer_cls(
        net.parameters(),
        lr=_("learning_rate", 1e-3),  # <-- hyperparameter
        weight_decay=_("weight_decay", 1e-5),  # <-- hyperparameter
    )

    import dataset

    train_ds = dataset.get_data_and_labels("train")
    test_ds = dataset.get_data_and_labels("test")
    if torch.cuda.is_available():
        # since mnist is a small dataset, we store the test dataset all in the
        # gpu memory
        test_ds = {k: v.cuda() for k, v in test_ds.items()}

    # Seeded RNG so batch shuffling is reproducible across runs.
    rng = np.random.RandomState(_("seed", 42))  # <-- hyperparameter

    for epoch in range(_("num_epochs", 30)):  # <-- hyperparameter
        net.train()
        tq = tqdm(
            enumerate(
                dataset.iter_dataset_batch(
                    rng,
                    train_ds,
                    _("batch_size", 256),  # <-- hyperparameter
                    cuda=torch.cuda.is_available(),
                )))
        for step, minibatch in tq:
            optimizer.zero_grad()

            Y_pred = net(minibatch["data"])
            loss = model.compute_loss(Y_pred, minibatch["labels"])

            loss.backward()
            optimizer.step()

            # Progress-bar text: epoch, step, and the current metric values.
            metrics = model.compute_metrics(Y_pred, minibatch["labels"])
            metrics["loss"] = loss.detach().cpu().numpy()
            tq.desc = "e:{} s:{} {}".format(
                epoch,
                step,
                " ".join([
                    "{}:{}".format(k, v) for k, v in sorted(metrics.items())
                ]),
            )

        net.eval()

        # since mnist is a small dataset, we predict all values at once.
        Y_pred = net(test_ds["data"])
        metrics = model.compute_metrics(Y_pred, test_ds["labels"])
        print("eval: {}".format(" ".join(
            ["{}:{}".format(k, v) for k, v in sorted(metrics.items())])))