Example 1
0
    def set_basic_conf(self):
        """Build a two-parameter SigOpt search space and its cost function.

        Returns:
            A ``(search_alg, cost)`` pair: the configured ``SigOptSearch``
            instance and a trainable that reports ``mean_loss``.
        """
        # Integer search dimensions: width in [0, 20], height in [-100, 100].
        space = [
            {"name": "width", "type": "int", "bounds": {"min": 0, "max": 20}},
            {"name": "height", "type": "int", "bounds": {"min": -100, "max": 100}},
        ]

        def cost(space, reporter):
            # Quadratic in height (optimum at 14), offset by distance of
            # width from 3.
            loss = (space["height"] - 14) ** 2 - abs(space["width"] - 3)
            reporter(mean_loss=loss)

        search_alg = SigOptSearch(
            space,
            name="SigOpt Example Experiment",
            max_concurrent=1,
            metric="mean_loss",
            mode="min",
        )
        return search_alg, cost
Example 2
0
def sigopt_experiment(config):
    """Run a Ray Tune experiment driven by the SigOpt search algorithm.

    Requires environment variable SIGOPT_KEY.

    Args:
        config: Experiment spec. Must contain a "name" key and a "config"
            dict; ``config["config"]`` must provide "params_space" and may
            provide "performance_metric" (defaults to "val_acc").

    Raises:
        ValueError: If ``config["config"]`` lacks a "params_space" entry.
    """

    exp_config = config["config"]
    if "params_space" not in exp_config:
        # Fail fast: SigOpt cannot build an experiment without a search space.
        raise ValueError("SigOpt experiment requires a params_space")
    if "performance_metric" not in exp_config:
        exp_config["performance_metric"] = "val_acc"

    # define algorithm
    algo = SigOptSearch(
        exp_config["params_space"],
        name=config["name"],
        # manually define max concurrent
        max_concurrent=5,
        reward_attr=exp_config["performance_metric"],
    )

    tune.run(RayTrainable, search_alg=algo, **config)
Example 3
0
    # Command-line flag for CI: shrink sampling budgets so the run finishes
    # quickly during smoke tests.
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()

    # Single continuous search dimension: w1 in [0, 1].
    space = [
        {
            "name": "w1",
            "type": "double",
            "bounds": {
                "min": 0,
                "max": 1
            },
        },
    ]

    # Multi-objective search: maximize "average", minimize "std"; "sharpe"
    # appears to be tracked as an observation only (mode "obs") — TODO confirm
    # against the SigOptSearch API of the installed Ray version.
    algo = SigOptSearch(
        space,
        name="SigOpt Example Multi Objective Experiment",
        observation_budget=10 if args.smoke_test else 1000,
        max_concurrent=1,
        metric=["average", "std", "sharpe"],
        mode=["max", "min", "obs"])

    analysis = tune.run(
        easy_objective,
        name="my_exp",
        search_alg=algo,
        num_samples=10 if args.smoke_test else 1000,
        config={"total_weight": 1})
    print("Best hyperparameters found were: ", analysis.best_config)
Example 4
0
                    "name": "normal",
                    "scale": 0.2
                },
                "type": "double",
            },
        ],
        metrics=[
            dict(name="std", objective="minimize", strategy="optimize"),
            dict(name="average", strategy="store"),
        ],
        observation_budget=samples,
        parallel_bandwidth=1,
    )

    # Attach the searcher to an experiment that already exists on the SigOpt
    # service (identified by connection + experiment_id) rather than creating
    # a new one. "average" is tracked as an observation only ("obs");
    # "std" is minimized.
    algo = SigOptSearch(
        connection=conn,
        experiment_id=experiment.id,
        name="SigOpt Example Existing Experiment",
        metric=["average", "std"],
        mode=["obs", "min"],
    )

    analysis = tune.run(easy_objective,
                        name="my_exp",
                        search_alg=algo,
                        num_samples=samples,
                        config={})

    # Best config is selected by lowest observed "average" here — note this
    # differs from the experiment's own optimized metric ("std").
    print("Best hyperparameters found were: ",
          analysis.get_best_config("average", "min"))
Example 5
0
    def set_basic_conf(self):
        """Create a SigOpt search over width/height with seeded start points.

        Returns:
            A ``(search_alg, cost)`` pair: the configured ``SigOptSearch``
            instance and a trainable that reports ``loss``.
        """
        # Integer search dimensions: width in [0, 20], height in [-100, 100].
        space = [
            {"name": "width", "type": "int", "bounds": {"min": 0, "max": 20}},
            {"name": "height", "type": "int", "bounds": {"min": -100, "max": 100}},
        ]

        def cost(space, reporter):
            # Quadratic in height (optimum at 14), offset by distance of
            # width from 3.
            reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))

        # Unfortunately, SigOpt doesn't allow setting of random state. Thus,
        # we always end up with different suggestions, which is unsuitable
        # for the warm start test. Here we make do with points_to_evaluate,
        # and ensure that state is preserved over checkpoints and restarts.
        widths = [5, 10, 15, 5, 10, 15, 5, 10, 15, 12]
        heights = [20, -20, 30, -30, 40, -40, 50, -50, 60, -60]
        points = [
            {"width": w, "height": h} for w, h in zip(widths, heights)
        ]

        search_alg = SigOptSearch(
            space,
            name="SigOpt Example Experiment",
            max_concurrent=1,
            metric="loss",
            mode="min",
            points_to_evaluate=points,
        )
        return search_alg, cost
Example 6
0
            "name": "width",
            "type": "int",
            "bounds": {
                "min": 0,
                "max": 20
            },
        },
        {
            "name": "height",
            "type": "int",
            "bounds": {
                "min": -100,
                "max": 100
            },
        },
    ]
    # Minimize the reported "mean_loss", evaluating one trial at a time.
    algo = SigOptSearch(space,
                        name="SigOpt Example Experiment",
                        max_concurrent=1,
                        metric="mean_loss",
                        mode="min")
    # Early-stopping scheduler configured with the same metric/mode as the
    # searcher so both agree on what "better" means.
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    analysis = tune.run(easy_objective,
                        name="my_exp",
                        search_alg=algo,
                        scheduler=scheduler,
                        num_samples=10 if args.smoke_test else 1000,
                        config={"steps": 10})

    print("Best hyperparameters found were: ", analysis.best_config)
Example 7
0
        },
        {
            "name": "height",
            "type": "int",
            "bounds": {
                "min": -100,
                "max": 100
            },
        },
    ]

    # Shared tune.run() keyword arguments; smoke tests use a reduced sample
    # budget. The "stop" criterion ends each trial after 100 reported
    # timesteps.
    config = {
        "num_samples": 10 if args.smoke_test else 1000,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        },
    }
    # NOTE(review): reward_attr looks like the legacy spelling of metric/mode
    # from older Ray versions (the attribute is maximized) — confirm against
    # the installed Ray release before modernizing.
    algo = SigOptSearch(space,
                        name="SigOpt Example Experiment",
                        max_concurrent=1,
                        reward_attr="neg_mean_loss")
    scheduler = AsyncHyperBandScheduler(reward_attr="neg_mean_loss")
    run(easy_objective,
        name="my_exp",
        search_alg=algo,
        scheduler=scheduler,
        **config)
Example 8
0
                "min": 0,
                "max": 20
            },
        },
        {
            "name": "height",
            "type": "int",
            "bounds": {
                "min": -100,
                "max": 100
            },
        },
    ]
    # Minimize "mean_loss". No max_concurrent is passed here, so the
    # searcher's default concurrency applies — TODO confirm the default for
    # the installed Ray version.
    algo = SigOptSearch(
        space,
        name="SigOpt Example Experiment",
        metric="mean_loss",
        mode="min",
    )
    # Early-stopping scheduler aligned with the searcher's metric/mode.
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    analysis = tune.run(
        easy_objective,
        name="my_exp",
        search_alg=algo,
        scheduler=scheduler,
        num_samples=4 if args.smoke_test else 100,
        config={"steps": 10},
    )

    print(
        "Best hyperparameters found were: ",
        analysis.get_best_config("mean_loss", "min"),