# :py:func:`determined.Integer`, :py:func:`determined.Double`,
# :py:func:`determined.Categorical`, and :py:func:`determined.Log` are utility
# functions for specifying distributions.
#
# Next, use the ``searcher`` field to configure the desired hyperparameter
# search algorithm. In this case, we're configuring a :ref:`simple adaptive
# search <topic-guides_hp-tuning-det_adaptive-simple>` to optimize over five
# possible choices of hyperparameters. See :ref:`topic-guides_hp-tuning-det`
# for a full list of available hyperparameter tuning algorithms.
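#
# The experiment configuration itself is elided from this snippet. As a rough
# sketch (assuming ``import determined as det``; the ranges and searcher
# settings here are illustrative, not taken from the original), the ``config``
# passed to init() below might look like:
#
#     config = {
#         "name": "mnist_keras_native",
#         "hyperparameters": {
#             "num_units": det.Integer(64, 256),
#             "dropout": det.Double(0.1, 0.5),
#             "activation": det.Categorical(["relu", "tanh"]),
#             "global_batch_size": 32,
#         },
#         "searcher": {
#             "name": "adaptive_simple",
#             "metric": "val_accuracy",
#             "max_steps": 10,
#             "max_trials": 5,
#         },
#     }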

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# When running this code from a notebook, add a `command` argument to
# init() specifying the notebook file name.
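# For example (the notebook file name here is a placeholder):
#
#     context = init(config, mode=experimental.Mode.CLUSTER,
#                    context_dir=".", command=["my_notebook.ipynb"])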
context = init(config, mode=experimental.Mode.CLUSTER, context_dir=".")
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(context.get_hparam("num_units"),
                          activation=context.get_hparam("activation")),
    tf.keras.layers.Dropout(context.get_hparam("dropout")),
    tf.keras.layers.Dense(10, activation="softmax"),
])
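# wrap_model() registers the Keras model with the Determined context; the
# compile() and fit() calls below then run under Determined's management
# (metric reporting, checkpointing, and distributed training).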
model = context.wrap_model(model)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)

###############################################################################
#
# to set up a hyperparameter search space.  :py:func:`determined.Integer`,
# :py:func:`determined.Double`, :py:func:`determined.Categorical`, and
# :py:func:`determined.Log` are utility functions for specifying distributions.
#
# Next, use the ``searcher`` field to configure the desired hyperparameter
# search algorithm. In this case, we're configuring a :ref:`simple adaptive
# search <topic-guides_hp-tuning-det_adaptive-simple>` to optimize over five
# possible choices of hyperparameters. See :ref:`topic-guides_hp-tuning-det` for
# a full list of available hyperparameter tuning algorithms.
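#
# Note that the model below reads three hyperparameters back out of the
# context via context.get_hparam() (``num_units``, ``activation``, and
# ``dropout``), so the ``hyperparameters`` field of the config must define a
# value or distribution for each of those names.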

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# When running this code from a notebook, add a `command` argument to init()
# specifying the notebook file name.
context = init(config, context_dir=".")
model = tf.keras.models.Sequential(
    [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(
            context.get_hparam("num_units"), activation=context.get_hparam("activation")
        ),
        tf.keras.layers.Dropout(context.get_hparam("dropout")),
        tf.keras.layers.Dense(10, activation="softmax"),
    ]
)
model = context.wrap_model(model)
model.compile(
    optimizer=tf.keras.optimizers.Adam(name="Adam"),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)
Example #3
    parser.add_argument("--local", action="store_true")
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--use-dataset", action="store_true")
    args = parser.parse_args()

    config = {
        "hyperparameters": {
            "hidden_size": 2,
            "learning_rate": 0.1,
            "global_batch_size": 4,
            "trial_type": "default",
        }
    }
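    # Every hyperparameter here is a fixed value rather than a distribution,
    # so this config describes a single trial rather than a search space.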

    context = init(
        config=config, local=args.local, test=args.test, context_dir=str(pathlib.Path.cwd())
    )
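    # local=True runs training in this process instead of submitting an
    # experiment to the cluster; test=True runs an abbreviated smoke test
    # rather than a full training loop.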

    model = Sequential()
    model.add(Dense(context.get_hparam("hidden_size"), activation="sigmoid", input_shape=(2,)))
    model.add(Dense(1))

    if args.use_dataset:
        data, labels = utils.xor_data()
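        # `utils` is a helper module local to this test suite and not shown
        # here. A minimal stand-in for xor_data(), assuming it returns the
        # four XOR input/label pairs as NumPy arrays, would be:
        #
        #     data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], np.float32)
        #     labels = np.array([0, 1, 1, 0], np.float32)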

        train = context.wrap_dataset(tf.data.Dataset.from_tensor_slices((data, labels)))
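        # wrap_dataset() registers the tf.data.Dataset with the Determined
        # context so it can be managed during training; the batch size below
        # comes from the experiment config's hyperparameters.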
        train = train.batch(context.get_hparam("global_batch_size"))
        valid = context.wrap_dataset(tf.data.Dataset.from_tensor_slices((data, labels)))
        valid = valid.batch(context.get_hparam("global_batch_size"))
    else:
        train, valid = utils.make_xor_data_sequences(batch_size=4)
Example #4

    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", dest="mode", default="cluster")
    parser.add_argument("--use-dataset", action="store_true")
    args = parser.parse_args()

    config = {
        "hyperparameters": {
            "hidden_size": 2,
            "learning_rate": 0.1,
            "global_batch_size": 4,
            "trial_type": "default",
        }
    }

    context = init(config=config,
                   mode=experimental.Mode(args.mode),
                   context_dir=str(pathlib.Path.cwd()))
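    # experimental.Mode accepts "cluster" (submit the experiment to a
    # Determined cluster) or "local" (run in this process), matching the
    # --mode flag above.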

    model = Sequential()
    model.add(
        Dense(context.get_hparam("hidden_size"),
              activation="sigmoid",
              input_shape=(2,)))
    model.add(Dense(1))

    if args.use_dataset:
        data, labels = utils.xor_data()

        train = context.wrap_dataset(
            tf.data.Dataset.from_tensor_slices((data, labels)))
        train = train.batch(context.get_hparam("global_batch_size"))