def testMetric(self):
    """Checks that values passed to track.log() land in the result file."""
    track.init(trial_name="test_log")
    session = track.get_session()
    for step in range(5):
        track.log(test=step)
    result_path = os.path.join(session.logdir, EXPR_RESULT_FILE)
    # Only the final logged value (step == 4) is checked here.
    self.assertTrue(_check_json_val(result_path, "test", step))
def train_mnist(args):
    """Trains a small convnet on MNIST, reporting metrics through `track`.

    Args:
        args: parsed CLI namespace; uses .smoke_test, .hidden, .lr, .momentum.
    """
    track.init(trial_name="track-example", trial_config=vars(args))

    batch_size = 128
    num_classes = 10
    # A smoke test runs a single epoch so CI finishes quickly.
    epochs = 1 if args.smoke_test else 12

    mnist.load()
    x_train, y_train, x_test, y_test, input_shape = get_mnist_data()

    # Conv -> Conv -> Pool -> Dense classifier head, with dropout for
    # regularization before each dense layer.
    model = Sequential()
    model.add(
        Conv2D(
            32,
            kernel_size=(3, 3),
            activation="relu",
            input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(args.hidden, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))

    # NOTE(review): `lr=` is the legacy Keras kwarg (newer Keras uses
    # `learning_rate=`); kept as-is to match the Keras version this file targets.
    model.compile(
        loss="categorical_crossentropy",
        optimizer=keras.optimizers.SGD(lr=args.lr, momentum=args.momentum),
        metrics=["accuracy"])

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(x_test, y_test),
        callbacks=[TuneKerasCallback(track.metric)])
    track.shutdown()
def train_mnist(args):
    """Trains a single-hidden-layer MLP on MNIST via tf.keras.

    Args:
        args: parsed CLI namespace; uses .smoke_test, .hidden, .lr, .momentum.
    """
    track.init(trial_name="track-example", trial_config=vars(args))

    batch_size = 128
    num_classes = 10
    # A smoke test runs a single epoch so CI finishes quickly.
    epochs = 1 if args.smoke_test else 12

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Scale pixel intensities from [0, 255] into [0, 1].
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(args.hidden, activation="relu"),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(num_classes, activation="softmax"),
    ])

    # NOTE(review): `lr=` is the legacy Keras kwarg (newer Keras uses
    # `learning_rate=`); kept as-is to match the Keras version this file targets.
    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=keras.optimizers.SGD(lr=args.lr, momentum=args.momentum),
        metrics=["accuracy"])

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(x_test, y_test),
        callbacks=[TuneReporterCallback()])
    track.shutdown()
def testLocalMetrics(self):
    """Checks that metric state is updated correctly."""
    track.init(trial_name="test_logs")
    session = track.get_session()
    # With no user config supplied, only the auto-generated trial_id is stored.
    self.assertEqual(set(session.trial_config.keys()), {"trial_id"})

    result_path = os.path.join(session.logdir, EXPR_RESULT_FILE)
    track.log(test=1)
    self.assertTrue(_check_json_val(result_path, "test", 1))
    # A later log with an explicit iteration overwrites the metric value.
    track.log(iteration=1, test=2)
    self.assertTrue(_check_json_val(result_path, "test", 2))
def testSessionInitShutdown(self):
    """Checks that the singleton _session is created/destroyed
    by track.init() and track.shutdown()."""
    self.assertTrue(track._session is None)
    # Run the cycle twice to verify the session can be reopened after shutdown.
    for _ in range(2):
        track.init(trial_name="test_init")
        self.assertTrue(track._session is not None)
        track.shutdown()
        self.assertTrue(track._session is None)
def testLogCreation(self):
    """Checks that track.init() starts logger and creates log files."""
    track.init(trial_name="test_init")
    session = track.get_session()
    self.assertTrue(session is not None)
    self.assertTrue(os.path.isdir(session.logdir))

    # Both the parameter file and the result file must exist immediately
    # after init, before anything is logged.
    params_path = os.path.join(session.logdir, EXPR_PARAM_FILE)
    result_path = os.path.join(session.logdir, EXPR_RESULT_FILE)
    self.assertTrue(os.path.exists(params_path))
    self.assertTrue(os.path.exists(result_path))

    # The module-level helper must agree with the session's own logdir.
    self.assertTrue(session.logdir == track.trial_dir())
def _trainable_func(self, config, reporter):
    """Runs the wrapped training function inside a track session.

    Wires the Tune `reporter` into track.init(), invokes the user function,
    then signals Tune that results were already reported via RESULT_DUPLICATE.
    """
    track.init(_tune_reporter=reporter)
    # NOTE(review): `train_func` is presumably captured from the enclosing
    # scope (not an attribute of self) — confirm against the full file.
    output = train_func(config)
    reporter(**{RESULT_DUPLICATE: True})
    track.shutdown()
    return output
# NOTE(review): this span was notebook residue — the imports appeared at the
# bottom, `track.init()` / `ray.tune.track.init()` were each invoked twice,
# and a bare `track.log` expression was a no-op. Reconstructed as a coherent
# script: imports first, a single track.init(), then the three sweeps.
import ray
from ray import tune
from ray.tune import track

track.init()

# `gridd` and `train` are trainables defined elsewhere in the project —
# TODO confirm their definitions before running.
analysis = tune.run(gridd, config={"lr": tune.grid_search([0.1, 0.3])})
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))

analysis = tune.run(
    train, config={"num_epochs": tune.grid_search([5, 10, 15, 20, 25])})
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))

analysis = tune.run(
    train, config={"dropout": tune.grid_search([0.1, 0.2, 0.3])})
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))

# Collect all trial results as a DataFrame for inspection.
analysis.dataframe()