Code Example #1
File: ann.py Project: JoshuaEbenezer/attention_vqa
                ]  # each numpy file contains 36 brisque features of n frames

    def get_config(self):

        config = super().get_config().copy()
        config.update({
            'feature_length': self.feature_length,
        })
        return config


# read each file as an array
# read the score in from another file
logdir = "logs/hparams/cornia_best"  #+ datetime.now().strftime("%Y%m%d-%H%M%S")

HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([16, 32]))
HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.1, 0.01, 0.001, 0.0001]))
HP_LEARNING_RATE = hp.HParam('lr', hp.Discrete([0.1, 0.01, 0.001, 0.0001]))
METRIC_LOSS = 'epoch_loss'

HPARAMS = [HP_NUM_UNITS, HP_DROPOUT, HP_LEARNING_RATE]


def model_fn(hparams, seed):
    rng = random.Random(seed)
    x_train = tf.keras.Input(shape=(20000, ))
    #    l_gru = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(100, return_sequences=True,dropout=0.05))(x_train)
    # l_gru = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64, return_sequences=True,dropout=0.1))(l_gru)
    # l_gru = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32, return_sequences=True,dropout=0.1))(l_gru)

    # # print(l_gru)
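
A note on the snippet above: it cuts off inside model_fn, before the search space is registered. Definitions like HPARAMS and METRIC_LOSS are typically registered once with the TensorBoard writer before any runs are logged; a minimal sketch using the standard tensorboard.plugins.hparams API and the logdir defined above:

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# Register the search space and the tracked metric once, under the base log
# directory, so the HParams dashboard can group the individual runs.
with tf.summary.create_file_writer(logdir).as_default():
    hp.hparams_config(
        hparams=HPARAMS,
        metrics=[hp.Metric(METRIC_LOSS, display_name='Loss')],
    )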
Code Example #2
# noinspection DuplicatedCode
if __name__ == '__main__':
    tf_init()
    """ model runs """

    config.dataset_size = 1000
    config.train = True
    # train dim decided by model
    config.dataset_dim = 128
    config.augment = 2
    config.show = False
    config.scale = 0.25
    config.center_crop_fraction = 0.5

    # model params
    hp_base_width = hp.HParam('base_width', hp.Discrete([16, 32]))

    # non-model params
    hp_class_weights = hp.HParam('class_weights',
                                 hp.Discrete(['none', 'inv_freq', 'eff_num']))
    hp_ds_bg_samples = hp.HParam('ds_bg_samples', hp.Discrete([200, 700,
                                                               1000]))
    hp_aug_level = hp.HParam('aug_level', hp.Discrete([0, 1, 2, 3]))
    hp_scale = hp.HParam('scale', hp.Discrete([0.25, 0.5]))
    hp_crop_fraction = hp.HParam('crop_fraction', hp.Discrete([0.5, 1.0]))
    hparams = [
        hp_aug_level,
        hp_base_width,
        hp_class_weights,
        hp_crop_fraction,
        hp_ds_bg_samples,
Code Example #3

config['log_every'] = 10
config['model_fp'] = 'dense_emnist_federated_supervised_{}.h5'

config['optimizer'] = 'SGD'
config['nesterov'] = False
config['momentum'] = 0.99
config['decay'] = 0.0

config['pretrained_model_fp'] = (
    'saved_logs/dense_emnist_federated_unsup/run_0/dense_emnist_federated_unsup_final_model.h5')

######### EXPERIMENTAL PARAMETERS ###############
hparam_map = {}

hparam_map['supervised_mask_ratio'] = hp.HParam(
    'supervised_mask_ratio', hp.Discrete([0.0, 0.8, 0.9, 0.95, 0.99]))
hparam_map['unsupervised_mask_ratio'] = hp.HParam('unsupervised_mask_ratio',
                                                  hp.Discrete([0.0]))
hparam_map['mask_by'] = hp.HParam('mask_by', hp.Discrete(['example']))
hparam_map['dataset'] = hp.HParam('dataset', hp.Discrete(['emnist']))

hparam_map['batch_size'] = hp.HParam('batch_size', hp.Discrete([20]))
hparam_map['learning_rate'] = hp.HParam('learning_rate', hp.Discrete([0.001]))

hparam_map['num_clients_per_round'] = hp.HParam('num_clients_per_round',
                                                hp.Discrete([100]))
hparam_map['num_epochs'] = hp.HParam('num_epochs', hp.Discrete([10]))

hparam_map['fine_tune'] = hp.HParam('fine_tune', hp.Discrete([False]))

######### METRICS ###############################
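
The snippet cuts off at the metrics banner. Since every entry in hparam_map above uses a Discrete domain, one plausible driver enumerates the cross-product of the domains; a sketch (launch_run is hypothetical, standing in for whatever starts one federated training run):

import itertools

names = sorted(hparam_map)
domains = [hparam_map[name].domain.values for name in names]
for combo in itertools.product(*domains):
    run_config = dict(config)             # copy the static settings above
    run_config.update(zip(names, combo))  # overlay one hyperparameter combination
    # launch_run(run_config)              # hypothetical entry point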
Code Example #4
class CloudTunerUtilsTest(tf.test.TestCase, parameterized.TestCase):

    def test_convert_study_config_discrete(self):
        hps = hp_module.HyperParameters()
        hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
        study_config = utils.make_study_config(
            objective=oracle_module.Objective("val_accuracy", "max"),
            hyperparams=hps
        )
        self._assert_study_config_equal(study_config, STUDY_CONFIG_DISCRETE)

        actual_hps = utils.convert_study_config_to_hps(study_config)
        self._assert_hps_equal(actual_hps, hps)

    def test_convert_study_config_categorical(self):
        hps = hp_module.HyperParameters()
        hps.Choice("model_type", ["LINEAR", "WIDE_AND_DEEP"])
        study_config = utils.make_study_config(
            objective="accuracy", hyperparams=hps)
        self._assert_study_config_equal(study_config, STUDY_CONFIG_CATEGORICAL)

        actual_hps = utils.convert_study_config_to_hps(study_config)
        self._assert_hps_equal(actual_hps, hps)

    @parameterized.parameters(
        (1, 4, None, STUDY_CONFIG_INT),
        (1, 4, 1, STUDY_CONFIG_INT),
        (32, 128, 32, STUDY_CONFIG_INT_STEP))
    def test_convert_study_config_int(self, min_value, max_value, step,
                                      expected_config):
        hps = hp_module.HyperParameters()
        if step:
            hps.Int(
                "units", min_value=min_value, max_value=max_value, step=step)
        else:
            hps.Int("units", min_value=min_value, max_value=max_value)
        study_config = utils.make_study_config(
            objective="accuracy", hyperparams=hps)
        self._assert_study_config_equal(study_config, expected_config)

        actual_hps = utils.convert_study_config_to_hps(study_config)
        self._assert_hps_equal(actual_hps, hps)

    @parameterized.parameters(
        (0.1, 0.5, None, None, STUDY_CONFIG_FLOAT),
        (1, 2, 0.25, None, STUDY_CONFIG_FLOAT_STEP),
        (0.1, 0.8, None, "linear", STUDY_CONFIG_FLOAT_LINEAR_SCALE),
        (1e-4, 1e-1, None, "log", STUDY_CONFIG_FLOAT_LOG_SCALE),
        (1e-4, 1e-1, None, "reverse_log", STUDY_CONFIG_FLOAT_REVERSE_LOG_SCALE))
    def test_convert_study_config_float(self, min_value, max_value, step,
                                        sampling, expected_config):
        hps = hp_module.HyperParameters()
        hps.Float("learning_rate", min_value=min_value, max_value=max_value,
                  step=step, sampling=sampling)
        study_config = utils.make_study_config(
            objective="accuracy", hyperparams=hps)
        self._assert_study_config_equal(study_config, expected_config)

        actual_hps = utils.convert_study_config_to_hps(study_config)
        self._assert_hps_equal(actual_hps, hps)

    def test_convert_study_config_multi_float(self):
        hps = hp_module.HyperParameters()
        hps.Float("theta", min_value=0.0, max_value=1.57)
        hps.Float("r", min_value=0.0, max_value=1.0)
        study_config = utils.make_study_config(
            objective="accuracy", hyperparams=hps)
        self._assert_study_config_equal(study_config, STUDY_CONFIG_MULTI_FLOAT)

        actual_hps = utils.convert_study_config_to_hps(study_config)
        self._assert_hps_equal(actual_hps, hps)

    def test_convert_study_config_bool(self):
        hps = hp_module.HyperParameters()
        hps.Boolean("has_beta")
        study_config = utils.make_study_config(
            objective="accuracy", hyperparams=hps)
        self._assert_study_config_equal(study_config, STUDY_CONFIG_BOOL)

    @parameterized.parameters(
        ("beta", 0.1, STUDY_CONFIG_FIXED_FLOAT),
        ("type", "WIDE_AND_DEEP", STUDY_CONFIG_FIXED_CATEGORICAL),
        ("condition", True, STUDY_CONFIG_FIXED_BOOLEAN))
    def test_convert_study_config_fixed(self, name, value, expected_config):
        hps = hp_module.HyperParameters()
        hps.Fixed(name, value)
        study_config = utils.make_study_config(
            objective="accuracy", hyperparams=hps
        )
        self._assert_study_config_equal(study_config, expected_config)

    def test_convert_vizier_trial_to_dict(self):
        hps = hp_module.HyperParameters()
        hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
        params = utils.convert_vizier_trial_to_dict(OPTIMIZER_TRIAL)
        self.assertDictEqual(params, EXPECTED_TRIAL_HPS)

    def test_convert_vizier_trial_to_hps(self):
        hps = hp_module.HyperParameters()
        hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
        trial_hps = utils.convert_vizier_trial_to_hps(hps, OPTIMIZER_TRIAL)
        self.assertDictEqual(trial_hps.values, EXPECTED_TRIAL_HPS)

    def test_convert_completed_vizier_trial_to_keras_trial(self):
        hps = hp_module.HyperParameters()
        hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
        trial = utils.convert_completed_vizier_trial_to_keras_trial(
            COMPLETED_OPTIMIZER_TRIAL, hps)
        self.assertEqual(trial.trial_id, "trial_1")
        self.assertEqual(trial.score, 0.9)
        self.assertEqual(trial.best_step, 1)
        self.assertEqual(trial.status, trial_module.TrialStatus.COMPLETED)
        self.assertEqual(
            trial.hyperparameters.values, {"learning_rate": 0.0001})

    def test_convert_hyperparams_to_hparams_choice(self):
        hps = hp_module.HyperParameters()
        hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam("learning_rate",
                               hparams_api.Discrete([1e-4, 1e-3, 1e-2])): 1e-4,
        }
        self.assertEqual(repr(hparams), repr(expected_hparams))

    @parameterized.parameters(
        ("units", 2, 16, None, hparams_api.IntInterval(2, 16), 2),
        ("units", 32, 128, 32, hparams_api.Discrete([32, 64, 96, 128]), 32))
    def test_convert_hyperparams_to_hparams_int(self, name, min_value,
                                                max_value, step,
                                                expected_domain,
                                                expected_value):
        hps = hp_module.HyperParameters()
        if step:
            hps.Int(name, min_value=min_value, max_value=max_value, step=step)
        else:
            hps.Int(name, min_value=min_value, max_value=max_value)
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam(name, expected_domain): expected_value,
        }
        self.assertEqual(repr(hparams), repr(expected_hparams))

    @parameterized.parameters(
        ("learning_rate", 0.5, 1.5, 0.25,
         hparams_api.Discrete([0.5, 0.75, 1.0, 1.25, 1.5]), 0.5),
        ("learning_rate", 1e-4, 1e-1, None,
         hparams_api.RealInterval(1e-4, 1e-1), 1e-4))
    def test_convert_hyperparams_to_hparams_float(self, name, min_value,
                                                  max_value, step,
                                                  expected_domain,
                                                  expected_value):
        hps = hp_module.HyperParameters()
        hps.Float(name, min_value=min_value, max_value=max_value, step=step)
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam(name, expected_domain): expected_value,
        }
        self.assertEqual(repr(hparams), repr(expected_hparams))

    def test_convert_hyperparams_to_hparams_multi_float(self):
        hps = hp_module.HyperParameters()
        hps.Float("theta", min_value=0.0, max_value=1.57)
        hps.Float("r", min_value=0.0, max_value=1.0)
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam("r", hparams_api.RealInterval(0.0, 1.0)): 0.0,
            hparams_api.HParam("theta",
                               hparams_api.RealInterval(0.0, 1.57)): 0.0,
        }
        hparams_repr_list = [repr(hparams[x]) for x in hparams.keys()]
        expected_hparams_repr_list = [
            repr(expected_hparams[x]) for x in expected_hparams.keys()
        ]
        self.assertCountEqual(hparams_repr_list, expected_hparams_repr_list)

    def test_convert_hyperparams_to_hparams_boolean(self):
        hps = hp_module.HyperParameters()
        hps.Boolean("has_beta")
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam("has_beta", hparams_api.Discrete([True, False])):
                False,
        }
        self.assertEqual(repr(hparams), repr(expected_hparams))

    @parameterized.parameters(
        ("beta", 0.1),
        ("type", "WIDE_AND_DEEP"),
        ("num_layers", 2))
    def test_convert_hyperparams_to_hparams_fixed(self, name, value):
        hps = hp_module.HyperParameters()
        hps.Fixed(name, value)
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam(name, hparams_api.Discrete([value])): value,
        }
        self.assertEqual(repr(hparams), repr(expected_hparams))

    def test_convert_hyperparams_to_hparams_fixed_bool(self):
        hps = hp_module.HyperParameters()
        hps.Fixed("condition", True)
        hparams = utils.convert_hyperparams_to_hparams(hps)
        expected_hparams = {
            hparams_api.HParam("condition", hparams_api.Discrete([True])): True,
        }
        self.assertEqual(repr(hparams), repr(expected_hparams))

    @parameterized.parameters(
        ("val_loss", "min",
         [oracle_module.Objective(name="val_loss", direction="min")]),
        (oracle_module.Objective(name="val_acc", direction="max"), None,
         [oracle_module.Objective(name="val_acc", direction="max")]),
        ("accuracy", None,
         [oracle_module.Objective(name="accuracy", direction="max")]),
        (["val_acc", "val_loss"], None, [
            oracle_module.Objective(name="val_acc", direction="max"),
            oracle_module.Objective(name="val_loss", direction="min"),
        ]))
    def test_format_objective(self, objective, direction, expected_oracle_obj):
        formatted_objective = utils.format_objective(objective, direction)
        self.assertEqual(formatted_objective, expected_oracle_obj)

    @parameterized.parameters(
        ("max", "MAXIMIZE"),
        ("min", "MINIMIZE"),
        ("MAXIMIZE", "max"),
        ("MINIMIZE", "min"))
    def test_format_goal(self, metric_direction, expected_goal):
        goal = utils.format_goal(metric_direction)
        self.assertEqual(goal, expected_goal)

    def test_get_trial_id(self):
        trial_id = utils.get_trial_id(OPTIMIZER_TRIAL)
        self.assertEqual(trial_id, "trial_1")

    def _assert_hps_equal(self, hps1, hps2):
        self.assertEqual(len(hps1.space), len(hps2.space))
        hps1_repr_list = [repr(x) for x in hps1.space]
        hps2_repr_list = [repr(x) for x in hps2.space]
        self.assertCountEqual(hps1_repr_list, hps2_repr_list)

    def _assert_study_config_equal(
        self, test_study_config, expected_study_config
    ):
        study_config = copy.deepcopy(test_study_config)
        expected_config = copy.deepcopy(expected_study_config)

        algo = study_config.pop("algorithm")
        self.assertEqual(algo, "ALGORITHM_UNSPECIFIED")

        stopping_config = study_config.pop("automatedStoppingConfig")
        self.assertDictEqual(stopping_config, {
            "decayCurveStoppingConfig": {
                "useElapsedTime": True
            }
        })

        params = study_config.pop("parameters")
        expected_params = expected_config.pop("parameters")
        self.assertCountEqual(params, expected_params)

        # Check the rest of the study config
        self.assertDictEqual(study_config, expected_config)
Code Example #5
# @Time: 2020/6/17 18:25
# @Author: R.Jian
# @Note: hyperparameter settings

from tensorboard.plugins.hparams import api as hp

hp_1 = hp.HParam("dense_1", hp.Discrete([16, 32]))
hp_2 = hp.HParam("dense_2", hp.RealInterval(16.0, 32.0))  # RealInterval needs float bounds
hp_3 = hp.HParam("dense_3", hp.RealInterval(16.0, 32.0))  # takes (min, max), not a list
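
As corrected above, hp.RealInterval takes two float bounds; the original lines passed integers and a list, both of which raise TypeError. For reference, the three common domain types (a sketch, not part of the original file):

hp_discrete = hp.HParam("dense_a", hp.Discrete([16, 32]))    # enumerable choices
hp_real = hp.HParam("dense_b", hp.RealInterval(16.0, 32.0))  # continuous float range
hp_int = hp.HParam("dense_c", hp.IntInterval(16, 32))        # integer range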
Code Example #6
File: xDeepFM_test.py Project: dev-wei/recman
feat_dict["CATEGORY"] = SparseFeat(
    name="CATEGORY",
    feat_size=len(np.unique(df_X.CATEGORY.values)),
    dtype=tf.int64,
    description="0 presents the first category",
)
feat_dict["HISTORICAL_CATEGORIES"] = MultiValCsvFeat(
    name="HISTORICAL_CATEGORIES",
    tags=("a", "b", "c", "d"),
    dtype=tf.string,
    description="workout categories a user used to engage with",
)
feat_dict.initialize(df_X)

hp_params = HyperParams()
hp_params[HyperParams.LearningRate](hp.Discrete([0.01]))
hp_params[HyperParams.Optimizer](hp.Discrete(["adam"]))

metrices = (LogLoss(), RocAucScore())

run_name = datetime.now().strftime("%Y%m%d-%H%M%S")

best_model_finder = BestModelFinder()

for sess_num, hp_val in enumerate(hp_params.grid_search()):
    tb_logger = TensorBoardLogger(hp_params,
                                  run_name=run_name,
                                  sess_num=sess_num,
                                  log_dir=TB_LOG_DIR)
    model = xDeepFM(
        feat_dict,
Code Example #7
from tensorflow.keras import optimizers
from tensorflow.keras import regularizers
import matplotlib.pyplot as plt
import numpy as np
import os
from datetime import datetime
from contextlib import redirect_stdout
from tensorboard.plugins.hparams import api as hp

METRIC_ACCURACY = 'accuracy'
METRIC_LOSS = 'loss'
log_dir = '../logs/train/'

HP_STRUCTURE = hp.HParam('structure_model', hp.Discrete([1, 2, 3, 4]))
HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.30]))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam']))
HP_LEARNINGRATE = hp.HParam('learning_rate', hp.Discrete([0.001]))
HP_MOMENTUM = hp.HParam('momentum', hp.Discrete([0.01]))
HP_L2 = hp.HParam('l2', hp.Discrete([0.001]))
HP_ACTIVATION = hp.HParam('activation', hp.Discrete(['relu']))
HP_AUGMENTATION = hp.HParam('data_augmentation', hp.Discrete(["true"]))
hparams = None
batch_sizes = 512
epoch = 10


def init(hp_structure, hp_dropout, hp_optimizer, hp_learningrate, hp_l2,
         hp_activation, BATCH_SIZES, EPOCH):
    HP_STRUCTURE = hp_structure
Code Example #8
def get_hyperparameters(cls):
     import tensorboard.plugins.hparams.api as hp
     from ..training_utils import HParamWithDefault
     return [
         HParamWithDefault('n_phi_layers',
                           hp.Discrete([1, 2, 3, 4, 5]),
                           default=3),
         HParamWithDefault('phi_width',
                           hp.Discrete([16, 32, 64, 128, 256, 512]),
                           default=32),
         HParamWithDefault('phi_dropout',
                           hp.Discrete([0.0, 0.1, 0.2, 0.3]),
                           default=0.),
         HParamWithDefault('n_psi_layers', hp.Discrete([2]), default=2),
         HParamWithDefault('psi_width', hp.Discrete([64]), default=64),
         HParamWithDefault('psi_latent_width',
                           hp.Discrete([128]),
                           default=128),
         HParamWithDefault('dot_prod_dim', hp.Discrete([128]), default=128),
         HParamWithDefault('n_heads', hp.Discrete([4]), default=4),
         HParamWithDefault('attn_dropout',
                           hp.Discrete([0.0, 0.1, 0.25, 0.5]),
                           default=0.1),
         HParamWithDefault('latent_width',
                           hp.Discrete([32, 64, 128, 256, 512, 1024, 2048]),
                           default=128),
         HParamWithDefault('n_rho_layers',
                           hp.Discrete([1, 2, 3, 4, 5]),
                           default=3),
         HParamWithDefault('rho_width',
                           hp.Discrete([16, 32, 64, 128, 256, 512]),
                           default=32),
         HParamWithDefault('rho_dropout',
                           hp.Discrete([0.0, 0.1, 0.2, 0.3]),
                           default=0.),
         HParamWithDefault('max_timescale',
                           hp.Discrete([10., 100., 1000.]),
                           default=100.),
         HParamWithDefault('n_positional_dims',
                           hp.Discrete([4, 8, 16]),
                           default=4)
     ]
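
HParamWithDefault is imported from the project's own training_utils module and is not shown here. A plausible minimal stand-in, assuming it simply attaches a default value to a regular HParam (this is a guess, not the project's actual code):

import tensorboard.plugins.hparams.api as hp

class HParamWithDefault(hp.HParam):
    """Hypothetical stand-in: an HParam that also records a default value."""

    def __init__(self, name, domain=None, default=None, **kwargs):
        super().__init__(name, domain, **kwargs)
        self._default = default

    @property
    def default(self):
        return self._default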
Code Example #9
    A = np.array(A / np.amax(A))
    return A


def reshape_function_validate(length, width):
    A = []
    for i in range(0, len(X_test)):
        A.append(cv2.resize(X_test[i][:, :, 0], (length, width)))
    A = np.array(A).reshape(-1, length, width, 1)
    A = np.array(A)
    A = np.array(A / np.amax(A))
    return A


HP_NUM_UNITS = hp.HParam('num_units',
                         hp.Discrete([10, 20, 30, 40, 50, 60, 70, 80]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.5))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam']))
METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )


def ANNE2types(hparams):
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(10, activation=tf.nn.relu))
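
ANNE2types is truncated above. Note that HP_DROPOUT uses a RealInterval, which, unlike a Discrete domain, cannot be enumerated; a driver would typically sample it instead. A sketch under that assumption:

import random

rng = random.Random(0)
for num_units in HP_NUM_UNITS.domain.values:
    for optimizer in HP_OPTIMIZER.domain.values:
        dropout = HP_DROPOUT.domain.sample_uniform(rng)  # draw from [0.1, 0.5]
        hparams = {
            HP_NUM_UNITS: num_units,
            HP_DROPOUT: dropout,
            HP_OPTIMIZER: optimizer,
        }
        # ANNE2types(hparams)  # build and train one model per combination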
Code Example #10
for wt in df_wt:
    img = np.array(wt.values)
    img = resize(img, (160, 160))
    x.append(img.reshape(img.shape[0], img.shape[1], 1))

x = np.array(x)
y = cat.values

#%%
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.25,
                                                    random_state=28)

#%%
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([1000, 64]))
HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.2, 0.45]))
HP_LEARNING_RATE = hp.HParam('learning_rate', hp.Discrete([0.0001, 0.00001]))
HP_DECAY = hp.HParam('decay', hp.Discrete([0.01, 0.0001]))

with tf.summary.create_file_writer('logs/84-small-v1').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_LEARNING_RATE, HP_DECAY],
        metrics=[hp.Metric('accuracy', display_name='Accuracy')],
    )


#%%
def train_test_model(logdir, hparams):
    start = timer()
Code Example #11
from tensorflow.python.keras.models import load_model

from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

import data2

# Disable eager execution
# tf.compat.v1.disable_eager_execution()

# nvidia-smi -l 1

# rm -rf ./logs/
""" Hparms """
# HP_LSTM = hp.HParam('LSTM_para', hp.Discrete([128, 256]))
HP_clip_size = hp.HParam('batch', hp.Discrete([8, 16, 32]))
# HP_batch_size = hp.HParam('batch', hp.Discrete([32, 64]))
HP_LR = hp.HParam('LR', hp.Discrete([1e-02, 1e-03, 1e-05]))

METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer(
        'logs_2INPUTS_FocLoss_Adam_convLSTM_3D_DA/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_clip_size, HP_LR],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )

logdir = "logs_2INPUTS_FocLoss_Adam_convLSTM_3D_DA/fit/" + datetime.now(
).strftime("%Y%m%d-%H%M%S")
""" Data """
Code Example #12
def test_convert_hyperparams_to_hparams():
    def _check_hparams_equal(hp1, hp2):
        assert (hparams_api.hparams_pb(
            hp1,
            start_time_secs=0).SerializeToString() == hparams_api.hparams_pb(
                hp2, start_time_secs=0).SerializeToString())

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {
            hparams_api.HParam("learning_rate",
                               hparams_api.Discrete([1e-4, 1e-3, 1e-2])):
            1e-4
        },
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Int("units", min_value=2, max_value=16)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {hparams_api.HParam("units", hparams_api.IntInterval(2, 16)): 2})

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Int("units", min_value=32, max_value=128, step=32)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {
            hparams_api.HParam("units", hparams_api.Discrete([32, 64, 96, 128])):
            32
        },
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Float("learning_rate", min_value=0.5, max_value=1.25, step=0.25)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {
            hparams_api.HParam("learning_rate",
                               hparams_api.Discrete([0.5, 0.75, 1.0, 1.25])):
            0.5
        },
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Float("learning_rate", min_value=1e-4, max_value=1e-1)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {
            hparams_api.HParam("learning_rate",
                               hparams_api.RealInterval(1e-4, 1e-1)):
            1e-4
        },
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Float("theta", min_value=0.0, max_value=1.57)
    hps.Float("r", min_value=0.0, max_value=1.0)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    expected_hparams = {
        hparams_api.HParam("theta", hparams_api.RealInterval(0.0, 1.57)): 0.0,
        hparams_api.HParam("r", hparams_api.RealInterval(0.0, 1.0)): 0.0,
    }
    hparams_repr_list = [repr(hparams[x]) for x in hparams.keys()]
    expected_hparams_repr_list = [
        repr(expected_hparams[x]) for x in expected_hparams.keys()
    ]
    assert sorted(hparams_repr_list) == sorted(expected_hparams_repr_list)

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Boolean("has_beta")
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {
            hparams_api.HParam("has_beta", hparams_api.Discrete([True, False])):
            False
        },
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Fixed("beta", 0.1)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {hparams_api.HParam("beta", hparams_api.Discrete([0.1])): 0.1})

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Fixed("type", "WIDE_AND_DEEP")
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {
            hparams_api.HParam("type", hparams_api.Discrete(["WIDE_AND_DEEP"])):
            "WIDE_AND_DEEP"
        },
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Fixed("condition", True)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {hparams_api.HParam("condition", hparams_api.Discrete([True])): True},
    )

    hps = kerastuner.engine.hyperparameters.HyperParameters()
    hps.Fixed("num_layers", 2)
    hparams = kerastuner.engine.tuner_utils.convert_hyperparams_to_hparams(hps)
    _check_hparams_equal(
        hparams,
        {hparams_api.HParam("num_layers", hparams_api.Discrete([2])): 2})
Code Example #13
File: run.py Project: MCRen88/ticket-classification
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorboard.plugins.hparams import api as hp

from constants import DATA_PATH, SKIP_PROJECTS
from data import Data
from models import LSTMClassifier

data = Data(path=DATA_PATH, skip_projects=SKIP_PROJECTS)

data.preview()
data.save_distribution('distribution.pdf')
data.print_labels()

# Hyperparameters
HP_EPOCHS = hp.HParam('epochs', hp.Discrete([5, 10]))
HP_EMBEDDING_SIZE = hp.HParam('emb_size', hp.Discrete([200]))
HP_BATCH_SIZE = hp.HParam('batch_size', hp.Discrete([32, 128]))
HP_LSTM_SIZE = hp.HParam('lstm_size', hp.Discrete([100]))
HP_MAX_NB_WORDS = hp.HParam(
    'max_nb_words', hp.Discrete([50000]))
HP_MAX_SEQ_LEN = hp.HParam(
    'max_seq_len', hp.Discrete([500, 250]))

METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_EPOCHS, HP_EMBEDDING_SIZE, HP_LSTM_SIZE,
                 HP_BATCH_SIZE, HP_MAX_NB_WORDS, HP_MAX_SEQ_LEN],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
Code Example #14
File: trainer.py Project: victorbg91/gaze-tracking
class Model:
    """Class for tensorflow model creation, training, and application."""

    # Paths
    PATH_BASE = os.path.abspath(os.getcwd())
    PATH_BASE_LOGS = os.path.join(PATH_BASE, "logs")
    PATH_EMAIL_CONFIG = os.path.join(PATH_BASE, "config", "email_config.json")
    PATH_MODEL = os.path.join(PATH_BASE, "model")

    # Constants
    DATA_PERCENT_VALIDATION = 0.2
    DATA_PERCENT_TEST = 0.2

    MODEL_IMAGE_SIZE = (64, 64)
    MODEL_POOL_SIZE = 2
    MODEL_NUM_LAYERS = 1
    MODEL_NUM_UNITS = 2
    MODEL_KERNEL_SIZE = 3

    TRAINING_EPOCHS = 1000
    TRAINING_BATCH_SIZE = 64

    # Hyperparameters
    HP_MAX_TESTS = 50
    HP_LEARNING_RATE = hp.HParam("learning_rate_log",
                                 hp.RealInterval(-4., -3.))
    HP_VAR_REGULARIZATION = hp.HParam("var_regul_log",
                                      hp.RealInterval(-10., -0.))
    HP_LEARNING_DECAY = hp.HParam("learning_rate_divisor", hp.Discrete([True]))
    HP_LAST_LAYER = hp.HParam("last_layer", hp.Discrete([200, 300, 400]))
    HP_OPTIMIZER = hp.HParam("optimizer", hp.Discrete(["adam"]))

    HYPERPARAMETERS = [
        HP_LEARNING_RATE, HP_LEARNING_DECAY, HP_LAST_LAYER, HP_OPTIMIZER,
        HP_VAR_REGULARIZATION
    ]

    def __init__(self):
        self.image_proc = data_util.ImageProcessor()
        self.inferator = None

    def load_model(self):
        loaded = tf.keras.models.load_model(
            self.PATH_MODEL)  # tf.saved_model.load(self.PATH_MODEL)
        loaded.summary()
        inferator = loaded.signatures["serving_default"]
        self.inferator = inferator

    def predict(self, inputs):
        assert self.inferator is not None, "Model was not loaded"
        result = self.inferator(**inputs)["dense_2"].numpy().reshape(-1)
        return result

    def _define_model(self, hparams):
        """
        Define the model for gaze prediction.

        The "inspiration" is eye triangulation
        We start by giving separately the left and right eye
        --> the model may detect in which direction the pupil is aimed

        We also want to give the coordinates of the left and right eye
        --> This way, a triangulation may be made

        We also want to give the original width and height
        --> This way, the screen-eye distance may be inferred.

        """
        # Initialize
        var_regularization = VarianceRegularizer(
            10**hparams[self.HP_VAR_REGULARIZATION])
        last_layer = hparams[self.HP_LAST_LAYER]

        # Input layers
        input_left_eye = tf.keras.layers.Input(self.MODEL_IMAGE_SIZE + (1, ))
        input_right_eye = tf.keras.layers.Input(self.MODEL_IMAGE_SIZE + (1, ))
        input_left_coord = tf.keras.layers.Input([4])
        input_right_coord = tf.keras.layers.Input([4])

        # Define the weight-sharing eye branch
        eye_branch = EyeBranch(var_regularization)
        flat_left = eye_branch(input_left_eye)
        flat_right = eye_branch(input_right_eye)

        # Define the main branch
        main = tf.keras.layers.concatenate(
            [flat_left, flat_right, input_left_coord, input_right_coord],
            name="D1_concat")
        main = tf.keras.layers.Dense(
            last_layer,
            activation='relu',
            name="D2_dense",
            kernel_regularizer=var_regularization)(main)
        main = tf.keras.layers.Dense(
            last_layer,
            activation='relu',
            name="D3_dense",
            kernel_regularizer=var_regularization)(main)
        output = tf.keras.layers.Dense(2,
                                       name="D4_output",
                                       activation='sigmoid')(main)

        # Return the model
        model = tf.keras.models.Model(inputs=[
            input_left_eye, input_right_eye, input_left_coord,
            input_right_coord
        ],
                                      outputs=[output])
        return model

    @staticmethod
    def _define_callbacks(path_run, hparams):
        # Initialize
        callbacks = []
        monitor = "loss"

        # Checkpoints
        path_checkpoint = os.path.join(path_run, "checkpoint")
        callback_checkpoint = tf.keras.callbacks.ModelCheckpoint(
            filepath=path_checkpoint,
            monitor=monitor,
            save_best_only=True,
            verbose=1,
            save_weights_only=False)
        callbacks.append(callback_checkpoint)

        # Early stopping if there is no improvement over 100 epochs
        callback_early_stopping_1 = tf.keras.callbacks.EarlyStopping(
            monitor=monitor, patience=100, mode="min", verbose=2)
        callbacks.append(callback_early_stopping_1)

        # Tensorboard
        callback_tensorboard = tf.keras.callbacks.TensorBoard(
            log_dir=path_run,
            update_freq="epoch",
            histogram_freq=10,
            write_graph=True,
            write_images=False,
        )
        callbacks.append(callback_tensorboard)

        # Hyperparameters
        callback_hyperparameters = hp.KerasCallback(path_run, hparams)
        callbacks.append(callback_hyperparameters)

        # TODO implement callback to put images in tensorboard
        # Source:
        # https://stackoverflow.com/questions/43784921/how-to-display-custom-images-in-tensorboard-using-keras?rq=1

        return callbacks

    def _send_email(self, results):
        """Send email alert."""

        # Load the email configuration
        with open(self.PATH_EMAIL_CONFIG, "r") as file:
            email_config = json.load(file)

        # Initialize
        msg = EmailMessage()
        msg["Subject"] = "Tests finished"
        msg["From"] = email_config["from"]
        msg["To"] = email_config["to"]

        # Set the message header
        text = "A new batch of tests is finished."

        text += "\n\nAs a reminder, we are looking for the following MSE values: "
        text += "\n'Better-than-noise' threshold = {:0.2f}".format(1 / 6)
        text += "\n'Better-than-fixed-guess' threshold = {:0.2f}".format(1 /
                                                                         12)
        text += "\nOur objective of 10% error threshold = {:0.2f}".format(0.01)

        # Set the email info
        keys = sorted(results.keys())[:10]
        top_params = [results[key] for key in keys]
        top_params = [{param.name: params[param]
                       for param in params} for params in top_params]

        text += "\n\n" + "-" * 50
        for key, params in zip(keys, top_params):
            text += "\n\n" + str(round(key, 2))
            text += "\n" + str(params)

        # Add the text to the email
        msg.set_content(text)

        # Send the email
        with smtplib.SMTP("smtp.gmail.com", 587) as server:
            server.starttls()
            server.ehlo()
            server.login(email_config["from"], email_config["password"])
            server.ehlo()

            server.send_message(msg)

    def train_model(self, hparams, load_weights=False):
        # Define run options
        run_options = tf.compat.v1.RunOptions(
            report_tensor_allocations_upon_oom=True)

        # Load data
        data_training, data_validation, data_test = self.image_proc.initialize_dataset(
            self.DATA_PERCENT_VALIDATION, self.DATA_PERCENT_TEST,
            self.TRAINING_BATCH_SIZE, self.MODEL_IMAGE_SIZE)

        # Define the model
        try:
            model = self._define_model(hparams)
            model.summary()
        except ValueError as err:
            print(
                "Could not define the model - possibly incompatible hyperparameters."
            )
            print("ERROR: ", err)
            return np.inf

        # Define the learning parameters
        learning_rate = 10**hparams[self.HP_LEARNING_RATE]
        learning_decay = learning_rate / self.TRAINING_EPOCHS if hparams[
            self.HP_LEARNING_DECAY] else 0.

        # Define the optimizer
        if hparams[self.HP_OPTIMIZER] == "adam":
            opt = tf.keras.optimizers.Adam(lr=learning_rate,
                                           decay=learning_decay)
        elif hparams[self.HP_OPTIMIZER] == "sgd":  # '==', not 'is', for string comparison
            opt = tf.keras.optimizers.SGD(lr=learning_rate,
                                          decay=learning_decay)
        else:
            raise ValueError("Unknown optimizer, expected 'adam' or 'sgd'")

        # Define the model
        model.compile(optimizer=opt,
                      loss='mean_squared_error',
                      metrics=["mean_squared_error"],
                      options=run_options)

        if load_weights:
            try:
                # Load the weights and create a dictionary of weights
                imported = tf.saved_model.load(self.PATH_MODEL)
                weights = {
                    layer.name: layer.numpy()
                    for layer in imported.variables
                }
            except OSError:
                weights = {}
                print(
                    "WARNING: COULD NOT LOAD MODEL, starting from standard weight initialization"
                )

            # Initialize the model with the loaded weights
            for i, layer in enumerate(model.layers):
                try:
                    # Get the weight for the layer
                    name = layer.name
                    kernel_weights = weights.get(name + "/kernel:0")
                    bias_weights = weights.get(name + "/bias:0")
                    if kernel_weights is None or bias_weights is None:
                        init_weights = []
                    else:
                        init_weights = [kernel_weights, bias_weights]

                    # Set the weights
                    layer.set_weights(init_weights)

                except ValueError:
                    print("Could not load the weights in the layer: ",
                          layer.name)

        # Define callbacks
        run_id = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        path_run = os.path.join(self.PATH_BASE_LOGS, run_id)
        callbacks = self._define_callbacks(path_run, hparams)

        # Start the model fit
        try:
            model.fit(data_training,
                      epochs=self.TRAINING_EPOCHS,
                      validation_data=data_validation,
                      verbose=2,
                      callbacks=callbacks)
        except tf.errors.ResourceExhaustedError as err:
            print("Error when fitting the model, ran out of memory.")
            print(err)
            return np.inf
        except tf.errors.InvalidArgumentError as err:
            print("Model had a NaN, possibly explosive gradient problem")
            print(
                "After inspection, this problem was cause by improper normalization of the inputs."
            )
            print(err)
            return np.inf

        # Evaluate on the test set
        print("\nResults on the test set")
        result = model.evaluate(data_test)

        return result

    def launch_training_batch(self, load_weights=False):
        # Launch the tests
        results = {}
        for _ in range(self.HP_MAX_TESTS):
            # Clear the previous session
            tf.keras.backend.clear_session()

            # Pick hyperparameters at random
            hparams = {
                param: param.domain.sample_uniform()
                for param in self.HYPERPARAMETERS
            }

            # Print a run message
            msg = {param.name: hparams[param] for param in hparams}
            print("\nStarting a new model with parameters:\n", msg)

            # Train the model and append the results
            res = self.train_model(hparams, load_weights)
            if type(res) is list:
                res = res[-1]
            results[res] = hparams

        # Send alert email
        self._send_email(results)
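
The TODO inside _define_callbacks mentions logging images to TensorBoard. A minimal sketch of such a callback, assuming a fixed batch of images is available up front (the class and argument names are illustrative, not part of the project):

class ImageLoggingCallback(tf.keras.callbacks.Callback):
    """Hypothetical sketch: write a fixed batch of images every few epochs."""

    def __init__(self, log_dir, images, every_n_epochs=10):
        super().__init__()
        self._writer = tf.summary.create_file_writer(log_dir)
        self._images = images  # tensor of shape [n, height, width, channels]
        self._every = every_n_epochs

    def on_epoch_end(self, epoch, logs=None):
        if epoch % self._every == 0:
            with self._writer.as_default():
                tf.summary.image("inputs", self._images, step=epoch, max_outputs=4)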
Code Example #15
    def __init__(self,
                 *,
                 n_flow: int,
                 n_pass_through_domain=None,
                 n_cells_domain=(1, 10),
                 n_bins_domain=(2, 10),
                 nn_width_domain=None,
                 nn_depth_domain=(1, 10),
                 nn_activation_domain=("relu", ),
                 roll_step_domain=None,
                 l2_reg_domain=(0., 1.),
                 dropout_rate_domain=(0., 1.),
                 batch_size_domain=(100, 1000000),
                 **init_opts):
        """

        Args:
            n_flow ():
            n_pass_through_domain ():
            n_cells_domain ():
            n_bins_domain ():
            nn_width_domain ():
            nn_depth_domain ():
            nn_activation_domain ():
            roll_step_domain ():
            l2_reg_domain ():
            dropout_rate_domain ():
            batch_size_domain ():
            **init_opts ():
        """
        self.n_flow = n_flow
        self._model = None
        self._inverse_model = None

        # Some domains do not have an explicit default value as we want them to be dependent on other domains
        if n_pass_through_domain is None:
            _n_pass_through_domain = [1, n_flow - 1]
        else:
            _n_pass_through_domain = n_pass_through_domain

        if nn_width_domain is None:
            _nn_width_domain = [n_bins_domain[0], 5 * n_bins_domain[1]]
        else:
            _nn_width_domain = nn_width_domain

        if roll_step_domain is None:
            _roll_step_domain = [1, n_flow - 1]
        else:
            _roll_step_domain = roll_step_domain

        # **Hyperparameters**
        # Note that we include the number of batch points for training.
        # This is because we only have an estimator for the loss (which is defined as an integral)
        # And batch statistics has an impact on convergence
        # Pedagogical note: a contrario, if we divide this sample into N minibatches and accumulate the
        # gradients before taking an optimizer step (typically for memory reasons), the statistics of the
        # loss estimator are still those of the full sample, so the total sample size remains what matters.

        self._hparam = {
            "n_pass_through":
            hp.HParam("n_pass_through",
                      domain=hp.IntInterval(*_n_pass_through_domain),
                      display_name="# Pass"),
            "n_cells":
            hp.HParam("n_cells",
                      domain=hp.IntInterval(*n_cells_domain),
                      display_name="# Cells"),
            "n_bins":
            hp.HParam("n_bins",
                      domain=hp.IntInterval(*n_bins_domain),
                      display_name="# Bins"),
            "nn_width":
            hp.HParam("nn_width",
                      domain=hp.IntInterval(*_nn_width_domain),
                      display_name="NN width"),
            "nn_depth":
            hp.HParam("nn_depth",
                      domain=hp.IntInterval(*nn_depth_domain),
                      display_name="NN depth"),
            "nn_activation":
            hp.HParam("nn_activation",
                      domain=hp.Discrete(nn_activation_domain),
                      display_name="NN activ. fct."),
            "roll_step":
            hp.HParam("roll_step",
                      domain=hp.IntInterval(*_roll_step_domain),
                      display_name="Roll step"),
            "l2_reg":
            hp.HParam("l2_reg",
                      domain=hp.RealInterval(*l2_reg_domain),
                      display_name="L2 reg."),
            "dropout_rate":
            hp.HParam("dropout_rate",
                      domain=hp.RealInterval(*dropout_rate_domain),
                      display_name="Dropout rate"),
            "batch_size":
            hp.HParam("batch_size",
                      domain=hp.IntInterval(*batch_size_domain),
                      display_name="Batch size"),
            "use_batch_norm":
            hp.HParam("use_batch_norm",
                      domain=hp.Discrete([True, False]),
                      display_name="BatchNorm")
        }

        self._metrics = {
            "std": hp.Metric("std",
                             display_name="Integrand standard deviation")
        }

        self.optimizer_object = None
Code Example #16

def get_callbacks(name):
    return [
        tfdocs.modeling.EpochDots(),
        tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy',
                                         patience=200),
        tf.keras.callbacks.TensorBoard(logdir / name),
    ]


fashion_mnist = tf.keras.datasets.fashion_mnist

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([16, 32]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))

METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )


def train_test_model(hparams):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
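
train_test_model is truncated. This example follows the TensorBoard hparams tutorial, which typically continues with a grid driver that enumerates the Discrete domains and probes the endpoints of the RealInterval dropout domain; a sketch along those lines:

session_num = 0
for num_units in HP_NUM_UNITS.domain.values:
    for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
        for optimizer in HP_OPTIMIZER.domain.values:
            hparams = {
                HP_NUM_UNITS: num_units,
                HP_DROPOUT: dropout_rate,
                HP_OPTIMIZER: optimizer,
            }
            run_dir = 'logs/hparam_tuning/run-%d' % session_num
            with tf.summary.create_file_writer(run_dir).as_default():
                hp.hparams(hparams)  # record the values used in this trial
                accuracy = train_test_model(hparams)
                tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
            session_num += 1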
Code Example #17
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt

# uncomment below to tune on further parameters
'''
HP_CNN_DROPOUT = hp.HParam("fcn_dropout", hp.RealInterval(0.1, 0.2),
                           display_name="CONV2D NW dropout",
                           description="Dropout rate for conv subnet.")

HP_FC_DROPOUT = hp.HParam("fc_dropout", hp.RealInterval(0.2, 0.5),
                          display_name="f.c. dropout",
                          description="Dropout rate for fully connected subnet.")
'''
HP_EPOCHS = hp.HParam("epochs",
                      hp.Discrete([100, 140]),
                      description="Number of epoch to run")
HP_NEURONS = hp.HParam("num_Dense_layer_neurons",
                       hp.Discrete([128, 256]),
                       description="Neurons per dense layer")
HP_STRIDE = hp.HParam(
    "stride_in_first_layer",
    hp.Discrete([2, 1]),
    description="Value of stride in frist convolutional layer")
HP_L_RATE = hp.HParam("learning_rate",
                      hp.Discrete([0.0001, 0.00001]),
                      description="Learning rate")

HP_METRIC = hp.Metric(constants.METRICS_ACCURACY, display_name='Accuracy')

# creating logs for different hyper-parameters
Code Example #18
File: hparam_tuning.py Project: KenzaB27/ann_labs
from tensorboard.plugins.hparams import api as hp
import tensorflow as tf
tfk = tf.keras
tfkl = tfk.layers

HP_NH1 = hp.HParam('nh1', hp.Discrete([3, 4, 5]))
HP_NH2 = hp.HParam('nh2', hp.Discrete([2, 4, 6]))
HP_LAMBDA = hp.HParam('lambda', hp.Discrete([0.001, 0.01, 0.1]))
HP_LR = hp.HParam('lr', hp.Discrete([0.001, 0.005, 0.01, 0.05]))
HP_ALPHA = hp.HParam('alpha', hp.Discrete([0.9, 0.8]))

METRIC_MSE = 'mse'
N_INPUT = 5
N_OUTPUT = 1
BATCH_SIZE = 50
EPOCHS = 100
PATIENCE = 20
LOG_DIR = 'logs\\hparam_tuning_relu\\'


class HparamTuning():
    def __init__(self, log_dir, X_train, y_train, X_val, y_val, X_test,
                 y_test):
        self.log_dir = log_dir
        self.X_train = X_train
        self.y_train = y_train
        self.X_val = X_val
        self.y_val = y_val
        self.X_test = X_test
        self.y_test = y_test
Code Example #19
  vocabulary = data[feature_name].unique()
  cat_c = tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary)
  embeding = feature_column.embedding_column(cat_c, dimension=50)
  feature_columns.append(embeding)

# Crossed columns
vocabulary = data['Sex'].unique()
Sex = tf.feature_column.categorical_column_with_vocabulary_list('Sex', vocabulary)

crossed_feature = feature_column.crossed_column([age_buckets, Sex], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
print(len(feature_columns))


HP_NUM_UNITS1 = hp.HParam('num_units 1', hp.Discrete([50, 100, 150]))
HP_NUM_UNITS2 = hp.HParam('num_units 2', hp.Discrete([30, 50]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2))
HP_DROPOUT2 = hp.HParam('dropout2', hp.RealInterval(0.3, 0.5))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd', 'RMSprop']))
HP_L2 = hp.HParam('l2 regularizer', hp.RealInterval(.001, .01))
METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
  hp.hparams_config(
    hparams=[HP_NUM_UNITS1, HP_NUM_UNITS2, HP_DROPOUT, HP_L2, HP_OPTIMIZER, HP_DROPOUT2],
    metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
  )

feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
Code Example #20
    "Summaries will be written every n steps, where n is the value of "
    "this flag.",
)
flags.DEFINE_integer(
    "num_epochs",
    5,
    "Number of epochs per trial.",
)

# We'll use MNIST for this example.
DATASET = tf.keras.datasets.mnist
INPUT_SHAPE = (28, 28)
OUTPUT_CLASSES = 10

HP_CONV_LAYERS = hp.HParam("conv_layers", hp.IntInterval(1, 3))
HP_CONV_KERNEL_SIZE = hp.HParam("conv_kernel_size", hp.Discrete([3, 5]))
HP_DENSE_LAYERS = hp.HParam("dense_layers", hp.IntInterval(1, 3))
HP_DROPOUT = hp.HParam("dropout", hp.RealInterval(0.1, 0.4))
HP_OPTIMIZER = hp.HParam("optimizer", hp.Discrete(["adam", "adagrad"]))

HPARAMS = [
    HP_CONV_LAYERS,
    HP_CONV_KERNEL_SIZE,
    HP_DENSE_LAYERS,
    HP_DROPOUT,
    HP_OPTIMIZER,
]

METRICS = [
    hp.Metric(
        "epoch_accuracy",
Code Example #21
File: train.py Project: Sivaneshmsc/COVID-CXR
def random_hparam_search(cfg, data, callbacks, log_dir):
    '''
    Conduct a random hyperparameter search over the ranges given for the hyperparameters in config.yml and log results
    in TensorBoard. Model is trained x times for y random combinations of hyperparameters.
    :param cfg: Project config
    :param data: Dict containing the partitioned datasets
    :param callbacks: List of callbacks for Keras model (excluding TensorBoard)
    :param log_dir: Base directory in which to store logs
    :return: (Last model trained, resultant test set metrics, test data generator)
    '''

    # Define HParam objects for each hyperparameter we wish to tune.
    hp_ranges = cfg['HP_SEARCH']['RANGES']
    HPARAMS = []
    HPARAMS.append(hp.HParam('KERNEL_SIZE', hp.Discrete(hp_ranges['KERNEL_SIZE'])))
    HPARAMS.append(hp.HParam('MAXPOOL_SIZE', hp.Discrete(hp_ranges['MAXPOOL_SIZE'])))
    HPARAMS.append(hp.HParam('INIT_FILTERS', hp.Discrete(hp_ranges['INIT_FILTERS'])))
    HPARAMS.append(hp.HParam('FILTER_EXP_BASE', hp.IntInterval(hp_ranges['FILTER_EXP_BASE'][0], hp_ranges['FILTER_EXP_BASE'][1])))
    HPARAMS.append(hp.HParam('NODES_DENSE0', hp.Discrete(hp_ranges['NODES_DENSE0'])))
    HPARAMS.append(hp.HParam('CONV_BLOCKS', hp.IntInterval(hp_ranges['CONV_BLOCKS'][0], hp_ranges['CONV_BLOCKS'][1])))
    HPARAMS.append(hp.HParam('DROPOUT', hp.Discrete(hp_ranges['DROPOUT'])))
    HPARAMS.append(hp.HParam('LR', hp.RealInterval(hp_ranges['LR'][0], hp_ranges['LR'][1])))
    HPARAMS.append(hp.HParam('OPTIMIZER', hp.Discrete(hp_ranges['OPTIMIZER'])))
    HPARAMS.append(hp.HParam('L2_LAMBDA', hp.Discrete(hp_ranges['L2_LAMBDA'])))
    HPARAMS.append(hp.HParam('BATCH_SIZE', hp.Discrete(hp_ranges['BATCH_SIZE'])))
    HPARAMS.append(hp.HParam('IMB_STRATEGY', hp.Discrete(hp_ranges['IMB_STRATEGY'])))

    # Define test set metrics that we wish to log to TensorBoard for each training run
    HP_METRICS = [hp.Metric(metric, display_name='Test ' + metric) for metric in cfg['HP_SEARCH']['METRICS']]

    # Configure TensorBoard to log the results
    with tf.summary.create_file_writer(log_dir).as_default():
        hp.hparams_config(hparams=HPARAMS, metrics=HP_METRICS)

    # Complete a number of training runs at different hparam values and log the results.
    repeats_per_combo = cfg['HP_SEARCH']['REPEATS']   # Number of times to train the model per combination of hparams
    num_combos = cfg['HP_SEARCH']['COMBINATIONS']     # Number of random combinations of hparams to attempt
    num_sessions = num_combos * repeats_per_combo       # Total number of runs in this experiment
    model_type = 'DCNN_BINARY' if cfg['TRAIN']['CLASS_MODE'] == 'binary' else 'DCNN_MULTICLASS'
    trial_id = 0
    for group_idx in range(num_combos):
        rand = random.Random()
        HPARAMS = {h: h.domain.sample_uniform(rand) for h in HPARAMS}
        hparams = {h.name: HPARAMS[h] for h in HPARAMS}  # To pass to model definition
        for repeat_idx in range(repeats_per_combo):
            trial_id += 1
            print("Running training session %d/%d" % (trial_id, num_sessions))
            print("Hparam values: ", {h.name: HPARAMS[h] for h in HPARAMS})
            trial_logdir = os.path.join(log_dir, str(trial_id))     # Need specific logdir for each trial
            callbacks_hp = callbacks + [TensorBoard(log_dir=trial_logdir, profile_batch=0, write_graph=False)]

            # Set values of hyperparameters for this run in config file.
            for h in hparams:
                if h in ['LR', 'L2_LAMBDA']:
                    val = 10 ** hparams[h]      # These hyperparameters are sampled on the log scale.
                else:
                    val = hparams[h]
                cfg['NN'][model_type][h] = val

            # Set some hyperparameters that are not specified in model definition.
            cfg['TRAIN']['BATCH_SIZE'] = hparams['BATCH_SIZE']
            cfg['TRAIN']['IMB_STRATEGY'] = hparams['IMB_STRATEGY']

            # Run a training session and log the performance metrics on the test set to HParams dashboard in TensorBoard
            with tf.summary.create_file_writer(trial_logdir).as_default():
                hp.hparams(HPARAMS, trial_id=str(trial_id))
                model, test_metrics, test_generator = train_model(cfg, data, callbacks_hp, verbose=0)
                for metric in HP_METRICS:
                    if metric._tag in test_metrics:
                        tf.summary.scalar(metric._tag, test_metrics[metric._tag], step=1)   # Log test metric
    return model, test_metrics, test_generator  # matches the tuple promised in the docstring
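
random_hparam_search reads its ranges from config.yml, which is not shown. Based only on the keys referenced above, the relevant portion plausibly looks like the following, expressed as the equivalent Python dict (all values are illustrative, not the project's real settings):

cfg = {
    'TRAIN': {'CLASS_MODE': 'binary', 'BATCH_SIZE': 32, 'IMB_STRATEGY': 'class_weight'},
    'NN': {'DCNN_BINARY': {}},   # filled in per-run by the loop above
    'HP_SEARCH': {
        'METRICS': ['accuracy', 'loss'],
        'REPEATS': 2,        # trainings per hyperparameter combination
        'COMBINATIONS': 10,  # random combinations to try
        'RANGES': {
            'KERNEL_SIZE': ['(3,3)', '(5,5)'],  # Discrete
            'MAXPOOL_SIZE': ['(2,2)'],          # Discrete
            'INIT_FILTERS': [16, 32],           # Discrete
            'FILTER_EXP_BASE': [2, 3],          # IntInterval bounds
            'NODES_DENSE0': [128, 256],         # Discrete
            'CONV_BLOCKS': [3, 5],              # IntInterval bounds
            'DROPOUT': [0.25, 0.4],             # Discrete
            'LR': [-4.0, -3.0],                 # RealInterval, log10 scale per the code above
            'OPTIMIZER': ['adam'],              # Discrete
            'L2_LAMBDA': [-5.0, -3.0],          # Discrete, log10 scale per the code above
            'BATCH_SIZE': [16, 32],             # Discrete
            'IMB_STRATEGY': ['class_weight'],   # Discrete
        },
    },
}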
Code Example #22
y = df['stroke'].values
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.30,
                                                    random_state=101)

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
print(np.bincount(y_train))

from tensorboard.plugins.hparams import api as hp
import os

HP_NUM_UNITS_L1 = hp.HParam('num_units_l1', hp.Discrete([32, 49, 79]))
HP_NUM_UNITS_L2 = hp.HParam('num_units_l2', hp.Discrete([32, 16, 8]))
HP_NUM_UNITS_L3 = hp.HParam('num_units_l3', hp.Discrete([8, 4]))
HP_BATCH_SIZE = hp.HParam('batch_size', hp.Discrete([250, 1000]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.4, 0.5))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))
HP_LOSS = hp.HParam('loss', hp.Discrete(['mse', 'binary_crossentropy']))

base_dir = os.path.join('logs', 'hparam_tuning',
                        datetime.now().strftime("%Y-%m-%d-%H%M"))
if not os.path.exists(base_dir):
    os.makedirs(base_dir)  # nested path; os.mkdir would fail without the parent dirs

from keras import backend as K

Code Example #23
File: hparams.py Project: BingyuZhou/rl-sc2
from tensorboard.plugins.hparams import api as hp

HP_LR = hp.HParam("lr", hp.Discrete([3e-4]))
HP_CLIP = hp.HParam("clip", hp.Discrete([0.3]))
HP_CLIP_VALUE = hp.HParam("clip_value", hp.Discrete([0.0]))
HP_ENTROPY_COEF = hp.HParam("entropy_coef", hp.Discrete([1e-5]))
HP_GRADIENT_NORM = hp.HParam("gradient_norm", hp.Discrete([0.5]))
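
Every domain in this file is a single-value hp.Discrete, so it effectively pins each hyperparameter rather than defining a search. A run script could unpack the values like this (sketch):

HPARAMS = [HP_LR, HP_CLIP, HP_CLIP_VALUE, HP_ENTROPY_COEF, HP_GRADIENT_NORM]

# With one value per domain, the "search" collapses to a single configuration.
run_values = {h.name: h.domain.values[0] for h in HPARAMS}
# e.g. {'lr': 0.0003, 'clip': 0.3, 'clip_value': 0.0, ...}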
Code Example #24
    Flatten,
    MaxPooling2D,
)
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD, Adam, RMSprop

logging.getLogger().setLevel(logging.INFO)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)

HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10

HP_EPOCHS = hp.HParam("epochs", hp.IntInterval(1, 100))
HP_BATCH_SIZE = hp.HParam("batch-size", hp.Discrete([64, 128, 256, 512]))
HP_LR = hp.HParam("learning-rate", hp.RealInterval(0.0, 1.0))
HP_OPTIMIZER = hp.HParam("optimizer", hp.Discrete(["sgd", "adam", "rmsprop"]))

METRIC_ACCURACY = "accuracy"


def keras_model_fn(learning_rate, optimizer):
    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding="same",
               name="inputs",
               input_shape=(HEIGHT, WIDTH, DEPTH)))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
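The excerpt stops partway through the model definition, but the SGD/Adam/RMSprop imports presumably back the HP_OPTIMIZER strings. A sketch of that mapping (the helper name is ours, not the project's):

def make_optimizer(optimizer, learning_rate):
    """Map an HP_OPTIMIZER string to a Keras optimizer instance."""
    if optimizer == "sgd":
        return SGD(learning_rate=learning_rate)
    if optimizer == "adam":
        return Adam(learning_rate=learning_rate)
    if optimizer == "rmsprop":
        return RMSprop(learning_rate=learning_rate)
    raise ValueError("unsupported optimizer: %s" % optimizer)

# e.g. model.compile(optimizer=make_optimizer(optimizer, learning_rate),
#                    loss="categorical_crossentropy", metrics=[METRIC_ACCURACY])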
Code example #25
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# images_train/val/test and data_test are loaded earlier in the original file.
labels_test = data_test['labels']

# Scaling the pixel values of all images
print(
    '=============Scaling the pixel values of all images========================='
)
images_train = images_train / 255.0
images_val = images_val / 255.0
images_test = images_test / 255.0

# Defining constants
EPOCHS = 15
BATCH_SIZE = 64

# Defining the hyperparameters we would tune, and their values to be tested
HP_FILTER_SIZE = hp.HParam('filter_size', hp.Discrete([3, 5, 7]))
HP_FILTER_NUM = hp.HParam('filters_number', hp.Discrete([32, 64, 96, 128]))

METRIC_ACCURACY = 'accuracy'

# Logging setup info
with tf.summary.create_file_writer(
        r'logs/Model_0/hparam_tuning/').as_default():
    hp.hparams_config(
        hparams=[HP_FILTER_SIZE, HP_FILTER_NUM],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )
# Wrapping our model and training in a function
print(
    '=============Wrapping our model and training in a function=================='
)
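The training wrapper itself is cut from this excerpt; a minimal hedged sketch, assuming 28x28 single-channel inputs and that labels_train/labels_val exist alongside the image arrays:

def train_test_model(hparams, run_dir):
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(hparams[HP_FILTER_NUM],
                               hparams[HP_FILTER_SIZE],
                               activation='relu',
                               input_shape=(28, 28, 1)),  # shape assumed
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(images_train, labels_train,   # labels_* assumed from earlier
              epochs=EPOCHS, batch_size=BATCH_SIZE,
              validation_data=(images_val, labels_val))
    _, accuracy = model.evaluate(images_test, labels_test)
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)
        tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
    return accuracy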
Code example #26
File: base.py Project: OwenMyers/z3support
    def __init__(self, settings_file, working_location):
        """
        Establishes the attributes needed for the run and checks that the required paths exist for storing the
        models and TensorBoard logs.

        :param settings_file: path to the settings file, which contains paths to the data and the settings that
            define the hyperparameter search.
        :param working_location: the root directory of the project. Paths to things like the TensorBoard logs are
            taken relative to this point; everything regarding the output of your run can be found in this
            location.
        """

        # Make sure the required subdirectories are present
        logging.info(f"Working Location: {working_location}")
        assert os.path.exists(
            os.path.join(working_location, 'model_checkpoints'))
        assert os.path.exists(os.path.join(working_location, 'models'))
        assert os.path.exists(os.path.join(working_location, 'settings'))
        assert os.path.exists(os.path.join(working_location,
                                           'tensorboard_raw'))
        assert os.path.exists(os.path.join(working_location, 'study_data'))

        if not os.path.exists(settings_file):
            raise ValueError(
                f"Can't find specified settings file {settings_file}")
        self.config = configparser.ConfigParser()
        self.config.read(settings_file)

        self.timestamp = self.config['Settings']['timestamp']
        self.L = int(self.config['Settings']['L'])
        self.feature_map_start = int(
            self.config['Settings']['FEATURE_MAP_START'])
        self.epochs = int(self.config['Settings']['EPOCHS'])
        batch_sizes = self.parse_int_list_from_config(
            self.config['Settings']['BATCH_SIZES'])
        self.hp_batch_size = hp.HParam('batch_size', hp.Discrete(batch_sizes))
        n_layers = self.parse_int_list_from_config(
            self.config['Settings']['N_LAYERS'])
        self.hp_n_layers = hp.HParam('n_layers', hp.Discrete(n_layers))
        feature_map_steps = self.parse_int_list_from_config(
            self.config['Settings']['FEATURE_MAP_STEPS'])
        self.hp_feature_map_step = hp.HParam('feature_map_step',
                                             hp.Discrete(feature_map_steps))
        stride_sizes = self.parse_int_list_from_config(
            self.config['Settings']['STRIDE_SIZES'])
        self.hp_stride_size = hp.HParam('stride', hp.Discrete(stride_sizes))
        self.hp_use_batch_normalization = hp.HParam('use_batch_normalization',
                                                    hp.Discrete([1, 0]))
        self.hp_use_dropout = hp.HParam('use_dropout', hp.Discrete([1, 0]))
        # Quick run over a single parameter combination instead of the full sweep. Use True for testing.
        self.quick_run = False
        if 'true' in self.config['Settings']['QUICK_RUN'].lower():
            self.quick_run = True
        self.verbose = self.config['Settings']['VERBOSE']
        self.tensorboard_sub_dir = self.config['Settings'][
            'TENSORBOARD_SUB_DIR']
        self.checkpoint_file = os.path.join(
            working_location, 'model_checkpoints',
            'checkpoint_{}.hdf5'.format(self.config['Settings']['timestamp']))
        # We pickle the compiled model so we have the proper structure to load the weights into. Weights come from
        # self.checkpoint_file.
        self.checkpoint_json_file = os.path.join(
            working_location, 'model_checkpoints',
            'pickled_compiled_model_{}.pkl'.format(
                self.config['Settings']['timestamp']))
        self.best_model_file = os.path.join(
            working_location, 'models',
            'best_hyper_param_autoencoder_{}'.format(
                self.config['Settings']['timestamp']))
        self.best_activations_file = os.path.join(
            working_location, 'models',
            'best_hyper_param_activations_{}'.format(
                self.config['Settings']['timestamp']))
        self.study_data_location = os.path.join(
            working_location, 'study_data',
            self.config['Settings']['timestamp'])
        self.training_data_location = os.path.join(self.study_data_location,
                                                   'training_data.npy')
        self.testing_data_location = os.path.join(self.study_data_location,
                                                  'testing_data.npy')
        self.data_label_location = os.path.join(self.study_data_location,
                                                'data_labels.npy')
        if not os.path.exists(self.study_data_location):
            os.mkdir(self.study_data_location)
        # This will be a list of the different sources, e.g. path to transformed Z3 data, and path to transformed Z2
        # data.
        self.data = self.get_all_data_sources(self.config)
        self.checkpointer = ModelCheckpoint(
            filepath=self.checkpoint_file,
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            mode='auto',
            save_freq='epoch',
            # Want to use False but seems to be broken: https://github.com/tensorflow/tensorflow/issues/39679
            save_weights_only=True)
        self.run_location = working_location

        if self.quick_run:
            self.hp_batch_size = hp.HParam('batch_size', hp.Discrete([50]))
            self.hp_n_layers = hp.HParam('n_layers', hp.Discrete([3]))
            self.hp_feature_map_step = hp.HParam('feature_map_step',
                                                 hp.Discrete([16]))
            self.hp_stride_size = hp.HParam('stride', hp.Discrete([1]))
            self.tensorboard_sub_dir = 'quick_run'
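parse_int_list_from_config is called repeatedly above but defined elsewhere in base.py; a plausible minimal version, assuming comma-separated integers in the INI file:

def parse_int_list_from_config(value):
    # Plausible helper (the real one lives elsewhere in base.py):
    # "16, 32, 64" -> [16, 32, 64]
    return [int(token) for token in value.split(',') if token.strip()]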
Code example #27

# As in Code example #25, loading and scaling of the training images precedes this excerpt.
images_val = images_val / 255.0
images_test = images_test / 255.0

# Defining constants
### Updated to 20 Epochs ###
EPOCHS = 20
BATCH_SIZE = 64

# Defining the hyperparameters we would tune, and their values to be tested
### For this model I am hardcoding the parameters that were the most successful from the last model run. ###
### Added HP_LAMBDA_REG so that we can change the L2 variable. ###
#HP_FILTER_SIZE_1 = hp.HParam('filter_size_1', hp.Discrete([3,5,7]))
#HP_FILTER_NUM = hp.HParam('filters_number', hp.Discrete([32,64,96,128]))
#HP_FILTER_SIZE_2 = hp.HParam('filter_size_2', hp.Discrete([3,5]))
#HP_DENSE_SIZE = hp.HParam('dense_size', hp.Discrete([256,512,1024]))
HP_FILTER_SIZE_1 = hp.HParam('filter_size_1', hp.Discrete([5]))
HP_FILTER_NUM = hp.HParam('filters_number', hp.Discrete([32]))
HP_FILTER_SIZE_2 = hp.HParam('filter_size_2', hp.Discrete([3]))
HP_DENSE_SIZE = hp.HParam('dense_size', hp.Discrete([256]))
HP_LAMBDA_REG = hp.HParam(
    'lambda',
    hp.Discrete([
        0.0, 1e-5, 3e-5, 7e-5, 9e-5, 1e-4, 3e-4, 5e-4, 7e-4, 9e-4, 1e-3, 3e-3,
        5e-3, 7e-3, 9e-3, 1e-2, 3e-2, 5e-2, 7e-2, 9e-2, 0.1
    ]))

METRIC_ACCURACY = 'accuracy'

# Logging setup info
with tf.summary.create_file_writer(
        r'logs/Model_7_L2Reg/hparam_tuning/').as_default():
    # The excerpt is cut here; presumably the sweep is registered as in Code example #25.
    hp.hparams_config(
        hparams=[HP_FILTER_SIZE_1, HP_FILTER_NUM, HP_FILTER_SIZE_2,
                 HP_DENSE_SIZE, HP_LAMBDA_REG],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )
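Inside a training function that receives the hparams dict, HP_LAMBDA_REG presumably feeds kernel regularizers; a hedged sketch (the layer names and the `hparams` variable are ours):

l2_reg = tf.keras.regularizers.l2(hparams[HP_LAMBDA_REG])
conv_1 = tf.keras.layers.Conv2D(hparams[HP_FILTER_NUM],
                                hparams[HP_FILTER_SIZE_1],
                                activation='relu',
                                kernel_regularizer=l2_reg)
conv_2 = tf.keras.layers.Conv2D(hparams[HP_FILTER_NUM],
                                hparams[HP_FILTER_SIZE_2],
                                activation='relu',
                                kernel_regularizer=l2_reg)
dense = tf.keras.layers.Dense(hparams[HP_DENSE_SIZE],
                              activation='relu',
                              kernel_regularizer=l2_reg)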
Code example #28
import gc
from itertools import product

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# batch_sizes_, res_xys and session_num_init are module-level globals defined
# elsewhere in the original file.


def main():
    """This function first sets up the hyperparameters of interest to tune, then calls run() and
    train_test_model() to train each combination and store all results to TensorBoard."""

    # define all hyper-parameters interested to tune and all metrics interested to measure and store
    global HP_NUM_UNITS_DLl, HP_DROPOUT_DLS, HP_NUM_UNITS_DL2
    global HP_OPTIMIZER_PARAM, METRIC_TRAIN_ACCURACY, METRIC_VAL_ACCURACY, METRIC_TEST1_ACCURACY, METRIC_TEST2_ACCURACY, METRIC_TIME, HP_BATCH_SIZE, HP_RES
    global session_num, run_name

    HP_NUM_UNITS_DLl = hp.HParam('dl1_num_units', hp.Discrete([128]))
    HP_DROPOUT_DLS = hp.HParam('dl1&2_dropout', hp.Discrete([0.25]))
    HP_NUM_UNITS_DL2 = hp.HParam('dl2_num_units', hp.Discrete([64]))
    HP_BATCH_SIZE = hp.HParam('batch_size', hp.Discrete(batch_sizes_))
    HP_RES = hp.HParam('res', hp.Discrete(res_xys))

    HP_OPTIMIZER_PARAM = hp.HParam('optimizer parameter', hp.Discrete([0.001]))
    METRIC_TRAIN_ACCURACY = 'Train Accuracy'
    METRIC_VAL_ACCURACY = 'Validation Accuracy'
    METRIC_TEST1_ACCURACY = 'Test1 Accuracy'
    METRIC_TEST2_ACCURACY = 'Test2 Accuracy'

    METRIC_TIME = 'average seconds per epoch'

    # initialize all tunable hyper-parameters and saveable metrics on TensorBoard
    with tf.summary.create_file_writer('logs/hparam_tuning_DL_3').as_default():
        hp.hparams_config(hparams=[
            HP_NUM_UNITS_DLl, HP_DROPOUT_DLS, HP_NUM_UNITS_DL2, HP_BATCH_SIZE,
            HP_RES, HP_OPTIMIZER_PARAM
        ],
                          metrics=[
                              hp.Metric(METRIC_VAL_ACCURACY,
                                        display_name='Validation Accuracy'),
                              hp.Metric(
                                  METRIC_TIME,
                                  display_name='average seconds per epoch')
                          ])

    session_num = session_num_init

    # grid search of all possible unique combinations of hyper-parameters
    for a, b, c, d, e, f in product(HP_NUM_UNITS_DLl.domain.values,
                                    HP_DROPOUT_DLS.domain.values,
                                    HP_NUM_UNITS_DL2.domain.values,
                                    HP_OPTIMIZER_PARAM.domain.values,
                                    HP_BATCH_SIZE.domain.values,
                                    HP_RES.domain.values):
        hparams = {
            HP_NUM_UNITS_DLl: a,
            HP_DROPOUT_DLS: b,
            HP_NUM_UNITS_DL2: c,
            HP_OPTIMIZER_PARAM: d,
            HP_BATCH_SIZE: e,
            HP_RES: f
        }
        run_name = "run-%d" % session_num
        print('--- Starting trial: %s' % run_name)
        print({h.name: hparams[h] for h in hparams})
        run('logs/hparam_tuning_DL_3/' + run_name, hparams)
        session_num += 1

        # free memory that is no longer needed between trials
        gc.collect()
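run() is invoked above but not included in this excerpt; a minimal hedged sketch of such a wrapper, with train_test_model as a hypothetical stand-in for the training routine:

def run(run_dir, hparams):
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)                        # record this combination
        val_acc, secs = train_test_model(hparams)  # hypothetical trainer
        tf.summary.scalar(METRIC_VAL_ACCURACY, val_acc, step=1)
        tf.summary.scalar(METRIC_TIME, secs, step=1)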
Code example #29
'''Tune hyperparameters such as:
- HP_EPOCHS
- HP_DENSE_LAYER
- HP_DROPOUT

Visualize the results on TensorBoard'''

import os

import tensorflow as tf
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras import optimizers
from tensorboard.plugins.hparams import api as hp
from input_pipeline.preprocessing import train_generator, val_generator, N_img_height, N_img_width
from evaluation.metrics import test_images_list, test_labels

#Define the hyperparameters
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([256, 512]))
HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.3, 0.4, 0.5]))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))
HP_EPOCHS = hp.HParam('epochs', hp.Discrete([100, 150, 200]))

METRIC_ACCURACY = 'accuracy'

path_hparams = input('Enter the path to save the tuning logs: ')

with tf.summary.create_file_writer(
        os.path.join(path_hparams, 'logs',
                     'hparam_tuning')).as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER, HP_EPOCHS],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )
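Beyond this excerpt, these hparams typically reach the compile/fit calls roughly as follows (a sketch; the loss and the rest of the layer stack are assumptions):

model.compile(optimizer=hparams[HP_OPTIMIZER],  # Keras accepts 'adam'/'sgd' by name
              loss='binary_crossentropy',
              metrics=[METRIC_ACCURACY])
model.fit(train_generator,
          epochs=hparams[HP_EPOCHS],
          validation_data=val_generator)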
Code example #30
File: 78-gpu-2.py Project: alamkanak/EEG-Correlation
import numpy as np
import tensorflow as tf
from skimage.transform import resize  # assumed source of resize(img, (160, 160))
from sklearn.model_selection import train_test_split
from tensorboard.plugins.hparams import api as hp

# df_wt (an iterable of wavelet-transform DataFrames) and cat (the label
# Series) are built earlier in the original file.
x = []
for wt in df_wt:
    img = np.array(wt.values)
    img = resize(img, (160, 160))
    x.append(img.reshape(img.shape[0], img.shape[1], 1))

x = np.array(x)
y = cat.values

#%%
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.25,
                                                    random_state=28)

#%%
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([128, 256, 512]))
HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.3]))
HP_LEARNING_RATE = hp.HParam('learning_rate', hp.Discrete([0.0001, 0.00001]))
HP_CNN_FILTER_1 = hp.HParam('filter_1', hp.Discrete([16, 64, 256]))
HP_CNN_FILTER_2 = hp.HParam('filter_2', hp.Discrete([16, 64, 256]))
HP_BATCH_NORM = hp.HParam('batch_norm', hp.Discrete([False, True]))
HP_CNN_KERNEL_X_1 = hp.HParam('kernel_1_x', hp.Discrete([80, 30, 5]))
HP_CNN_KERNEL_Y_1 = hp.HParam('kernel_1_y', hp.Discrete([80, 30, 5]))
HP_CNN_KERNEL_X_2 = hp.HParam('kernel_2_x', hp.Discrete([80, 30, 5]))
HP_CNN_KERNEL_Y_2 = hp.HParam('kernel_2_y', hp.Discrete([80, 30, 5]))

with tf.summary.create_file_writer('logs/78-hparam-tuning-v3').as_default():
    hp.hparams_config(
        hparams=[
            HP_NUM_UNITS, HP_DROPOUT, HP_LEARNING_RATE, HP_CNN_KERNEL_X_1,
            HP_CNN_KERNEL_Y_1, HP_CNN_KERNEL_X_2, HP_CNN_KERNEL_Y_2,