Example #1
    def write_parameters_table_to_tensorboard(self):
        """
        Write the summaries and the static and dynamic hyperparameters to the table in TensorBoard's hparams section.
        This method is called once, to create the hparams table.
        """
        # Check whether there are any hyperparameters to track:
        if (len(self._static_hyperparameters) == 0
                and len(self._dynamic_hyperparameters) == 0):
            return

        # Prepare the static hyperparameters values:
        non_graph_parameters = {"Date": str(datetime.now()).split(".")[0]}
        hp_param_list = [hp_api.HParam("Date")]
        for parameter, value in self._static_hyperparameters.items():
            non_graph_parameters[parameter] = value
            hp_param_list.append(hp_api.HParam(parameter))

        # Prepare the summaries values and the dynamic hyperparameters values (both registered as metrics):
        graph_parameters = {}
        hp_metric_list = []
        for metric in self._training_results:
            for prefix in ["training", "validation"]:
                metric_name = f"{self._Sections.SUMMARY}/{prefix}_{metric}"
                graph_parameters[metric_name] = 0.0
                hp_metric_list.append(hp_api.Metric(metric_name))
        for parameter, epochs in self._dynamic_hyperparameters.items():
            parameter_name = f"{self._Sections.HYPERPARAMETERS}/{parameter}"
            graph_parameters[parameter_name] = epochs[-1]
            hp_metric_list.append(hp_api.Metric(parameter_name))

        # Write the hyperparameters and summaries to the table:
        with self._file_writer.as_default():
            hp_api.hparams_config(hparams=hp_param_list,
                                  metrics=hp_metric_list)
            hp_api.hparams(non_graph_parameters, trial_id=self._run_name)
Example #2
def _write_hparams_config(log_dir, searchspace):
    HPARAMS = _create_hparams_config(searchspace)
    METRICS = [
        hp.Metric(
            "epoch_accuracy",
            group="validation",
            display_name="accuracy (val.)",
        ),
        hp.Metric(
            "epoch_loss",
            group="validation",
            display_name="loss (val.)",
        ),
        hp.Metric(
            "epoch_accuracy",
            group="train",
            display_name="accuracy (train)",
        ),
        hp.Metric(
            "epoch_loss",
            group="train",
            display_name="loss (train)",
        ),
    ]

    with tf.summary.create_file_writer(log_dir).as_default():
        hp.hparams_config(hparams=HPARAMS, metrics=METRICS)
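A minimal trial-runner sketch to go with this example (the model, datasets, and epoch count are placeholders, not part of the original source). The group="train" and group="validation" arguments above name the subdirectories in which Keras's TensorBoard callback writes its epoch_accuracy and epoch_loss scalars, so a trial logged as below is picked up by the configured table automatically:

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

def run_trial(trial_dir, hparams, model, train_ds, val_ds):
    callbacks = [
        # Writes epoch_* scalars into trial_dir/train and trial_dir/validation:
        tf.keras.callbacks.TensorBoard(trial_dir),
        # Records this trial's hyperparameter values for the hparams table:
        hp.KerasCallback(trial_dir, hparams),
    ]
    model.fit(train_ds, validation_data=val_ds, epochs=5, callbacks=callbacks)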
Example #3
def log_hyperparameters():
    """

    Blueprint for hyperparameter and metric logging in tensorboard during hyperparameter tuning

    Returns:
        logparams (list): List containing the hyperparameters to log in tensorboard.
        metrics (list): List containing the metrics to log in tensorboard.

    """

    logparams = [
        hp.HParam(
            "latent_dim",
            hp.Discrete([2, 4, 6, 8, 12, 16]),
            display_name="latent_dim",
            description="encoding size dimensionality",
        ),
        hp.HParam(
            "n_components",
            hp.IntInterval(min_value=1, max_value=25),
            display_name="n_components",
            description="latent component number",
        ),
        hp.HParam(
            "gram_weight",
            hp.RealInterval(min_value=0.0, max_value=1.0),
            display_name="gram_weight",
            description="weight of the gram loss",
        ),
    ]

    metrics = [
        hp.Metric(
            "val_number_of_populated_clusters",
            display_name="number of populated clusters",
        ),
        hp.Metric(
            "val_reconstruction_loss",
            display_name="reconstruction loss",
        ),
        hp.Metric(
            "val_gram_loss",
            display_name="gram loss",
        ),
        hp.Metric(
            "val_vq_loss",
            display_name="vq loss",
        ),
        hp.Metric(
            "val_total_loss",
            display_name="total loss",
        ),
    ]

    return logparams, metrics
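A short usage sketch (not part of the original source) showing how the two returned lists would typically be registered; the log directory is an assumption:

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

logparams, metrics = log_hyperparameters()
with tf.summary.create_file_writer("logs/hparam_tuning").as_default():
    hp.hparams_config(hparams=logparams, metrics=metrics)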
Example #4
    def on_test_begin(self, logs=None):
        eval_writer = self._get_writer(self._validation_run_name)
        with eval_writer.as_default():
            hp.hparams_config(
                hparams=[hp.HParam(k) for k, v in self.model.hparams.items()],
                metrics=[hp.Metric("accuracy"),
                         hp.Metric("f1_score")],
            )

        return super(TensorBoard, self).on_test_begin(logs=logs)
Example #5
def run(hparams, logdir):
    with tf.summary.create_file_writer(logdir).as_default():
        hp.hparams_config(
            hparams=[HP_HIDDEN, HP_EPOCHS, HP_LEARNING_RATE],
            metrics=[
                hp.Metric('mean_squared_error', display_name='mse'),
                hp.Metric('r2', display_name='r2')
            ],
        )
        mse, r2 = train_test_model(hparams, logdir)
        tf.summary.scalar('mean_squared_error', mse, step=1)
        tf.summary.scalar('r2', r2, step=1)
Example #6
def main(args):
    del args

    tv, vv = FLAGS.tv, FLAGS.vv

    data_version = f'tv{tv}-vv{vv}'
    log_dir = 'tensorboard/hparam'
    with tf.summary.create_file_writer(log_dir).as_default():
        hp.hparams_config(
            hparams=[
                HP_BATCH_SIZE, HP_OPTIMIZER, HP_LEARNING_RATE, HP_NUM_LAYERS
            ],
            metrics=[
                hp.Metric(METRIC_LOSS, display_name='Entropy Loss'),
                hp.Metric(METRIC_MAE, display_name='MAE')
            ],
        )

    for num_layers in HP_NUM_LAYERS.domain.values:
        for batch_size in HP_BATCH_SIZE.domain.values:
            for optimizer in HP_OPTIMIZER.domain.values:
                for learning_rate in HP_LEARNING_RATE.domain.values:
                    trial_version = f'bs{batch_size}-{optimizer}-lr{learning_rate}-nl{num_layers}'
                    hparam_flags = [
                        '--num_layers',
                        str(num_layers),
                        '--batch_size',
                        str(batch_size),
                        '--optimizer',
                        str(optimizer),
                        '--learning_rate',
                        str(learning_rate),
                        '--tv',
                        str(tv),
                        '--vv',
                        str(vv),
                    ]
                    trial_dir = f'{log_dir}/{data_version}/{trial_version}'
                    trial_id = f'{data_version}-{trial_version}'
                    output = run(hparam_flags)
                    val_loss = output['val_loss']
                    val_mae = output['val_mae']
                    hparams = {
                        'num_layers': num_layers,
                        'batch_size': batch_size,
                        'optimizer': optimizer,
                        'learning_rate': learning_rate,
                    }
                    with tf.summary.create_file_writer(trial_dir).as_default():
                        hp.hparams(hparams, trial_id)
                        tf.summary.scalar(METRIC_LOSS, val_loss, step=1)
                        tf.summary.scalar(METRIC_MAE, val_mae, step=1)
Example #7
def hparams_combinations(hparams):
    hp.hparams_config(hparams=list(hparams.values()),
                      metrics=[
                          hp.Metric('accuracy', display_name='Accuracy'),
                          hp.Metric('recall', display_name='Recall'),
                          hp.Metric('precision', display_name='Precision'),
                      ])
    hparams_keys = list(hparams.keys())
    hparams_values = list(product(*[h.domain.values
                                    for h in hparams.values()]))
    hparams = [dict(zip(hparams_keys, values)) for values in hparams_values]
    shuffle(hparams)
    return hparams
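A hypothetical driver for the shuffled combinations returned above; HPARAMS, train_and_eval, and the log paths are illustrative assumptions, not part of the original code:

for run_idx, hparams in enumerate(hparams_combinations(HPARAMS)):
    run_dir = f'logs/hparam_tuning/run-{run_idx}'
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)  # plain string keys are accepted as well as HParam objects
        accuracy = train_and_eval(hparams)  # assumed training/evaluation helper
        tf.summary.scalar('accuracy', accuracy, step=1)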
Example #8
def hparam_tuning(configs):
    """used to tune hyperparameters with tensorboard's hparam module
    currently uses the following:

    LATENT_DIMS = hp.HParam('latent_dim', hp.Discrete([2, 3]))
    RECON_WEIGHTS = hp.HParam('reconstruction_weight',
                              hp.RealInterval(0.9, 0.99))
    INTERMEDIATE_DIM = hp.HParam('intermediate_dim', hp.Discrete([12, 24, 48]))

    metrics displayed are:
        - average reconstruction loss over sample
        - average accuracy of latent space clustering with svm


    Args:
        configs (dict): training configs.

    Returns:
        None.

    """
    # hyperparameters to tune
    logdir = "tensorboard_logs/aae/" + datetime.now().strftime("%Y%m%d-%H%M%S")

    with tf.summary.create_file_writer(logdir + "/hparam_tuning").as_default():
        hp.hparams_config(
            hparams=[LATENT_DIMS, RECON_WEIGHTS, INTERMEDIATE_DIM],
            metrics=[
                hp.Metric("acc", display_name='accuracy score'),
                hp.Metric("mse", display_name='reconstruction error')
            ],
        )

    session_num = 0

    for intermediate_dim in INTERMEDIATE_DIM.domain.values:
        for recon_weights in np.arange(RECON_WEIGHTS.domain.min_value,
                                       RECON_WEIGHTS.domain.max_value, 0.05):
            for latent_dims in LATENT_DIMS.domain.values:
                hparams = {
                    INTERMEDIATE_DIM: intermediate_dim,
                    RECON_WEIGHTS: recon_weights,
                    LATENT_DIMS: latent_dims,
                }
                run_name = "run-%d" % session_num
                print('--- Starting trial: %s' % run_name)
                print({h.name: hparams[h] for h in hparams})
                run(logdir + '/hparam_tuning/' + run_name, configs, hparams)
                session_num += 1
Exemple #9
0
def hp_tuning(estimator_input, input_width, model_path):

    HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([128, 256, 512, 1024]))
    HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.0, 0.05, 0.1, 0.5]))
    HP_LEARNING_RATE = hp.HParam('learning_rate',
                                 hp.Discrete([0.00001, 0.0005, 0.001]))
    HP_ACTIVATION = hp.HParam('activation', hp.Discrete(['relu', 'tanh']))

    METRIC_ACCURACY = 'accuracy'

    # Register every tuned hyperparameter so each one gets its own column:
    with tf.summary.create_file_writer(model_path + '/logs/').as_default():
        hp.hparams_config(
            hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_ACTIVATION, HP_LEARNING_RATE],
            metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
        )
    session_num = 0

    for num_units in HP_NUM_UNITS.domain.values:
        for dropout_rate in HP_DROPOUT.domain.values:
            for activation in HP_ACTIVATION.domain.values:
                for learning_rate in HP_LEARNING_RATE.domain.values:
                    hparams = {
                        'num_units': num_units,
                        'dropout': dropout_rate,
                        'activation': activation,
                        'learning_rate': learning_rate
                    }
                    run_name = id_from_hp(hparams)
                    print('--- Starting trial: %s' % run_name)
                    print(hparams)
                    run(model_path + '/logs/' + run_name, hparams,
                        estimator_input, input_width)
                    session_num += 1
Example #10
    def tuning(self):
        self.HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([3, 6]))
        self.HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2))
        self.HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam',
                                                                'sgd']))

        self.METRIC_ACCURACY = 'accuracy'

        with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
            hp.hparams_config(
                hparams=[
                    self.HP_NUM_UNITS, self.HP_DROPOUT, self.HP_OPTIMIZER
                ],
                metrics=[
                    hp.Metric(self.METRIC_ACCURACY, display_name='Accuracy')
                ],
            )
        session_num = 0

        for num_units in self.HP_NUM_UNITS.domain.values:
            for dropout_rate in (self.HP_DROPOUT.domain.min_value,
                                 self.HP_DROPOUT.domain.max_value):
                for optimizer in self.HP_OPTIMIZER.domain.values:
                    hparams = {
                        self.HP_NUM_UNITS: num_units,
                        self.HP_DROPOUT: dropout_rate,
                        self.HP_OPTIMIZER: optimizer,
                    }
                    run_name = "run-%d" % session_num
                    print('--- Starting trial: %s' % run_name)
                    print({h.name: hparams[h] for h in hparams})
                    self.run('logs/hparam_tuning/' + run_name, hparams)
                    session_num += 1
Example #11
 def hyperparameter_setup(self):
     tf.summary.trace_on(graph=True, profiler=True)
     with tf.summary.create_file_writer(self.log_dir).as_default():
         hp.hparams_config(
             hparams=[HP_NH2, HP_LAMBDA],
             metrics=[hp.Metric(METRIC_MSE, display_name='mse')],
         )
Example #12
    def __init__(self, logdir: str,
                 hparams: Dict[str, Union[Tuple[float, float],
                                          List]], metrics: Dict[str, str]):
        self._hparams = []
        for name, param in hparams.items():
            if isinstance(param, tuple):
                low, high = param  # avoid shadowing the built-in min/max
                if isinstance(low, float):
                    self._hparams.append(
                        hp.HParam(
                            name, hp.RealInterval(min_value=low,
                                                  max_value=high)))
                elif isinstance(low, int):
                    self._hparams.append(
                        hp.HParam(name,
                                  hp.IntInterval(min_value=low,
                                                 max_value=high)))
            elif isinstance(param, list):
                self._hparams.append(hp.HParam(name, hp.Discrete(param)))

        self._metrics = metrics
        self._writer = tf.summary.create_file_writer(logdir=logdir)
        with self._writer.as_default():
            hp.hparams_config(
                hparams=self._hparams,
                metrics=[
                    hp.Metric(name, display_name=display)
                    for name, display in metrics.items()
                ],
            )
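An illustrative construction for this logger (the class name HParamsLogger is hypothetical; the source only shows __init__). A float tuple becomes a RealInterval, an int tuple an IntInterval, and a list a Discrete domain:

logger = HParamsLogger(  # hypothetical name for the class this __init__ belongs to
    logdir='logs/hparam_tuning',
    hparams={
        'lr': (1e-4, 1e-1),            # float tuple -> RealInterval
        'layers': (1, 4),              # int tuple   -> IntInterval
        'optimizer': ['adam', 'sgd'],  # list        -> Discrete
    },
    metrics={'val_loss': 'loss (val.)'},
)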
Example #13
    def configure_hparams(self, hp_val, metrics):
        log.info(f"Configure hyper-params on session: {self.sess_num}")
        log.info(hp_val)

        with self.trace_writer.as_default():
            hp.hparams_config(
                hparams=[hparam.tf_hparam for hparam in self.hp_params.values()],
                metrics=[
                    hp.Metric(
                        f"{prefix}{eval_func}", display_name=f"{prefix}{eval_func}"
                    )
                    for prefix, eval_func in itertools.product(
                        ["TRAIN_", "VALID_", "TEST_"], metrics
                    )
                ],
            )
            hp.hparams(
                dict(
                    (
                        self.hp_params[param_name].tf_hparam,
                        param_val
                        if not self.hp_params[param_name].advanced_dtype
                        else str(param_val),
                    )
                    for param_name, param_val in hp_val.items()
                )
            )
Example #14
def hypertune_model():
    """
    This function uses teh previous functions to iterate through a set of parameters and run all possible combinations.
    ----------
    Parameters:
    NONE    
    ----------
    """
    with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
        hp.hparams_config(
            hparams=[
                HP_NUM_CONV_UNITS, HP_NUM_LSTM_UNITS, HP_NUM_DENSE_UNITS,
                HP_DROPOUT
            ],
            metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')])
    session_num = 86
    for num_conv_units in HP_NUM_CONV_UNITS.domain.values:
        for num_dense_units in HP_NUM_DENSE_UNITS.domain.values:
            for num_lstm_units in HP_NUM_LSTM_UNITS.domain.values:
                for dropout_rate in (HP_DROPOUT.domain.min_value,
                                     HP_DROPOUT.domain.max_value):
                    hparams = {
                        HP_NUM_CONV_UNITS: num_conv_units,
                        HP_NUM_DENSE_UNITS: num_dense_units,
                        HP_NUM_LSTM_UNITS: num_lstm_units,
                        HP_DROPOUT: dropout_rate,
                    }
                    run_name = "run-%d" % session_num
                    print('--- Starting trial: %s' % run_name)
                    print({h.name: hparams[h] for h in hparams})
                    run('logs/hparam_tuning/' + run_name, hparams)
                    session_num += 1
Example #15
def run_all(train_dataset, val_dataset, logdir, verbose=False):
    """Perform a random search over the hyperparameter space.
    Arguments:
      train_dataset: The training dataset passed through to each run.
      val_dataset: The validation dataset passed through to each run.
      logdir: The top-level directory into which to write data. This
        directory should be empty or nonexistent.
      verbose: If true, print out each run's name as it begins.
    """
    rng = random.Random(0)

    with tf.summary.create_file_writer(logdir).as_default():
        hp.hparams_config(
            HPARAMS,
            metrics=[
                hp.Metric(
                    "batch_loss",
                    group="train",
                    display_name="loss (train)",
                ),
                hp.Metric(METRIC_LOSS,
                          group='validation',
                          display_name='Val loss')
            ],
        )

    sessions_per_group = 1
    num_sessions = 30 * sessions_per_group
    session_index = 0  # across all session groups
    for group_index in range(30):
        hparams = {h: h.domain.sample_uniform(rng) for h in HPARAMS}
        hparams_string = str(hparams)
        for repeat_index in range(sessions_per_group):
            session_id = str(session_index)
            session_index += 1
            if verbose:
                print("--- Running training session %d/%d" %
                      (session_index, num_sessions))
                print(hparams_string)
                print("--- repeat #: %d" % (repeat_index + 1))
            run(
                train_dataset,
                val_dataset,
                base_logdir=logdir,
                session_id=session_id,
                hparams=hparams,
            )
Example #16
    def __init__(self,
                 num_session_groups=10,
                 HP_CONV_LAYERS=hp.HParam("conv_layers", hp.IntInterval(1, 3)),
                 HP_CONV_KERNEL_SIZE=hp.HParam("conv_kernel_size", hp.Discrete([3, 5])),
                 HP_DENSE_LAYERS=hp.HParam("dense_layers", hp.IntInterval(1, 3)),
                 HP_DROPOUT=hp.HParam("dropout", hp.RealInterval(0.1, 0.4)),
                 HP_OPTIMIZER=hp.HParam("optimizer", hp.Discrete(["adam", "adagrad"]))):
        self.HP_CONV_LAYERS = HP_CONV_LAYERS
        self.HP_CONV_KERNEL_SIZE = HP_CONV_KERNEL_SIZE
        self.HP_DENSE_LAYERS = HP_DENSE_LAYERS
        self.HP_DROPOUT = HP_DROPOUT
        self.HP_OPTIMIZER = HP_OPTIMIZER
        self.num_session_groups = num_session_groups

        self.HPARAMS = [
            HP_CONV_LAYERS,
            HP_CONV_KERNEL_SIZE,
            HP_DENSE_LAYERS,
            HP_DROPOUT,
            HP_OPTIMIZER,
        ]

        self.METRICS = [
            hp.Metric(
                "epoch_accuracy",
                group="train",
                display_name="accuracy (train)",
            ),
            hp.Metric(
                "epoch_loss",
                group="train",
                display_name="loss (train)",
            ),
            hp.Metric(
                "epoch_accuracy",
                group="validation",
                display_name="accuracy (val.)",
            ),
            hp.Metric(
                "epoch_loss",
                group="validation",
                display_name="loss (val.)",
            )
        ]
Example #17
    def evaluate(self, avpool=True):
        predictions = self._models["full"].predict(self._val_ds)
        num_categories = len(self.categories)

        for i in range(num_categories):
            category = self.categories[i]
            preds = predictions[:, i]
            y_true = self._val_labels[:, i]

            acc = np.mean(y_true == (preds >= 0.5).astype(int))

            auc = roc_auc_score(y_true, preds)
            self._record_scalars(**{
                f"val_accuracy_{category}": acc,
                f"val_auc_{category}": auc
            },
                                 metric=True)

        # choose the hyperparameters to record
        if not hasattr(self, "_hparams_config"):
            from tensorboard.plugins.hparams import api as hp

            metrics = []
            for c in self.categories:
                metrics.append(f"val_accuracy_{c}")
                metrics.append(f"val_auc_{c}")

            hparams = {
                hp.HParam("lam", hp.RealInterval(0., 10000.)):
                self.config["lam"],
                hp.HParam("tau", hp.RealInterval(0., 10000.)):
                self.config["tau"],
                hp.HParam("mu", hp.RealInterval(0., 10000.)):
                self.config["mu"],
                hp.HParam("batch_size", hp.RealInterval(0., 10000.)):
                self.input_config["batch_size"],
                hp.HParam("lr", hp.RealInterval(0., 10000.)):
                self.config["lr"],
                hp.HParam("lr_decay", hp.RealInterval(0., 10000.)):
                self.config["lr_decay"],
                hp.HParam("decay_type",
                          hp.Discrete(["cosine", "exponential", "staircase"])):
                self.config["decay_type"],
                hp.HParam("opt_type", hp.Discrete(["sgd", "adam", "momentum"])):
                self.config["opt_type"],
                hp.HParam("weight_decay", hp.RealInterval(0., 10000.)):
                self.config["weight_decay"]
            }

            self._hparams_config = hp.hparams_config(
                hparams=list(hparams.keys()),
                metrics=[hp.Metric(m) for m in metrics])
            # record hyperparameters
            base_dir, run_name = os.path.split(self.logdir)
            if len(run_name) == 0:
                base_dir, run_name = os.path.split(base_dir)
Example #18
    def _init_dir(self):
        try:
            os.mkdir(self.save_dir)
        except FileNotFoundError:
            print(
                'Unable to create save_dir directory. Ensure that all but the last directory in the path already exist.'
            )
            raise
        except FileExistsError:
            print('Directory already exists.')
            if self.overwrite:
                print('Overwriting directory.')
                shutil.rmtree(self.save_dir[:-1])
                os.mkdir(self.save_dir)
            else:
                print('Generating new directory.')
                v = 2
                new_path = self.save_dir
                while os.path.exists(new_path):
                    new_path = self.save_dir[:-1] + '_v' + str(v) + '/'
                    v += 1
                self.save_dir = new_path
                os.mkdir(self.save_dir)
                print('New directory:\n{}'.format(self.save_dir))

        assert self.save_dir[-1] == '/', \
                'Last character of self.save_dir must be \'/\', but is \'{}\''.format(self.save_dir[-1])

        with tf.summary.create_file_writer(self.save_dir).as_default():
            hp.hparams_config(
                hparams=self.hparams,
                metrics=[
                    hp.Metric(self.METRIC_TEST_ACCURACY,
                              display_name='Test Accuracy'),
                    hp.Metric(self.METRIC_TEST_ACCURACY_UNCERTAINTY,
                              display_name='Test Accuracy Uncertainty'),
                    hp.Metric(self.METRIC_TRAINING_LOSS,
                              display_name='Training Loss'),
                    hp.Metric(self.METRIC_VALIDATION_ACCURACY,
                              display_name='Validation Accuracy')
                ])
Example #19
    def definitions_tensorflow():
        metrics = [hp.Metric('tp'),
                   hp.Metric('fp'),
                   hp.Metric('tn'),
                   hp.Metric('fn'),
                   hp.Metric('precision'),
                   hp.Metric('recall'),
                   hp.Metric('auc')]

        return metrics
Example #20
def hparams_combinations(hparams, metrics):
    hp.hparams_config(hparams=list(hparams.values()),
                      metrics=[
                          hp.Metric(metric, display_name=metric.capitalize())
                          for metric in metrics
                      ])
    hparams_keys = list(hparams.keys())
    hparams_values = list(product(*[h.domain.values
                                    for h in hparams.values()]))
    hparams = [dict(zip(hparams_keys, values)) for values in hparams_values]
    shuffle(hparams)
    return hparams
Example #21
def train():

    run_experiment_fn = build_experiment_fn()

    log_hparams_path = os.path.join(FLAGS.tb_log_dir, 'hparam_tuning')

    with tf.summary.create_file_writer(log_hparams_path).as_default():
        hp.hparams_config(hparams=[
            HP_TOKEN_TYPE, HP_VOCAB_SIZE, HP_EMBED_SIZE, HP_MEMORY_SIZE,
            HP_MEMORY_HOPS, HP_BATCH_SIZE, HP_EPOCHS, HP_LEARNING_RATE
        ],
                          metrics=[
                              hp.Metric(METRIC_ACCURACY,
                                        display_name='Accuracy'),
                              hp.Metric(METRIC_LOSS, display_name='Loss')
                          ])

    session_num = 0

    for token_type in HP_TOKEN_TYPE.domain.values:
        for vocab_size in HP_VOCAB_SIZE.domain.values:
            for embedding_size in HP_EMBED_SIZE.domain.values:
                for memory_size in HP_MEMORY_SIZE.domain.values:
                    for memory_hops in HP_MEMORY_HOPS.domain.values:
                        for batch_size in HP_BATCH_SIZE.domain.values:
                            for epochs in HP_EPOCHS.domain.values:
                                for learning_rate in HP_LEARNING_RATE.domain.values:
                                    run_experiment_fn(
                                        session_num, {
                                            HP_TOKEN_TYPE: token_type,
                                            HP_MEMORY_SIZE: memory_size,
                                            HP_VOCAB_SIZE: vocab_size,
                                            HP_BATCH_SIZE: batch_size,
                                            HP_EMBED_SIZE: embedding_size,
                                            HP_MEMORY_HOPS: memory_hops,
                                            HP_LEARNING_RATE: learning_rate,
                                            HP_EPOCHS: epochs
                                        })
                                    session_num += 1
Example #22
def init(hp_structure, hp_dropout, hp_optimizer, hp_learningrate, hp_l2,
         hp_activation, BATCH_SIZES, EPOCH):
    HP_STRUCTURE = hp_structure
    HP_DROPOUT = hp_dropout
    HP_OPTIMIZER = hp_optimizer
    HP_LEARNINGRATE = hp_learningrate
    HP_L2 = hp_l2
    HP_ACTIVATION = hp_activation
    batch_sizes = BATCH_SIZES
    epoch = EPOCH

    with tf.summary.create_file_writer(log_dir).as_default():
        hp.hparams_config(
            hparams=[
                hp_structure, hp_dropout, hp_optimizer, HP_MOMENTUM,
                hp_learningrate, hp_l2, hp_activation, HP_AUGMENTATION
            ],
            metrics=[
                hp.Metric(METRIC_ACCURACY, display_name='Accuracy_test'),
                hp.Metric(METRIC_LOSS, display_name='loss_test')
            ],
        )
Example #23
 def define_hparams(self, config):
     self.hparams_def = {}
     hparams = [
         self.hparam_from_config_space_item(c) for c in config['space']
     ]
     metrics = [hp.Metric('loss', display_name='loss')]
     with tf.summary.create_file_writer(self.log_dir +
                                        '/hparam_tuning').as_default():
         hp.hparams_config(
             hparams=hparams,
             metrics=metrics,
         )
     return self.hparams_def
Example #24
def tune_hparams(epochs, train_dataset, val_dataset, HPARAMS, logdir):
    with tf.summary.create_file_writer(str(logdir)).as_default():
        hp.hparams_config(
            hparams=HPARAMS,
            metrics=[hp.Metric('train_loss', display_name='TrainMSELoss'),
                     hp.Metric('val_loss', display_name='ValMSELoss')]
        )
    with open(logdir / 'config.txt', 'w') as f:
        content = "\n".join([param.name + ":" + str(param.domain.values) for param in HPARAMS])
        f.write(content)

    session_num = 0
    import itertools
    domains = [param.domain.values for param in HPARAMS]
    for assignment in itertools.product(*domains):
        hparams = {param: val for param, val in zip(HPARAMS, assignment)}
        print(f'--- Starting trial: run-{session_num}')
        print({h.name: hparams[h] for h in hparams})
        model, outdir = train_once(epochs=epochs, train_dataset=train_dataset, val_dataset=val_dataset,
                                   hparams=hparams, base_log_dir=logdir)
        evaluate.test_on_track(model, outdir)
        session_num += 1
Example #25
def init_hparams(hparams, FLAGS):

    hparam_dir = os.path.join(FLAGS.artifacts_dir, 'tblogs')
    os.makedirs(hparam_dir, exist_ok=True)

    metric = hp.Metric('sparse_categorical_accuracy', display_name='acc')

    with tf.summary.create_file_writer(hparam_dir).as_default():
        hp.hparams_config(
            hparams=hparams,
            metrics=[metric],
        )

    return hparam_dir
Example #26
    def _initialization(self, study: optuna.Study) -> None:
        completed_trials = [
            trial for trial in study.get_trials(deepcopy=False)
            if trial.state == optuna.trial.TrialState.COMPLETE
        ]
        for trial in completed_trials:
            self._add_distributions(trial.distributions)

        with self._writer.as_default():
            hp.hparams_config(hparams=list(self._hp_params.values()),
                              metrics=[
                                  hp.Metric(self._metric_name,
                                            display_name=self._metric_name)
                              ])
Example #27
def hparam_tuning(train_dataset: tf.data.Dataset, val_dataset: tf.data.Dataset,
                  epochs: int, log_dir: str):
    """Performs hyperparameter tuning.

    Args:
        train_dataset: A `tf.data` dataset. Should return a tuple
            of `(inputs, labels)`.
        val_dataset: A `tf.data` dataset on which to evaluate
            the loss and any metrics at the end of each epoch.
            Should return a tuple of `(inputs, labels)`.
        epochs: Number of epochs to train the model.
        log_dir: The directory where logs will be written.
    """

    HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.1, 0.2, 0.5]))
    HP_LEARNING_RATE = hp.HParam('learning_rate',
                                 hp.Discrete([1e-3, 1e-4, 1e-5]))
    HP_OPTIMIZER = hp.HParam('optimizer',
                             hp.Discrete(['sgd', 'rmsprop', 'adam']))

    METRIC_ACCURACY = 'accuracy'

    with tf.summary.create_file_writer(log_dir).as_default():
        hp.hparams_config(
            hparams=[HP_DROPOUT, HP_LEARNING_RATE, HP_OPTIMIZER],
            metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')])

    trial_step = 0

    for dropout in HP_DROPOUT.domain.values:
        for learning_rate in HP_LEARNING_RATE.domain.values:
            for optimizer in HP_OPTIMIZER.domain.values:
                hparams = {
                    HP_DROPOUT: dropout,
                    HP_LEARNING_RATE: learning_rate,
                    HP_OPTIMIZER: optimizer,
                }

                trial_id = f'run-{trial_step}'
                trial_dir = os.path.join(log_dir, trial_id)
                logging.info(f'--- Starting trial: {trial_id}')
                logging.info({h.name: hparams[h] for h in hparams})

                accuracy = train_keras(train_dataset, val_dataset, dropout,
                                       learning_rate, optimizer, epochs,
                                       trial_dir, hparams)
                write_trial_log(METRIC_ACCURACY, accuracy, hparams, trial_dir)

                trial_step += 1
Example #28
 def __init__(self, log_dir, ckpt_dir, initial_model_path, custom_model_fn, train_ds, val_ds, metric):
     self.log_dir = log_dir
     self.ckpt_dir = ckpt_dir
     make_directory(log_dir)
     make_directory(ckpt_dir)
     self.metric = metric
     self.initial_model_path = initial_model_path
     self.train_ds = train_ds
     self.val_ds = val_ds
     self.custom_model_fn = custom_model_fn
     with tf.summary.create_file_writer(log_dir).as_default():
         hp.hparams_config(
             hparams=make_hparam_list(),
             metrics=[hp.Metric(metric, display_name=metric)],
         )
Example #29
def create_hparams_callback(log_dir, opt_metric, hparams, args):
    """
    Set up Hprams plugin config and callback for Tensorboard
    """
    hparams_dir = os.path.join(log_dir, 'validation')
    opt_metric = 'epoch_' + opt_metric

    # Hparams callback to log the hyperparameter values
    with tf.summary.create_file_writer(hparams_dir).as_default():
        hp.hparams_config(hparams=[hp.HParam(hparam) for hparam in hparams],
                          metrics=[hp.Metric(opt_metric)])
    hparams_cb = hp.KerasCallback(
        writer=hparams_dir,
        hparams={hparam: args[hparam]
                 for hparam in hparams})
    return hparams_cb
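A hedged usage sketch for the returned callback; model, the datasets, and all argument values here are assumptions for illustration:

hparams_cb = create_hparams_callback(
    log_dir='logs/run-0',
    opt_metric='accuracy',  # prefixed to 'epoch_accuracy' inside the function
    hparams=['dropout', 'learning_rate'],
    args={'dropout': 0.2, 'learning_rate': 1e-3},
)
model.fit(train_ds, validation_data=val_ds, epochs=10,
          callbacks=[tf.keras.callbacks.TensorBoard('logs/run-0'), hparams_cb])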
Example #30
    def search(self, data_zip, groups, folds=5, positive_weight=1.0, epochs=40):
        hparam_tuning_fn = f"{self.run_dir_prefix}/hparam_tuning"
        with tf.summary.create_file_writer(hparam_tuning_fn).as_default():
            hp.hparams_config(
                hparams=list(self.hparams_domains.values()),
                metrics=[hp.Metric("auc", display_name='Average AUC')]
            )
        num = 0
        best_auc = 0
        best_params = None
        for run_num, hparam_run in enumerate(self.get_hparams_combinations()):
            auc = self.run_log_cv(hparam_run, run_num,
                            data_zip, groups, folds, positive_weight, epochs)
            if auc > best_auc:
                best_params = hparam_run
                best_auc = auc

        logging.info("[Hyperparameter Detective] Best AVG AUC is %s with hparams %s", best_auc, best_params)