Code example #1
def test_keras_pruning_callback_monitor_is_invalid() -> None:

    study = optuna.create_study(pruner=DeterministicPruner(True))
    trial = study.ask()
    callback = KerasPruningCallback(trial, "InvalidMonitor")

    with pytest.warns(UserWarning):
        callback.on_epoch_end(0, {"loss": 1.0})
Code example #2
def objective(trial):
    # Clear clutter from previous session graphs.
    keras.backend.clear_session()

    # The data is split between train and validation sets.
    (x_train, y_train), (x_valid, y_valid) = mnist.load_data()
    x_train = x_train.reshape(60000,
                              784)[:N_TRAIN_EXAMPLES].astype("float32") / 255
    x_valid = x_valid.reshape(10000,
                              784)[:N_VALID_EXAMPLES].astype("float32") / 255

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train[:N_TRAIN_EXAMPLES], CLASSES)
    y_valid = keras.utils.to_categorical(y_valid[:N_VALID_EXAMPLES], CLASSES)

    # Generate our trial model.
    model = create_model(trial)

    # Fit the model on the training data.
    # The KerasPruningCallback checks for pruning condition every epoch.
    model.fit(
        x_train,
        y_train,
        batch_size=BATCHSIZE,
        callbacks=[KerasPruningCallback(trial, "val_acc")],
        epochs=EPOCHS,
        validation_data=(x_valid, y_valid),
        verbose=1,
    )

    # Evaluate the model accuracy on the validation set.
    score = model.evaluate(x_valid, y_valid, verbose=0)
    return score[1]
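For context, here is a minimal sketch of how an objective like the one above is typically handed to Optuna. The pruner choice (MedianPruner), the direction, and the trial budget are illustrative assumptions, not part of the original example.

import optuna

# Illustrative study setup (assumed values): maximize validation accuracy and
# let MedianPruner stop unpromising trials based on what KerasPruningCallback reports.
study = optuna.create_study(
    direction="maximize",
    pruner=optuna.pruners.MedianPruner(n_warmup_steps=2),
)
study.optimize(objective, n_trials=100, timeout=600)

print("Best value:", study.best_value)
print("Best params:", study.best_params)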
Code example #3
    def objective(trial):

        discard_state_train, discard_state_test, discard_ans_vector_train, discard_ans_vector_test = train_test_split(
            discard_state_nml, discard_ans_vector_nml, test_size=0.25)
        one_hot_discard_state_train = one_hot(discard_state_train, num)
        one_hot_discard_state_test = one_hot(discard_state_test, num)

        # Generate our trial model.
        model = create_model(trial)

        # Fit the model on the training data.
        # The KerasPruningCallback checks for pruning condition every epoch.
        model.fit(one_hot_discard_state_train,
                  discard_ans_vector_train,
                  epochs=EPOCHS,
                  callbacks=[KerasPruningCallback(trial, 'val_acc')],
                  validation_data=(one_hot_discard_state_test,
                                   discard_ans_vector_test),
                  verbose=0)

        # Evaluate the model accuracy on the test set.
        score = model.evaluate(one_hot_discard_state_test,
                               discard_ans_vector_test,
                               verbose=0)
        return score[1]
Code example #4
    def objective(trial):
        # Clear clutter from previous session graphs.
        keras.backend.clear_session()

        discard_state_train, discard_state_test, discard_ans_vector_train, discard_ans_vector_test = train_test_split(
            discard_state_nml, discard_ans_vector_nml, test_size=0.25)
        one_hot_discard_state_train = one_hot(
            discard_state_train, num).reshape(len(discard_state_train),
                                              input_shape[0], input_shape[1],
                                              1)
        one_hot_discard_state_test = one_hot(discard_state_test, num).reshape(
            len(discard_state_test), input_shape[0], input_shape[1], 1)
        # Generate our trial model.
        model = create_model(trial)

        # Fit the model on the training data.
        # The KerasPruningCallback checks for pruning condition every epoch.
        model.fit(one_hot_discard_state_train,
                  discard_ans_vector_train,
                  epochs=EPOCHS,
                  callbacks=[KerasPruningCallback(trial, 'val_acc')],
                  validation_data=(one_hot_discard_state_test,
                                   discard_ans_vector_test),
                  verbose=0)

        # Evaluate the model accuracy on the test set.
        score = model.evaluate(one_hot_discard_state_test,
                               discard_ans_vector_test,
                               verbose=0)
        return score[1]
Code example #5
File: test_keras.py Project: toshihikoyanase/optuna
    def objective(trial: optuna.trial.Trial) -> float:

        model = Sequential()
        model.add(Dense(1, activation="sigmoid", input_dim=20))
        model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
        model.fit(
            np.zeros((16, 20), np.float32),
            np.zeros((16,), np.int32),
            batch_size=1,
            epochs=epochs,
            callbacks=[KerasPruningCallback(trial, "accuracy", interval=interval)],
            verbose=0,
        )

        return 1.0
Code example #6
    def objective(trial):
        # type: (optuna.trial.Trial) -> float

        model = Sequential()
        model.add(Dense(1, activation='sigmoid', input_dim=20))
        model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
        model.fit(
            np.zeros((16, 20), np.float32),
            np.zeros((16, ), np.int32),
            batch_size=1,
            epochs=1,
            callbacks=[KerasPruningCallback(trial, 'accuracy')],
            verbose=0)

        return 1.0
Code example #7
def test_keras_pruning_callback_observation_isnan() -> None:

    study = optuna.create_study(pruner=DeterministicPruner(True))
    trial = study.ask()
    callback = KerasPruningCallback(trial, "loss")

    with pytest.raises(optuna.TrialPruned):
        callback.on_epoch_end(0, {"loss": 1.0})

    with pytest.raises(optuna.TrialPruned):
        callback.on_epoch_end(0, {"loss": float("nan")})
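The two tests above lean on what the callback does at the end of each monitored epoch: it reports the metric value to the trial and raises optuna.TrialPruned when the pruner decides to stop (DeterministicPruner(True) always does, which is why even the NaN report ends in pruning). A rough sketch of that logic, not the library's actual implementation and assuming a tf.keras backend, looks like this:

import warnings

import optuna
from tensorflow import keras  # assumption: tf.keras is the Keras in use


class MinimalPruningCallback(keras.callbacks.Callback):
    """Simplified stand-in for KerasPruningCallback, for illustration only."""

    def __init__(self, trial, monitor):
        super().__init__()
        self._trial = trial
        self._monitor = monitor

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        value = logs.get(self._monitor)
        if value is None:
            # Mirrors code example #1: an unknown monitor only triggers a warning.
            warnings.warn("Metric '{}' is not available in logs.".format(self._monitor))
            return
        # Report the intermediate value, then ask the pruner whether to stop.
        self._trial.report(float(value), step=epoch)
        if self._trial.should_prune():
            raise optuna.TrialPruned("Trial pruned at epoch {}.".format(epoch))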
Code example #8
File: test_keras.py Project: vishnu-itachi/optuna
def test_keras_pruning_callback_observation_isnan():
    # type: () -> None

    study = optuna.create_study(pruner=DeterministicPruner(True))
    trial = create_running_trial(study, 1.0)
    callback = KerasPruningCallback(trial, "loss")

    with pytest.raises(optuna.exceptions.TrialPruned):
        callback.on_epoch_end(0, {"loss": 1.0})

    with pytest.raises(optuna.exceptions.TrialPruned):
        callback.on_epoch_end(0, {"loss": float("nan")})
Code example #9
def test_keras_pruning_callback_observation_isnan():
    # type: () -> None

    study = optuna.create_study(pruner=DeterministicPruner(True))
    trial = study._run_trial(func=lambda _: 1.0, catch=(Exception, ))
    callback = KerasPruningCallback(trial, 'loss')

    with pytest.raises(optuna.structs.TrialPruned):
        callback.on_epoch_end(0, {'loss': 1.0})

    with pytest.raises(optuna.structs.TrialPruned):
        callback.on_epoch_end(0, {'loss': float('nan')})
Code example #10
def test_keras_pruning_callback_observation_isnan():
    # type: () -> None

    # TODO(higumachan): remove this "if" section after Tensorflow supports Python 3.7.
    if not _available:
        pytest.skip(
            'This test requires keras '
            'but this version can not install keras(tensorflow) with pip.')

    study = optuna.create_study(pruner=DeterministicPruner(True))
    trial = study._run_trial(func=lambda _: 1.0, catch=(Exception, ))
    callback = KerasPruningCallback(trial, 'loss')

    with pytest.raises(optuna.structs.TrialPruned):
        callback.on_epoch_end(0, {'loss': 1.0})

    with pytest.raises(optuna.structs.TrialPruned):
        callback.on_epoch_end(0, {'loss': float('nan')})
Code example #11
def objective(trial):

    # Open data file
    f_in = h5py.File(DT_FL_IN, "r")
    dt_in = f_in[DT_DST_IN]

    f_out = h5py.File(DT_FL_OUT, "r")
    dt_out = f_out[DT_DST_OUT]

    WD = 2
    # Dummy y_data
    x_data, _ = format_data(dt_in, wd=WD, get_y=True)
    _, y_data = format_data(dt_out, wd=WD, get_y=True)
    x_data = np.squeeze(x_data)

    # Split data and get slices
    idxs = split(x_data.shape[0],
                 N_TRAIN,
                 N_VALID,
                 test_last=dt_in.attrs["idx"])
    slc_trn, slc_vld, slc_tst = slicer(x_data.shape, idxs)

    # Get data
    x_train = x_data[slc_trn[0]]
    y_train = y_data[slc_trn[0]]
    x_val = x_data[slc_vld[0]]
    y_val = y_data[slc_vld[0]]

    conv_shape = y_train.shape[1:3]
    # Strides cfg
    strd = [2, 2, 5, 5]

    # Limits and options
    epochs = 60
    # Filters
    flt_lm = [[4, 128], [4, 128], [4, 128]]
    d_lm = [1, 50]
    # Kernel
    k_lm = [3, 5]
    # Regularizer
    l2_lm = [1e-7, 1e-3]
    # Activation functions
    act_opts = ["relu", "elu", "tanh", "linear"]
    # Latent space cfg
    lt_sz = [5, 150]
    lt_dv = [0.3, 0.7]
    # Learning rate
    lm_lr = [1e-5, 1e-1]

    # Clear tensorflow session
    tf.keras.backend.clear_session()
    # Input
    inputs = layers.Input(shape=x_train.shape[1:])
    d = inputs
    # Decoder
    n_layers = trial.suggest_int("n_layers", 1, 3)
    flt = trial.suggest_int("nl_flt", d_lm[0], d_lm[1])
    # Reduction from output
    red = np.prod(strd[:n_layers])
    # Decoder first shape
    lt_shp = (np.array(conv_shape) / red).astype(int)
    # Decoder dense size
    n_flat = np.prod(lt_shp) * flt
    # Format stride list
    strd = strd[::-1][-n_layers:]
    # Latent -> Decoder layer
    # Activation
    act_lt = trial.suggest_categorical("lt_activation", act_opts)
    # Regularization
    l2_lt = int(trial.suggest_loguniform("lt_l2", l2_lm[0], l2_lm[1]))
    l2_reg = regularizers.l2(l=l2_lt)
    # Flat input to the decoder
    d = layers.Dense(n_flat,
                     activation=act_lt,
                     kernel_regularizer=l2_reg,
                     name="l1_dense_decoder")(inputs)
    # Reshape to the output of the encoder
    d = layers.Reshape(list(lt_shp) + [flt])(d)
    # Generate the convolutional layers
    for i in range(n_layers):
        # Get number of filters
        flt = trial.suggest_int("n{}_flt".format(i), flt_lm[i][0],
                                flt_lm[i][1])
        # Get the kernel size
        k_sz = trial.suggest_categorical("d{}_kernel_size".format(i), k_lm)
        # Get the activation function
        act = trial.suggest_categorical("d{}_activation".format(i), act_opts)
        # Regularization value
        l2 = trial.suggest_loguniform("d{}_l2".format(i), l2_lm[0], l2_lm[1])
        l2_reg = regularizers.l2(l=l2)
        # Convolutional layer
        d = layers.Conv2DTranspose(
            flt,
            (k_sz, k_sz),
            strides=strd[i],
            activation=act,
            padding="same",
            kernel_regularizer=l2_reg,
            name="{}_decoder".format(i + 1),
        )(d)
        dp = 0
        # Dropout layers
        if dp > 0:
            d = layers.Dropout(dp, name="{}_dropout_decoder".format(i + 1))(d)

    decoded = layers.Conv2DTranspose(
        y_train.shape[3],
        (5, 5),
        activation="linear",
        padding="same",
        name="output_decoder",
    )(d)

    ae = Model(inputs, decoded, name="Decoder_nxt")

    # Early stopping monitoring the loss of the validation dataset
    monitor = "val_loss_norm_error"
    patience = int(epochs * 0.3)
    es = EarlyStopping(monitor=monitor,
                       mode="min",
                       patience=patience,
                       restore_best_weights=True)

    opt = "adam"
    if opt == "adam":
        k_optf = optimizers.Adam
    elif opt == "nadam":
        k_optf = optimizers.Nadam
    elif opt == "adamax":
        k_optf = optimizers.Adamax

    lr = trial.suggest_loguniform("lr", lm_lr[0], lm_lr[1])
    if lr > 0:
        k_opt = k_optf(learning_rate=lr)
    else:
        k_opt = k_optf()

    ae.compile(optimizer=k_opt,
               loss=loss_norm_error,
               metrics=["mse", loss_norm_error])

    batch_size = int(trial.suggest_uniform("batch_sz", 2, 32))
    ae.summary()
    hist = ae.fit(
        x_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        shuffle=True,
        validation_data=(x_val, y_val),
        callbacks=[KerasPruningCallback(trial, "val_loss_norm_error"), es],
        verbose=1,
    )

    txt = PREFIX + SUFFIX
    ae.save(txt.format(RUN_VERSION, trial.number))
    return min(hist.history["val_loss_norm_error"])
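A side note on the sampling API used in this and the following examples: trial.suggest_loguniform and trial.suggest_uniform are deprecated in recent Optuna releases in favor of trial.suggest_float. A hedged, minimal re-expression of a few of the calls above (parameter names kept, Optuna 3.x assumed) could look like this:

import optuna


def objective(trial):
    # Non-deprecated equivalents of the suggestions used above (illustration only).
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)     # was trial.suggest_loguniform("lr", ...)
    l2 = trial.suggest_float("lt_l2", 1e-7, 1e-3, log=True)  # was trial.suggest_loguniform("lt_l2", ...)
    batch_size = trial.suggest_int("batch_sz", 2, 32)        # samples integers directly; was int(trial.suggest_uniform(...))
    return 0.0  # placeholder value; the real objective would train and evaluate a model


study = optuna.create_study()
study.optimize(objective, n_trials=3)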
Code example #12
def objective(trial):

    # Open data file
    f = h5py.File(DT_FL, "r")
    dt = f[DT_DST]

    # Split data and get slices
    idxs = split(dt.shape[0], N_TRAIN, N_VALID)
    slc_trn, slc_vld, slc_tst = slicer(dt.shape, idxs)

    # Get data
    x_train = dt[slc_trn]
    x_val = dt[slc_vld]

    # Limits and options
    # Filters
    # flt_lm = [4, 128]
    flt_lm = [[4, 128], [4, 128], [4, 128]]
    # Kernel
    k_lm = [3, 5]
    # Regularizer
    l2_lm = [1e-7, 1e-3]
    # Activation functions
    act_opts = ["relu", "elu", "tanh", "linear"]
    # Latent space cfg
    lt_sz = [5, 150]
    lt_dv = [0.3, 0.7]
    # Learning rate
    lm_lr = [1e-5, 1e-2]

    # Clear tensorflow session
    tf.keras.backend.clear_session()
    # Input
    inputs = layers.Input(shape=x_train.shape[1:])
    e = inputs
    # Encoder
    flt, k_sz, act, l2 = [], [], [], []
    strd = [2, 2, 5]
    # n_layers = trial.suggest_int("n_layers", 2, 3)
    n_layers = 3
    for i in range(n_layers):
        # Get values
        flt += [
            trial.suggest_int("n{}_flts".format(i), flt_lm[i][0], flt_lm[i][1])
        ]
        k_sz += [trial.suggest_categorical("e{}_kernel_size".format(i), k_lm)]
        act += [
            trial.suggest_categorical("e{}_activation".format(i), act_opts)
        ]
        l2 += [
            trial.suggest_loguniform("e{}_l2".format(i), l2_lm[0], l2_lm[1])
        ]
        l2_reg = regularizers.l2(l=l2[-1])
        # l2_reg = regularizers.l2(l=0)
        # Set layer
        e = layers.Conv2D(
            flt[-1],
            (k_sz[-1], k_sz[-1]),
            strides=strd[i],
            activation=act[-1],
            padding="same",
            kernel_regularizer=l2_reg,
            name="{}_encoder".format(i + 1),
        )(e)
        # Add layers
        if i == 0:
            ed = layers.Conv2D(
                1,
                (1, 1),
                padding="same",
                kernel_regularizer=l2_reg,
                name="l2_input".format(i),
            )(e)
        # Dropout
        dp = 0
        if dp > 0:
            e = layers.Dropout(dp, name="{}_dropout_encoder".format(i + 1))(e)

    # Latent space
    act_lt = trial.suggest_categorical("lt_activation", act_opts)
    l2_lt = int(trial.suggest_loguniform("lt_l2", l2_lm[0], l2_lm[1]))
    l2_reg = regularizers.l2(l=l2_lt)
    sz_lt = trial.suggest_int("lt_sz", lt_sz[0], lt_sz[1])
    dv_lt = trial.suggest_uniform("lt_div", lt_dv[0], lt_dv[1])
    # Dense latent sizes
    latent_1 = int(sz_lt * dv_lt)
    latent_2 = sz_lt - latent_1

    lt1 = layers.Flatten()(e)
    lt1 = layers.Dense(latent_1,
                       activation=act_lt,
                       kernel_regularizer=l2_reg,
                       name="l1_latent")(lt1)

    lt2 = layers.Flatten()(ed)
    lt2 = layers.Dense(latent_2,
                       activation=act_lt,
                       kernel_regularizer=l2_reg,
                       name="l2_latent")(lt2)

    # Decoder
    # Flat input to the decoder
    n_flat = np.prod(backend.int_shape(e)[1:])
    d = layers.Dense(n_flat,
                     activation=act_lt,
                     kernel_regularizer=l2_reg,
                     name="l1_dense_decoder")(lt1)
    # Consider using only one filter with convolution
    # Reshape to the output of the encoder
    d = layers.Reshape(backend.int_shape(e)[1:])(d)
    # Generate the convolutional layers
    for i in range(n_layers):
        # Settings index
        j = -i - 1
        # Set the regularizer
        l2_reg = regularizers.l2(l=l2[j])
        # Add the latent space
        if i == n_layers - 1:
            d1 = layers.Dense(
                5000,
                activation="linear",
                kernel_regularizer=l2_reg,
                name="l2_dense_decoder",
            )(lt2)
            d1 = layers.Reshape(backend.int_shape(ed)[1:],
                                name="l2_reshape_decoder")(d1)
            d1 = layers.Conv2D(
                flt[j + 1],
                (1, 1),
                padding="same",
                name="l2_compat_decoder",
                kernel_regularizer=l2_reg,
            )(d1)
            d = layers.Add()([d1, d])
        # Convolutional layer
        d = layers.Conv2DTranspose(
            flt[j],
            (k_sz[j], k_sz[j]),
            strides=strd[j],
            activation=act[j],
            padding="same",
            kernel_regularizer=l2_reg,
            name="{}_decoder".format(i + 1),
        )(d)
        # Dropout layers
        if dp > 0:
            d = layers.Dropout(dp, name="{}_dropout_decoder".format(i + 1))(d)

    decoded = layers.Conv2DTranspose(
        x_train.shape[-1],
        (5, 5),
        activation="linear",
        padding="same",
        kernel_regularizer=l2_reg,
        name="output_decoder",
    )(d)

    ae = Model(inputs, decoded, name="auto_encoder_add")

    opt = "adam"
    if opt == "adam":
        k_optf = optimizers.Adam
    elif opt == "nadam":
        k_optf = optimizers.Nadam
    elif opt == "adamax":
        k_optf = optimizers.Adamax

    lr = trial.suggest_loguniform("lr", lm_lr[0], lm_lr[1])
    if lr > 0:
        k_opt = k_optf(learning_rate=lr)
    else:
        k_opt = k_optf()

    ae.compile(optimizer=k_opt,
               loss=loss_norm_error,
               metrics=["mse", loss_norm_error])

    batch_size = int(trial.suggest_uniform("batch_sz", 2, 32))
    ae.summary()
    hist = ae.fit(
        x_train,
        x_train,
        epochs=30,
        batch_size=batch_size,
        shuffle=True,
        validation_data=(x_val, x_val),
        callbacks=[KerasPruningCallback(trial, "val_loss_norm_error")],
        verbose=1,
    )

    txt = PREFIX + SUFFIX
    ae.save(txt.format(RUN_VERSION, trial.number))
    return hist.history["val_loss_norm_error"][-1]
Code example #13
    def _do_set_callbacks_task(
            self, base_model, train_set, validation_set, trial):

        best_model_name = "best_model.h5"
        model_save_file = os.path.join(self.config.model_path, best_model_name)
        if self.config.multi_gpu:
            checkpoint = ncc.callbacks.MultiGPUCheckpointCallback(
                filepath=model_save_file,
                base_model=base_model,
                save_best_only=True,
            )
        else:
            checkpoint = keras.callbacks.ModelCheckpoint(
                filepath=model_save_file, save_best_only=True
            )
        if self.config.cosine_decay:
            ncc_scheduler = ncc.schedulers.Scheduler(
                self.config.cosine_lr_max,
                self.config.cosine_lr_min,
                self.config.epochs
            )
            scheduler = keras.callbacks.LearningRateScheduler(
                ncc_scheduler.cosine_decay)
        else:
            scheduler = keras.callbacks.ReduceLROnPlateau(
                factor=0.5, patience=10, verbose=1)

        plot_history = ncc.callbacks.PlotHistory(
            self.config.learning_path,
            ['loss', 'acc', 'iou_score', 'categorical_crossentropy']
        )
        callbacks = [checkpoint, scheduler, plot_history]
        if self.config.task == ncc.tasks.Task.SEMANTIC_SEGMENTATION:
            iou_history = ncc.callbacks.IouHistory(
                save_dir=self.config.learning_path,
                validation_files=validation_set,
                class_names=self.config.class_names,
                height=self.config.height,
                width=self.config.width,
                train_colors=self.config.train_colors
            )
            val_save_dir = os.path.join(self.config.image_path, "validation")
            generate_sample_result = ncc.callbacks.GenerateSampleResult(
                val_save_dir=val_save_dir,
                validation_files=validation_set,
                nb_classes=self.config.nb_classes,
                height=self.config.height,
                width=self.config.width,
                train_colors=self.config.train_colors,
                segmentation_val_step=self.config.segmentation_val_step
            )
            callbacks.extend([iou_history, generate_sample_result])

            if self.config.optuna:
                callbacks.append(KerasPruningCallback(trial, 'dice'))

        elif self.config.task == ncc.tasks.Task.CLASSIFICATION:
            if self.config.input_data_type == "video":
                batch_checkpoint = ncc.callbacks.BatchCheckpoint(
                    self.config.learning_path,
                    f'{self.config.model_path}/batch_model.h5',
                    token=self.config.slack_token,
                    channel=self.config.slack_channel,
                    period=self.config.batch_period
                )
                callbacks.append(batch_checkpoint)

            if self.config.optuna:
                callbacks.append(KerasPruningCallback(trial, 'val_acc'))

        if self.config.slack_channel and self.config.slack_token:
            if self.config.task == ncc.tasks.Task.SEMANTIC_SEGMENTATION:
                file_name = os.path.join(self.config.learning_path, "IoU.png")
            else:
                file_name = os.path.join(
                    self.config.learning_path, "acc.png"
                )

            slack_logging = ncc.callbacks.SlackLogger(
                logger_file=file_name,
                token=self.config.slack_token,
                channel=self.config.slack_channel,
                title=self.config.model_name,
            )
            callbacks.append(slack_logging)
        return callbacks
Code example #14
def objective(trial):

    # Open data file
    f = h5py.File(DT_FL, "r")
    dt = f[DT_DST]

    # Format data for LSTM training
    x_data, y_data = format_data(dt, wd=WD, get_y=True)

    x_data = np.squeeze(x_data)
    # Split data and get slices
    idxs = split(x_data.shape[0], N_TRAIN, N_VALID)
    slc_trn, slc_vld, slc_tst = slicer(x_data.shape, idxs)

    # Get data
    x_train = x_data[slc_trn[0]]
    y_train = y_data[slc_trn[0]] - x_train
    x_val = x_data[slc_vld[0]]
    y_val = y_data[slc_vld[0]] - x_val

    # Limits and options
    # Filters
    # n_lstm = [[4, 128], [4, 128], [4, 128]]
    n_lstm = [[4, 196], [4, 196], [4, 196]]
    # Regularizer
    l2_lm = [1e-7, 1e-3]
    # Activation functions
    act_opts = ["relu", "elu", "tanh", "linear"]
    # Latent space cfg
    lt_sz = [5, 150]
    lt_dv = [0.3, 0.7]
    # Learning rate
    lm_lr = [1e-5, 1]

    # Clear tensorflow session
    tf.keras.backend.clear_session()
    # Input
    inputs = layers.Input(shape=x_train.shape[1:])
    p = inputs
    # Dense layers
    # n_lyr_dense = trial.suggest_int("n_lyr_dense", 0, 2)
    n_lyr_dense = trial.suggest_int("n_lyr_dense", 1, 3)
    for i in range(n_lyr_dense):
        # For the current layer
        # Get number of filters
        l = trial.suggest_int("n{}_dense".format(i), n_lstm[i][0],
                              n_lstm[i][1])
        # Get the activation function
        act = trial.suggest_categorical("d{}_activation".format(i), act_opts)
        # Regularization value
        l2 = trial.suggest_loguniform("d{}_l2".format(i), l2_lm[0], l2_lm[1])
        l2_reg = regularizers.l2(l=l2)
        # Set layer
        p = layers.Dense(
            l,
            activation=act,
            # kernel_regularizer=l2_reg,
            name="{}_dense".format(i + 1),
        )(p)
        # Dropout
        dp = trial.suggest_uniform("d{}_dropout".format(i), 0, 1)
        p = layers.Dropout(dp, name="{}_dropout_dense".format(i + 1))(p)
        bn = trial.suggest_categorical("d{}_batchnorm".format(i), [0, 1])
        if bn == 1:
            p = layers.BatchNormalization(name="{}_bnorm_dense".format(i +
                                                                       1))(p)

    out = layers.Dense(y_data.shape[1], activation="linear")(p)

    pred = Model(inputs, out, name="auto_encoder_add")

    # opt_opts = ["adam", "nadam", "adamax", "RMSprop"]
    # opt = trial.suggest_categorical("optimizer", opt_opts)
    opt = "adam"
    if opt == "adam":
        k_optf = optimizers.Adam
    elif opt == "nadam":
        k_optf = optimizers.Nadam
    elif opt == "adamax":
        k_optf = optimizers.Adamax
    elif opt == "RMSprop":
        k_optf = optimizers.RMSprop

    lr = trial.suggest_loguniform("lr", lm_lr[0], lm_lr[1])
    if lr > 0:
        k_opt = k_optf(learning_rate=lr)
    else:
        k_opt = k_optf()

    pred.compile(optimizer=k_opt, loss="mse", metrics=["mse", loss_norm_error])

    batch_size = int(trial.suggest_uniform("batch_sz", 2, 32))
    pred.summary()
    hist = pred.fit(
        x_train,
        y_train,
        epochs=100,
        batch_size=batch_size,
        shuffle=True,
        validation_data=(x_val, y_val),
        callbacks=[KerasPruningCallback(trial, "val_mse")],
        verbose=1,
    )

    txt = PREFIX + SUFFIX
    pred.save(txt.format(RUN_VERSION, trial.number))
    return hist.history["val_mse"][-1]
Code example #15
def objective(trial):

    # Open data file
    f = h5py.File(DT_FL, "r")
    dt = f[DT_DST]

    y_data = np.empty_like(dt)
    for idx in dt.attrs['idx']:
        y_data[idx[0]:idx[1]] = np.gradient(dt[idx[0]:idx[1]], 10, axis=0)

    # Split data file
    idxs = split(dt.shape[0], N_TRAIN, N_VALID, test_last=dt.attrs['idx'])
    slc_trn, slc_vld, slc_tst = slicer(dt.shape, idxs)

    # Slice data
    x_train = dt[slc_trn]
    y_train = y_data[slc_trn]
    x_val = dt[slc_vld]
    y_val = y_data[slc_vld]

    # Limits and options
    epochs = 500
    # Filters
    n_n = [[30, 150], [30, 150]]
    # Regularizer
    l2_lm = [1e-7, 1e-2]
    # Activation functions
    act_opts = ["relu", "elu", "tanh", "linear"]
    # Learning rate
    lm_lr = [1e-5, 1e-1]

    # Clear tensorflow session
    tf.keras.backend.clear_session()
    # Input
    inputs = layers.Input(shape=x_train.shape[1:])
    d = inputs
    # FCNN
    n_layers = trial.suggest_int("n_layers", 1, 3)
    for i in range(n_layers):
        # For the current layer
        # Get number of filters
        n = trial.suggest_int("l{}_n_neurons".format(i), n_n[i][0], n_n[i][1])
        # Get the activation function
        act = trial.suggest_categorical("l{}_activation".format(i), act_opts)
        # Regularization value
        l2 = trial.suggest_loguniform("l{}_l2".format(i), l2_lm[0], l2_lm[1])
        l2_reg = regularizers.l2(l=l2)
        # Set layer
        d = layers.Dense(
            n,
            activation=act,
            kernel_regularizer=l2_reg,
            name="l{}_fc".format(i),
        )(d)
    dd = layers.Dense(x_train.shape[1], activation='linear')(d)

    fcnn = Model(inputs, dd, name="FCNN")

    monitor = "val_loss_norm_error"
    patience = int(epochs * 0.1)
    es = EarlyStopping(monitor=monitor,
                       mode="min",
                       patience=patience,
                       restore_best_weights=True)

    opt = "adam"
    if opt == "adam":
        k_optf = optimizers.Adam
    elif opt == "nadam":
        k_optf = optimizers.Nadam
    elif opt == "adamax":
        k_optf = optimizers.Adamax

    lr = trial.suggest_loguniform("lr", lm_lr[0], lm_lr[1])
    if lr > 0:
        k_opt = k_optf(learning_rate=lr)
    else:
        k_opt = k_optf()

    fcnn.compile(optimizer=k_opt,
                 loss=loss_norm_error,
                 metrics=["mse", loss_norm_error])

    batch_size = int(trial.suggest_uniform("batch_sz", 2, 32))
    fcnn.summary()
    hist = fcnn.fit(
        x_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        shuffle=True,
        validation_data=(x_val, y_val),
        callbacks=[KerasPruningCallback(trial, "val_loss_norm_error"), es],
        verbose=1,
    )

    txt = PREFIX + SUFFIX
    fcnn.save(txt.format(RUN_VERSION, trial.number))
    return hist.history["val_loss_norm_error"][-1]