Example 1
def run(data, base_logdir, session_id, hparams):
    """Run a training/validation session.

  Flags must have been parsed for this function to behave.

  Args:
    data: The data as loaded by `prepare_data()`.
    base_logdir: The top-level logdir to which to write summary data.
    session_id: A unique string ID for this session.
    hparams: A dict mapping hyperparameters in `HPARAMS` to values.
  """
    model = model_fn(hparams=hparams, seed=session_id)
    logdir = os.path.join(base_logdir, session_id)

    callback = tf.keras.callbacks.TensorBoard(
        logdir,
        update_freq=flags.FLAGS.summary_freq,
        profile_batch=0,  # workaround for issue #2084
    )
    hparams_callback = hp.KerasCallback(logdir, hparams)
    ((x_train, y_train), (x_test, y_test)) = data
    result = model.fit(
        x=x_train,
        y=y_train,
        epochs=flags.FLAGS.num_epochs,
        shuffle=False,
        validation_data=(x_test, y_test),
        callbacks=[callback, hparams_callback],
    )
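Note: most of the examples in this listing follow the same pattern as Example 1: build a Keras model from the `hparams` dict, then pass `hp.KerasCallback(logdir, hparams)` to `model.fit` next to a `TensorBoard` callback so each run's hyperparameter values are logged alongside its metrics. The minimal, self-contained sketch below shows that pattern end to end; the hyperparameter name `num_units`, the logdir `logs/hparam_demo`, and the toy model are illustrative assumptions, not taken from any example in this listing.

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# Illustrative hyperparameter definition (assumption, not from the examples).
HP_NUM_UNITS = hp.HParam("num_units", hp.Discrete([16, 32]))

# Register the experiment-level schema once per top-level logdir so the
# HParams dashboard knows which hyperparameters and metrics to display.
with tf.summary.create_file_writer("logs/hparam_demo").as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS],
        metrics=[hp.Metric("accuracy", display_name="Accuracy")])


def run_session(logdir, hparams, x_train, y_train, x_val, y_val):
    """Train one hyperparameter combination and log it under `logdir`."""
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    model.fit(
        x_train,
        y_train,
        validation_data=(x_val, y_val),
        epochs=2,
        callbacks=[
            tf.keras.callbacks.TensorBoard(logdir),  # log metrics
            hp.KerasCallback(logdir, hparams),  # log hparams
        ])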
Example 2
def train_model(r_name, hparams, model):
    """Train the model by feeding it data."""
    hp_log_dir = log_dir + session_name + "/" + r_name
    history = model.fit(
        generate_arrays_from_file(tens_path + training_file,
                                  hparams[HP_BATCH_SIZE]),
        steps_per_epoch=train_rows // hparams[HP_BATCH_SIZE],
        epochs=n_epochs,
        validation_data=generate_arrays_from_file(tens_path + validation_file,
                                                  hparams[HP_BATCH_SIZE]),
        validation_steps=valid_rows // hparams[HP_BATCH_SIZE],
        callbacks=[
            tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=1),
            tf.keras.callbacks.TensorBoard(log_dir=hp_log_dir,
                                           histogram_freq=1),  # log metrics
            hp.KerasCallback(hp_log_dir + "/validation",
                             hparams)  # log hparams
        ])
    # To track the progression of training, gather a snapshot
    # of the model's metrics at each epoch.
    epochs = history.epoch
    hist = pd.DataFrame(history.history)
    # save model
    model.save(save_dir + session_name + "/" + r_name)
    test_results = model.evaluate(generate_arrays_from_file(
        tens_path + test_file, hparams[HP_BATCH_SIZE]),
                                  steps=test_rows // hparams[HP_BATCH_SIZE])
    return epochs, hist, test_results
Example 3
def train_test_model(hparams):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation=tf.nn.relu),
        tf.keras.layers.Dropout(hparams[HP_DROPOUT]),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax),
    ])
    model.compile(
        optimizer=hparams[HP_OPTIMIZER],
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )

    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    model.fit(
        x_train,
        y_train,
        epochs=15,
        callbacks=[
            tf.keras.callbacks.TensorBoard(log_dir),  # log metrics
            hp.KerasCallback(log_dir, hparams),  # log hparams
        ])
    _, accuracy = model.evaluate(x_test, y_test)
    return accuracy
Example 4
def train_test_model(logdir, hparams):
    start = timer()
    classifier = tf.keras.Sequential()
    classifier.add(
        tf.keras.layers.Conv2D(filters=int(hparams[HP_CNN_FILTER_1]),
                               kernel_size=int(hparams[HP_CNN_KERNEL_1]),
                               activation='relu',
                               input_shape=(x_train[0].shape[0],
                                            x_train[0].shape[1], 1)))
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    classifier.add(tf.keras.layers.Dropout(0.4))

    classifier.add(
        tf.keras.layers.Conv2D(filters=int(hparams[HP_CNN_FILTER_2]),
                               kernel_size=int(hparams[HP_CNN_KERNEL_2]),
                               activation='relu'))
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    classifier.add(tf.keras.layers.Dropout(0.4))

    classifier.add(
        tf.keras.layers.Conv2D(filters=int(hparams[HP_CNN_FILTER_3]),
                               kernel_size=int(hparams[HP_CNN_KERNEL_3]),
                               activation='relu'))
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    
    classifier.add(tf.keras.layers.GlobalAvgPool2D())
    classifier.add(tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation='relu'))
    classifier.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    classifier.compile(optimizer=tf.keras.optimizers.Adam(
        learning_rate=hparams[HP_LEARNING_RATE], decay=0.001),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    cb = [
        tf.keras.callbacks.TensorBoard(log_dir=logdir),
        hp.KerasCallback(logdir, hparams)
    ]
    classifier.fit(x_train,
                   y_train,
                   validation_data=(x_test, y_test),
                   batch_size=64,
                   epochs=100,
                   callbacks=cb,
                   verbose=0)

    _, accuracy = classifier.evaluate(x_test, y_test)
    end = timer()
    print(timedelta(seconds=end-start))
    return accuracy
Example 5
def train_test_model(hparams, training_generator, test_generator,
                     val_generator, model):
    clip_size = hparams[HP_clip_size]
    # steps = int((train_images.shape[1] / clip_size))
    steps = int((train_images_horiz.shape[1] / clip_size))
    # steps = 5

    callbacks = [
        tf.keras.callbacks.TensorBoard(logdir),  # log metrics
        hp.KerasCallback(logdir, hparams),  # log hparams
        tf.keras.callbacks.EarlyStopping(monitor='loss', patience=17),
        # tf.keras.callbacks.LearningRateScheduler(scheduler)
        ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4)
    ]

    val_steps = int((val_images_horiz.shape[1] / clip_size))
    history = model.fit(training_generator,
                        verbose=2,
                        epochs=200,
                        callbacks=callbacks,
                        steps_per_epoch=steps,
                        validation_data=val_generator,
                        validation_steps=val_steps)

    test_steps = int((test_images_horiz.shape[1] / clip_size))
    _, accuracy = model.evaluate(test_generator, steps=test_steps)

    return accuracy, history
Example 6
def train_test_model(hparams, logdir):
    model = Sequential(
        [Dense(units=hparams[HP_HIDDEN], activation='relu'),
         Dense(units=1)])
    model.compile(loss='mean_squared_error',
                  optimizer=tf.keras.optimizers.Adam(
                      hparams[HP_LEARNING_RATE]),
                  metrics=['mean_squared_error'])
    model.fit(
        X_scaled_train,
        y_train,
        validation_data=(X_scaled_test, y_test),
        epochs=hparams[HP_EPOCHS],
        verbose=False,
        callbacks=[
            tf.keras.callbacks.TensorBoard(logdir),  # log metrics
            hp.KerasCallback(logdir, hparams),  # log hparams
            tf.keras.callbacks.EarlyStopping(
                monitor='val_loss',
                min_delta=0,
                patience=200,
                verbose=0,
                mode='auto',
            )
        ],
    )
    _, mse = model.evaluate(X_scaled_test, y_test)
    pred = model.predict(X_scaled_test)
    r2 = r2_score(y_test, pred)
    return mse, r2
Example 7
def run(train_dataset, val_dataset, base_logdir, session_id, hparams):
    """Run a training/validation session.
    Flags must have been parsed for this function to behave.
    Args:
      data: The data as loaded by `prepare_data()`.
      base_logdir: The top-level logdir to which to write summary data.
      session_id: A unique string ID for this session.
      hparams: A dict mapping hyperparameters in `HPARAMS` to values.
    """
    model = model_fn(hparams=hparams, seed=session_id)
    logdir = os.path.join(base_logdir, session_id)

    EPOCHS = 600

    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir,
                                                       profile_batch=0)
    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                patience=40,
                                                restore_best_weights=True)

    hparams_callback = hp.KerasCallback(logdir, hparams)
    model.fit(train_dataset,
              epochs=EPOCHS,
              callbacks=[tensorboard_callback, callback, hparams_callback],
              validation_data=val_dataset)
Example 8
def run(data, base_path, session_id, hparams):
    """ Run a training/validation session
    Args:
        data: the train and validation data
        base_path: The top-level logdir to which to write summary data.
        session_id: A unique string ID for this session.
        hparams: A dict mapping hyperparameters in `HPARAMS` to values.
    """
    logdir = os.path.join(base_path, session_id)
    ((x_train, y_train), (x_val, y_val)) = data
    model = build_multilayer_lstm_hp(input_dim=x_train.shape[1:],
                                     hparams=hparams,
                                     seed=session_id,
                                     num_classes=flags.FLAGS.num_classes)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=logdir,
        histogram_freq=2,
        update_freq=flags.FLAGS.summary_freq,
        profile_batch=0)
    hparams_callback = hp.KerasCallback(logdir, hparams)
    model.fit(
        x=x_train,
        epochs=flags.FLAGS.epochs,
        y=y_train,
        batch_size=flags.FLAGS.batch_size,
        shuffle=False,
        validation_data=(x_val, y_val),
        callbacks=[tensorboard_callback, hparams_callback],
    )
    model_file = "model_%d_stages_%ds_%s_%d_seq_%s_%s.h5" % (
        flags.FLAGS.num_classes, flags.FLAGS.hrv_win_len,
        flags.FLAGS.nn_type, flags.FLAGS.seq_len, flags.FLAGS.modality,
        convert_args_to_str(hparams))
    model_save_path = os.path.join(logdir, model_file)
    model.save(model_save_path, save_format='h5')
Example 9
def train_test_model(logdir, hparams):
    classifier = tf.keras.Sequential()
    classifier.add(
        tf.keras.layers.Conv2D(filters=hparams['filter_1'],
                               kernel_size=hparams['kernel_1'],
                               activation='relu',
                               input_shape=(x_train[0].shape[0],
                                            x_train[0].shape[1], 1)))
    if hparams['batch_norm']:
        classifier.add(tf.keras.layers.BatchNormalization())
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    classifier.add(tf.keras.layers.Dropout(hparams['dropout']))
    classifier.add(
        tf.keras.layers.Conv2D(filters=hparams['filter_2'],
                               kernel_size=hparams['kernel_2'],
                               activation='relu'))
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    classifier.add(tf.keras.layers.Dropout(hparams['dropout']))
    classifier.add(tf.keras.layers.Flatten())
    classifier.add(tf.keras.layers.Dense(hparams['units'], activation='relu'))
    classifier.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    
    
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    plot_model(classifier, logdir + '/model.png', show_shapes=True, show_layer_names=False)
    
    classifier.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=hparams['lr']),
        loss='binary_crossentropy',
        metrics=['accuracy'])

    cb = [
        tf.keras.callbacks.TensorBoard(log_dir=logdir),
        hp.KerasCallback(logdir, hparams)
    ]
    classifier.fit(x_train,
                   y_train,
                   validation_data=(x_test, y_test),
                   batch_size=16,
                   epochs=250,
                   callbacks=cb,
                   verbose=0)

    _, accuracy = classifier.evaluate(x_test, y_test)
    return accuracy
Example 10
def train_model(run_dir, hparams):

    hp.hparams(hparams)
    [X_train, y_train] = create_data(data_unscaled=data, start_train=start_train, end_train=end_train, n_windows=hparams[HP_WINDOW], n_outputs=hparams[HP_OUTPUT])
    [X_test, y_test] = create_data(data_unscaled=data, start_train=end_train, end_train=end_test, n_windows=hparams[HP_WINDOW], n_outputs=hparams[HP_OUTPUT])

    tf.compat.v1.keras.backend.clear_session()
    model = Sequential()
    model.add(TimeDistributed(
        Masking(mask_value=0., input_shape=(hparams[HP_WINDOW], n_inputs+1)),
        input_shape=(n_company, hparams[HP_WINDOW], n_inputs+1)))
    model.add(TimeDistributed(LSTM(
        hparams[HP_NUM_UNITS], stateful=False, activation='tanh',
        return_sequences=True, input_shape=(hparams[HP_WINDOW], n_inputs+1),
        kernel_initializer='TruncatedNormal',
        bias_initializer=initializers.Constant(value=0.1),
        dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    model.add(TimeDistributed(LSTM(
        hparams[HP_NUM_UNITS], stateful=False, activation='tanh',
        return_sequences=False, kernel_initializer='TruncatedNormal',
        bias_initializer=initializers.Constant(value=0.1),
        dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    #model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=True ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    #model.add(TimeDistributed(LSTM(hparams[HP_NUM_UNITS], stateful=False, activation='tanh' ,return_sequences=False ,kernel_initializer='TruncatedNormal' ,bias_initializer=initializers.Constant(value=0.1) ,dropout=hparams[HP_DROPOUT], recurrent_dropout=hparams[HP_DROPOUT])))
    model.add(Dense(units=20, activation='softmax'))
    model.compile(optimizer=hparams[HP_OPTIMIZER], loss='categorical_crossentropy', metrics=['accuracy']) #get_weighted_loss(weights=weights)) # metrics=['mae'])
    model.build(input_shape=(None, n_company, hparams[HP_WINDOW], n_inputs+1))
    model.summary()
    #model.fit(X_train, to_categorical(y_train[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0],20), epochs=100, batch_size=batch_size, validation_data=(X_test, to_categorical(y_test[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0],20)))
    model.fit(
        X_train,
        to_categorical(y_train[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0], 20),
        epochs=1000,
        batch_size=batch_size,
        validation_data=(
            X_test,
            to_categorical(y_test[:, :, hparams[HP_WINDOW]-1:hparams[HP_WINDOW], 0], 20)),
        callbacks=[
            TensorBoard(log_dir=run_dir, histogram_freq=50, write_graph=True,
                        write_grads=True, update_freq='epoch'),
            hp.KerasCallback(writer=run_dir, hparams=hparams)])
    model.save('model_' + str(hparams[HP_NUM_UNITS]) + '_' + str(hparams[HP_DROPOUT]) + '_' + str(hparams[HP_OPTIMIZER]) + '_' + str(hparams[HP_WINDOW]) + '_' + str(hparams[HP_OUTPUT]) + '.h5')

    pd.DataFrame(np.argmax(model.predict(X_test),axis=2)).to_csv('pred.csv')
    pd.DataFrame(np.reshape(np.transpose(model.predict(X_test),axes=(1,0,2)), (X_test.shape[0]*X_test.shape[1],20))).to_csv('pred_weights.csv')

    return 0
Example 11
def train_test_model(log_dir, hparams: dict):
    dataset = DataSet(fraction=1.0)
    optimiser = getattr(tf.keras.optimizers, hparams['optimizer'])
    schedule = scheduler(hparams, dataset)

    model = SequentialCNN(input_shape=dataset.input_shape(),
                          output_shape=dataset.output_shape())

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=optimiser(learning_rate=hparams['learning_rate']),
                  metrics=['accuracy'])

    history = model.fit(
        dataset.data['train_X'],
        dataset.data['train_Y'],
        batch_size=hparams["batch_size"],
        epochs=250,
        verbose=False,
        validation_data=(dataset.data["valid_X"], dataset.data["valid_Y"]),
        callbacks=[
            EarlyStopping(monitor='val_loss',
                          mode='min',
                          verbose=1,
                          patience=hparams['patience']),
            schedule,
            tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                           histogram_freq=1),  # log metrics
            hp.KerasCallback(log_dir, hparams)  # log hparams
        ])
    print({key: value[-1] for key, value in history.history.items()})
Example 12
def train_and_evaluate(model, hparams):
    tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=10,
                              write_graph=True, write_images=False)
    hp_tuning = hp.KerasCallback(log_dir, hparams)
    cbs = [tensorboard, hp_tuning]

    history = model.fit(gen, validation_split=0.15, epochs=p.epochs,
                        batch_size=p.batch_size, callbacks=cbs, verbose=1,
                        shuffle=True)
    mse, rmse = model.evaluate(test_gen)
    return rmse, history
Example 13
 def test_duplicate_hparam_names_from_two_objects(self):
   hparams = {
       hp.HParam("foo"): 1,
       hp.HParam("foo"): 1,
   }
   with six.assertRaisesRegex(
       self, ValueError, "multiple values specified for hparam 'foo'"):
     hp.KerasCallback(self.get_temp_dir(), hparams)
Example 14
def get_callbacks(name, hparams, log_path):
    return [
        EarlyStopping(monitor="val_loss",
                      patience=_patience,
                      restore_best_weights=True),
        TensorBoard(log_dir=log_path, profile_batch=2, histogram_freq=1),
        hp.KerasCallback(log_path, hparams, name)
    ]
Example 15
def configure_callbacks(hparams,
                        log_dir,
                        weight_dir,
                        no_early_stopping=False,
                        lr_reducer=True,
                        histogram_freq=1,
                        early_stopping_patience=25):
    callbacks = []
    # Initialize model checkpoint
    model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
        weight_dir +
        "/weights.{epoch:02d}-{val_loss:.2f}-{val_precision:.2f}-{val_recall:.2f}.hdf5",
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='auto')

    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir,
        histogram_freq=histogram_freq,
        write_grads=True,
        write_images=True)

    # Learning rate scheduler
    #def scheduler(epoch, lr):
    #    if epoch < 10:
    #        return lr
    #    else:
    #        return lr * tf.math.exp(-0.1)
    #
    # learning_rate_scheduler_callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
    # Hyperparameter search callback
    hparams_callback = hp.KerasCallback(log_dir, hparams)

    callbacks = [model_checkpoint, tensorboard_callback, hparams_callback]

    if not no_early_stopping:
        print('early stopping')
        early_stopping_callback = tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            min_delta=0,
            patience=early_stopping_patience,
            verbose=0,
            mode='auto',
            baseline=None,
            restore_best_weights=True)

        callbacks.append(early_stopping_callback)

    if lr_reducer:
        print('Using lr reducer')
        # Control the learning rate on plateau
        reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss', factor=0.1, patience=5, min_lr=0.001)

        callbacks.append(reduce_lr_callback)

    return callbacks
Example 16
def train_test_model(hparams, data_dir, log_dir, run_name, epochs):
    print("train_test_model started")

    config = SunRGBDConfig()
    dataset_train = SunRGBDDataset()
    dataset_train.load_sun_rgbd(data_dir, "train13")
    dataset_train.prepare()
    dataset_val = SunRGBDDataset()
    dataset_val.load_sun_rgbd(data_dir, "split/val13")
    dataset_val.prepare()

    # Create model in training mode
    if hparams['HP_BACKBONE'] == "resnet50_batch_size1":
        backbone = "resnet50"
        batch_size = 1
    elif hparams['HP_BACKBONE'] == "resnet50_batch_size2":
        backbone = "resnet50"
        batch_size = 2
    else:
        backbone = "resnet101"
        batch_size = 1
    config.BACKBONE = backbone
    config.BATCH_SIZE = batch_size
    config.IMAGES_PER_GPU = batch_size
    config.OPTIMIZER = hparams['HP_OPTIMIZER']
    if config.OPTIMIZER == "ADAM":
        config.LEARNING_RATE = config.LEARNING_RATE / 10
    # config.VALIDATION_STEPS = 1000  # bigger val steps
    # config.STEPS_PER_EPOCH = 10

    model = modellib.MaskRCNN(mode="training", config=config,
                              model_dir=log_dir, unique_log_name=False)
    augmentation = iaa.Sequential([
        iaa.Fliplr(0.5)
    ])

    custom_callbacks = [tbhp.KerasCallback(log_dir, hparams)]

    print("start training")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=epochs,
                layers='all',
                augmentation=augmentation,
                custom_callbacks=custom_callbacks)

    print("save model")
    model_path = log_dir + run_name + ".h5"
    model.keras_model.save_weights(model_path)

    # inference calculation
    print("calculate inference")
    config.BATCH_SIZE = 1
    config.IMAGES_PER_GPU = 1
    m_ap, f1s = inference_calculation(config=config, model_path=model_path,
                                      model_dir=log_dir, dataset_val=dataset_val)

    print("train_test_model finished")
    return m_ap, f1s
Example 17
def train(model: tf.keras.Model, data_csv: str, data_root: str, hparams: dict,
          hp_space: dict, run_name: str):
    train_split = hparams[hp_space['train_split']]
    num_imgs = hparams[hp_space['num_imgs']]
    num_epochs = hparams[hp_space['num_epochs']]
    batch_size = hparams[hp_space['batch_size']]

    # Load csv data
    examples_df = pd.read_csv(data_csv, header=0, skipinitialspace=True)

    label_dict = {'Fish': 0, 'Flower': 1, 'Gravel': 2, 'Sugar': 3}
    filenames = np.array(
        [data_root + fname for fname in examples_df['Image'].tolist()],
        dtype=object)
    labels = np.array(
        [label_dict[fname] for fname in examples_df['Label'].tolist()],
        dtype=int)

    if num_imgs == -1: num_imgs = len(filenames)

    train_idxs, val_idxs = split_train_val(examples_df, train_split, num_imgs)
    train_filenames = filenames[train_idxs]
    train_labels = labels[train_idxs]
    val_filenames = filenames[val_idxs]
    val_labels = labels[val_idxs]

    train_dataset, num_train = create_dataset(files=train_filenames,
                                              labels=train_labels,
                                              batch_size=batch_size,
                                              training=True)

    val_dataset, num_val = create_dataset(files=val_filenames,
                                          labels=val_labels,
                                          batch_size=batch_size,
                                          training=False)

    print("Num train: {}, num val: {}".format(num_train, num_val))

    log_dir = './logs/{}'.format(run_name)

    train_history = model.fit(
        train_dataset,
        epochs=num_epochs,
        steps_per_epoch=num_train // batch_size,
        validation_steps=num_val // batch_size,
        validation_data=val_dataset,
        callbacks=[
            tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.5,
                                                 patience=5),
            tf.keras.callbacks.TensorBoard(log_dir=log_dir),
            tf.keras.callbacks.ModelCheckpoint(
                filepath="experiments/{}/".format(run_name) +
                "best_model.hdf5",
                save_best_only=True,
                save_weights_only=False),
            hp.KerasCallback(log_dir, hparams)
        ])
Example 18
def train_test_model(hparams, data_dir, log_dir, run_name, epochs):
    print("train_test_model started")

    config = SunRGBDFusenetConfig()
    dataset_train = SunRGBDFusenetDataset()
    dataset_train.load_sun_rgbd_fusenet(data_dir, "train13")
    dataset_train.prepare()
    dataset_val = SunRGBDFusenetDataset()
    dataset_val.load_sun_rgbd_fusenet(data_dir, "split/val13")
    dataset_val.prepare()

    # Create model in training mode
    if hparams['HP_NUM_FILTERS'] == NUM_FILTERS_L:
        config.NUM_FILTERS = [128, 128, 256, 512, 1024]
    elif hparams['HP_NUM_FILTERS'] == NUM_FILTERS_M:
        config.NUM_FILTERS = [64, 64, 128, 256, 512]
    else:
        config.NUM_FILTERS = [32, 32, 64, 128, 256]
    config.BATCH_SIZE = 1
    config.IMAGES_PER_GPU = 1
    config.OPTIMIZER = hparams['HP_OPTIMIZER']
    if config.OPTIMIZER == "ADAM":
        config.LEARNING_RATE = config.LEARNING_RATE / 10

    model = modellib.MaskRCNN(mode="training",
                              config=config,
                              model_dir=log_dir,
                              unique_log_name=False)
    augmentation = iaa.Sequential([iaa.Fliplr(0.5)])

    custom_callbacks = [tbhp.KerasCallback(log_dir, hparams)]

    print("start training")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=epochs,
                layers='all',
                augmentation=augmentation,
                custom_callbacks=custom_callbacks)

    print("save model")
    model_path = log_dir + run_name + ".h5"
    model.keras_model.save_weights(model_path)

    # inference calculation
    print("calculate inference")
    config.BATCH_SIZE = 1
    config.IMAGES_PER_GPU = 1
    m_ap, f1s = inference_calculation(config=config,
                                      model_path=model_path,
                                      model_dir=log_dir,
                                      dataset_val=dataset_val)

    print("train_test_model finished")
    return m_ap, f1s
Example 19
def train_model(model, train, validate, initial_epoch):

    # Metrics / loss / optimizer
    if FLAGS.mode == 'regression':
        metrics = ['mean_absolute_error', 'mean_squared_error']
        loss = 'mean_squared_error'
    else:
        metrics = [
            tf.keras.metrics.SparseCategoricalAccuracy(),
            tf.keras.metrics.SparseTopKCategoricalAccuracy(k=2),
        ]
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

    optimizer = tf.keras.optimizers.Adam(
            learning_rate=hparams[HP_LR],
            epsilon=0.1
    )
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    if FLAGS.speedrun:
        steps_per_epoch = 110
        validation_steps = 15

        # Disabled output printing, slows things down
        model_callbacks = []
        # model_callbacks = [ tf.keras.callbacks.LambdaCallback(
        #         on_epoch_end=lambda x, y: quick_eval(model, train, FLAGS.mode)
        # )]

    else:
        validation_steps = FLAGS.validation_size // hparams[HP_BATCH_SIZE]
        model_callbacks = callbacks + [hp.KerasCallback(hparam_dir, hparams)]
        steps_per_epoch = FLAGS.steps_per_epoch

    if FLAGS.weighted:
        weights = {i: float(x) for i, x in enumerate(FLAGS.weighted)}
        logging.debug("Using class weights: %s", weights)
    else:
        weights = None



    fit_args = {
        'x': train,
        'epochs': FLAGS.epochs,
        'steps_per_epoch': steps_per_epoch,
        'validation_data': validate,
        'validation_steps': validation_steps,
        'class_weight': weights,
        'callbacks': model_callbacks,
        'initial_epoch': initial_epoch
    }
    pretty_args = json.dumps({k: str(v) for k, v in fit_args.items()}, indent=2)
    logging.info("Fitting model with args: \n%s", pretty_args)
    history = model.fit(**fit_args)
Example 20
def train_test_model(model_name,
                     hparams,
                     logdir,
                     X_train=X_train,
                     y_train=y_train,
                     with_weights=True):
    model = Sequential()
    model.add(Dense(hparams[HP_NUM_UNITS_L1], activation='relu'))
    model.add(Dropout(hparams[HP_DROPOUT]))
    model.add(Dense(hparams[HP_NUM_UNITS_L2], activation='relu'))
    model.add(Dropout(hparams[HP_DROPOUT]))
    model.add(Dense(hparams[HP_NUM_UNITS_L3], activation='relu'))
    model.add(Dropout(hparams[HP_DROPOUT]))
    model.add(Dense(units=1, activation='sigmoid'))

    model.compile(loss=hparams[HP_LOSS],
                  optimizer=hparams[HP_OPTIMIZER],
                  metrics=METRICS)

    class_weight = {0: 1, 1: 1}
    if with_weights:
        neg, pos = np.bincount(y_train)
        total = neg + pos
        print('Examples: Total: {} Positive: {} ({:.2f}% of total)'.format(
            total, pos, 100 * pos / total))
        weight_for_0 = (1 / neg) * (total) / 2.0
        weight_for_1 = (1 / pos) * (total) / 2.0
        class_weight = {0: weight_for_0, 1: weight_for_1}
        print('Weight for class 0: {:.2f}'.format(weight_for_0))
        print('Weight for class 1: {:.2f}'.format(weight_for_1))

    model.fit(
        x=X_train,
        y=y_train,
        epochs=500,
        class_weight=class_weight,
        batch_size=hparams[HP_BATCH_SIZE],
        validation_data=(X_test, y_test),
        verbose=0,
        callbacks=[
            EarlyStopping(monitor='loss', mode='min', verbose=1, patience=10),
            tf.keras.callbacks.TensorBoard(
                log_dir=logdir,
                histogram_freq=1,
                write_graph=False,
                write_images=False,
                update_freq='epoch',
                profile_batch=100000000),  # log metrics
            hp.KerasCallback(logdir, hparams)
        ])

    results = model.evaluate(X_test, y_test)
    print(results)
    return results
Example 21
def train_test_model(logdir, hparams):
    filter_kernel_2 = json.loads(hparams['filter_kernel_2'])
    units = json.loads(hparams['units'])
    kernel_1 = json.loads(hparams['kernel_1'])

    # C3 Convolution.
    fft_input = keras.Input(shape=(x_train[0].shape[0], x_train[0].shape[1],
                                   1))
    model = layers.Conv2D(filters=int(hparams['filter_1']),
                          kernel_size=(int(kernel_1[0]), int(kernel_1[1])),
                          activation='relu')(fft_input)
    model = layers.MaxPooling2D(pool_size=2)(model)
    model = layers.Dropout(hparams['dropout'])(model)
    if int(filter_kernel_2[0]) > 0:
        model = layers.Conv2D(filters=int(filter_kernel_2[0]),
                              kernel_size=(1, int(filter_kernel_2[1])),
                              activation='relu')(model)
        model = layers.MaxPooling2D(pool_size=2)(model)
        model = layers.Dropout(hparams['dropout'])(model)
    model = layers.Flatten()(model)

    # Dense concatenation.
    for unit in units:
        model = layers.Dense(unit, activation='relu')(model)
        model = layers.Dropout(hparams['dropout'])(model)
    model = layers.Dense(1, activation='sigmoid')(model)

    model = tf.keras.models.Model(inputs=fft_input, outputs=model)
    model.summary()
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    plot_model(model,
               logdir + '/model.png',
               show_shapes=True,
               show_layer_names=False)
    model.compile(optimizer=optimizers.Adam(learning_rate=hparams['lr'],
                                            decay=0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    cb = [
        callbacks.TensorBoard(log_dir=logdir),
        hp.KerasCallback(logdir, hparams)
    ]

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=16,
                        epochs=2000,
                        callbacks=cb,
                        verbose=0)
    return model, history
Example 22
    def hparamCallback(self):
        hparams = {
            'learningRate': self.config.learningRate,
            'hiddenSize': self.config.hiddenSize,
            'numberOfLayers': self.config.numLayers,
            'batchSize': self.config.batchSize,
            'numberParams': self.model.count_params()
        }

        boardPath = os.path.join(os.path.join(self.config.saveDir, 'logs'),
                                 self.config.name)
        return hp.KerasCallback(boardPath, hparams)
Example 23
 def _initialize_model(self, writer):
   HP_DENSE_NEURONS = hp.HParam("dense_neurons", hp.IntInterval(4, 16))
   self.hparams = {
       "optimizer": "adam",
       HP_DENSE_NEURONS: 8,
   }
   self.model = tf.keras.models.Sequential([
       tf.keras.layers.Dense(self.hparams[HP_DENSE_NEURONS], input_shape=(1,)),
       tf.keras.layers.Dense(1, activation="sigmoid"),
   ])
   self.model.compile(loss="mse", optimizer=self.hparams["optimizer"])
   self.callback = hp.KerasCallback(writer, self.hparams, group_name="psl27")
Example 24
 def _configure_tensorboard_dir(self, callbacks, trial):
     for callback in callbacks:
         if callback.__class__.__name__ == 'TensorBoard':
             # Patch TensorBoard log_dir and add HParams KerasCallback
             logdir = _get_tensorboard_dir(callback.log_dir, trial.trial_id)
             callback.log_dir = logdir
             hparams = tuner_utils.convert_hyperparams_to_hparams(
                 trial.hyperparameters)
             callbacks.append(
                 hparams_api.KerasCallback(
                     writer=logdir,
                     hparams=hparams,
                     trial_id=trial.trial_id))
Example 25
def create_hparams_callback(log_dir, opt_metric, hparams, args):
    """
    Set up the HParams plugin config and callback for TensorBoard.
    """
    hparams_dir = os.path.join(log_dir, 'validation')
    opt_metric = 'epoch_' + opt_metric

    # Hparams callback to log the hyperparameter values
    with tf.summary.create_file_writer(hparams_dir).as_default():
        hp.hparams_config(hparams=[hp.HParam(hparam) for hparam in hparams],
                          metrics=[hp.Metric(opt_metric)])
    hparams_cb = hp.KerasCallback(
        writer=hparams_dir,
        hparams={hparam: args[hparam]
                 for hparam in hparams})
    return hparams_cb
Example 26
    def train(self, logdir, hparams):
        self._model = self.get_model(hparams)
        print(self._model.summary())

        self._model.fit(
            x=self._datasource.train.x,
            y=self._datasource.train.y,
            batch_size=self._options['batch'],
            epochs=self._options['epoch'],
            validation_data=(self._datasource.validation.x, self._datasource.validation.y),
            shuffle=True,
            callbacks=[
                ModelCheckpoint('{}/model_weights.h5'.format(self.folder), save_weights_only=True),
                YOLOMetrics.HistoryCheckpointCallback(folder=self.folder),
                YOLOMetrics.TensorboardCallback(logdir),
                hp.KerasCallback(logdir, hparams)])
Example 27
def model_run(hparams, log_seq):
    logdir = r'logs\t_{}'.format(log_seq)
    
    model = Sequential([Input((4,)),
                        Dense(hparams['num_units'], name='layer1_dense1'),
                        BatchNormalization(name='layer2_batch1'),
                        Dropout(hparams['dropout'], name='layer3_drop1'),
                        Dense(5, name='layer4_dense2'),
                        Dense(3, activation='softmax', name='layer5_sof1')])
    
    model.compile(hparams['optimizer'], loss='sparse_categorical_crossentropy', metrics=['acc'])
    
    model.fit(x, y, epochs=500, validation_split=0.2, shuffle=False,
              callbacks=[tf.keras.callbacks.TensorBoard(logdir),
                         hp.KerasCallback(logdir, hparams, trial_id=str(log_seq)),
                         lrs])
Example 28
def model_fit(
        model: EmbeddingModel,
        training_set: tf.data.Dataset,
        validation_set: tf.data.Dataset,
        export_path: str,
        log_dir: str,
        hparams: Dict[hp.HParam, Any],
        epochs: int = 20,
        verbose: int = 1,
        worker: int = 4,
) -> None:

    # tensorboard logging for standard metrics
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir,
        profile_batch=(300, 320)
    )

    # tensorboard logging for hyperparameters
    keras_callback = hp.KerasCallback(
        writer=log_dir,
        hparams=hparams,
        trial_id=log_dir
    )

    metrics = [
        tf.keras.metrics.AUC(name='auc'),
    ]
    model.compile(
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        optimizer='adam',
        metrics=metrics)

    _ = model.fit(
        training_set,
        validation_data=validation_set,
        epochs=epochs,
        verbose=verbose,
        callbacks=[tensorboard_callback, keras_callback],
        workers=worker)

    model.summary()
    tf.saved_model.save(
        obj=model,
        export_dir=os.path.join(export_path, '1')
    )
Example 29
def train_test_model(logdir, hparams):
    classifier = tf.keras.Sequential()
    classifier.add(
        tf.keras.layers.Conv2D(filters=hparams[HP_CNN_FILTER_1],
                               kernel_size=(hparams[HP_CNN_KERNEL_X_1],
                                            hparams[HP_CNN_KERNEL_Y_1]),
                               padding='same',
                               activation='relu',
                               input_shape=(x_train[0].shape[0],
                                            x_train[0].shape[1], 1)))
    if hparams[HP_BATCH_NORM]:
        classifier.add(tf.keras.layers.BatchNormalization())
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    classifier.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT]))
    classifier.add(
        tf.keras.layers.Conv2D(filters=hparams[HP_CNN_FILTER_2],
                               kernel_size=(hparams[HP_CNN_KERNEL_X_2],
                                            hparams[HP_CNN_KERNEL_Y_2]),
                               padding='same',
                               activation='relu'))
    classifier.add(tf.keras.layers.MaxPooling2D(pool_size=2))
    classifier.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT]))
    classifier.add(tf.keras.layers.Flatten())
    classifier.add(
        tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation='relu'))
    classifier.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT]))
    classifier.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    classifier.compile(optimizer=tf.keras.optimizers.Adam(
        learning_rate=hparams[HP_LEARNING_RATE], decay=0.001),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    cb = [
        tf.keras.callbacks.TensorBoard(log_dir=logdir),
        hp.KerasCallback(logdir, hparams)
    ]
    classifier.fit(x_train,
                   y_train,
                   validation_data=(x_test, y_test),
                   batch_size=64,
                   epochs=100,
                   callbacks=cb,
                   verbose=0)

    _, accuracy = classifier.evaluate(x_test, y_test)
    return accuracy
Example 30
 def _train_test(model):  # pragma: no cover
     extra_callback = [hp.KerasCallback(logdir, options_dict)]
     training((train_data, val_data), options, model, logdir,
              extra_callback)
     K.clear_session()
     predictions = dgpred.predict_complete(step_size,
                                           options,
                                           logdir,
                                           val_data,
                                           use_mss=True)
     K.clear_session()
     is_not_na = np.logical_not(np.isnan(predictions[:, 0]))
     predictions_class = predictions[is_not_na].argmax(axis=1)
     dgpred.filter_segments(predictions_class, options.min_mss_len)
     _, metrics = dgpred.calculate_metrics(
         predictions_class, val_data.truelbl[:, is_not_na].argmax(axis=0))
     return metrics
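Each `run()` / `train_test_model()` function above handles a single hyperparameter combination; the caller is expected to loop over the search space and give every trial its own logdir. Below is a hedged sketch of such a driver loop; the `HP_*` definitions, the logdir, and the name of the per-run function are assumptions for illustration, not taken from any example above.

import os

from tensorboard.plugins.hparams import api as hp

# Illustrative search space (assumption).
HP_NUM_UNITS = hp.HParam("num_units", hp.Discrete([16, 32]))
HP_DROPOUT = hp.HParam("dropout", hp.RealInterval(0.1, 0.3))

session_num = 0
for num_units in HP_NUM_UNITS.domain.values:
    for dropout in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
        hparams = {HP_NUM_UNITS: num_units, HP_DROPOUT: dropout}
        run_name = "run-%d" % session_num
        logdir = os.path.join("logs", "hparam_tuning", run_name)
        print("--- Starting trial:", run_name,
              {h.name: hparams[h] for h in hparams})
        # train_test_model(logdir, hparams)  # any of the per-run functions above
        session_num += 1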