def train(model, epochs=EPOCHS, dataset=VesicleDataset):
    """Train the model."""

    # Training dataset.
    dataset_train = dataset()
    dataset_train.load_vesicle(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset.
    dataset_val = dataset()
    dataset_val.load_vesicle(args.dataset, "val")
    dataset_val.prepare()

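    # Randomly apply zero or more of the following augmenters to each image.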
    augmentation = iaa.SomeOf((0, None), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Affine(rotate=(-180, 180)),
        iaa.Affine(scale=(0.9, 1.11)),
    ])

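    # A second model in inference mode; the logging callbacks use it to run
    # evaluation on the validation set during training.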
    inference_model = modellib.MaskRCNN(mode="inference",
                                        config=config,
                                        model_dir=args.logs)
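    # Merge CLI arguments and model config into one dict of hyperparameters for logging.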
    params = args.__dict__
    params.update(config.get_dict())
    callbacks = [
        keras.callbacks.TensorBoard(log_dir=model.log_dir,
                                    histogram_freq=0,
                                    write_graph=True,
                                    write_images=False),
        keras.callbacks.ModelCheckpoint(model.checkpoint_path,
                                        verbose=0,
                                        save_weights_only=True),
    ]
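    # NeptuneCallback is presumably project-specific (defined elsewhere);
    # NeptuneMonitor streams standard Keras epoch metrics to Neptune.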
    neptune_callback = NeptuneCallback(args.neptune_project, params, model,
                                       inference_model, dataset_val)
    if args.neptune:
        callbacks += [neptune_callback, NeptuneMonitor()]

    if args.tensorboard:
        callbacks.append(
            TensorBoardCallback(model.log_dir, model, inference_model,
                                dataset_val))

    print(f"Training {args.layers} network layers")
    try:
        model.train(dataset_train,
                    dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=epochs,
                    augmentation=augmentation,
                    custom_callbacks=callbacks,
                    layers=args.layers,
                    override_callbacks=True)
    except KeyboardInterrupt:
        # On manual interrupt, still upload the best model to Neptune.
        if args.neptune:
            neptune_callback.save_best_model()
Example #2
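# Fragment of a larger class: assumes np_utils (from keras.utils), features in
# self.X (presumably a pandas DataFrame), integer labels in self.Y, and a
# compiled self.model.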
def train(self, epochs=5):
    X_np = self.X.values
    # One-hot encode the six target classes.
    Y_np = np_utils.to_categorical(self.Y, num_classes=6)
    # Print per-class sample counts as a quick balance check.
    Y_np_sums = Y_np.sum(axis=0)
    print(Y_np_sums)
    self.model.fit(x=X_np,
                   y=Y_np,
                   epochs=epochs,
                   callbacks=[NeptuneMonitor()],
                   validation_split=0.1)
    print('Training done')
    # Sanity-check predictions on the training inputs.
    yp = self.model.predict(x=X_np)
    print(yp[0])

# Set up a checkpoint to save the best model iteration
checkpoint = ModelCheckpoint("Model Checkpoint Name",
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto',
                             period=1)

# Start fine-tuning the model
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0,
                                                  patience=5,
                                                  verbose=0,
                                                  mode='auto')
hist = model.fit(
    x=imageLoader_modified(batch_size),
    steps_per_epoch=int(hdf5_file.root.exttest_img.shape[0] * 0.4) // batch_size,
    epochs=50,
    validation_data=imageLoader(hdf5_file.root.val_img,
                                hdf5_file.root.val_labels, batch_size, 1),
    validation_steps=hdf5_file.root.val_img.shape[0] // batch_size,
    callbacks=[early_stopping, csv_logger,
               NeptuneMonitor(exp), checkpoint])

model.save('Final Model Name')
# model.compile(optimizer=optimizer, loss=cust_loss(custLossThresh),
#               metrics=['accuracy', mae, dice_coef], run_eagerly=True)
model.compile(optimizer=optimizer,
              loss=cust_loss(custLossThresh),
              metrics=['mae', cust_accuracy(custLossThresh),
                       cust_mae(custLossThresh), dice_coef],
              run_eagerly=True)
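
# Illustrative only: cust_loss is assumed to be a factory that binds a
# threshold and returns a Keras-compatible loss. A minimal sketch of that
# closure pattern (hypothetical, not the original implementation):
def cust_loss_sketch(thresh):
    def loss(y_true, y_pred):
        # Weight squared error more heavily where the target exceeds thresh.
        weights = tf.where(y_true >= thresh, 1.0, 0.5)
        return tf.reduce_mean(weights * tf.square(y_true - y_pred))
    return loss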
#
# set up callback functions
#
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(output_path, checkpoint_filename),
    monitor='loss',
    verbose=1,
    save_best_only=True,
    mode='min')

#
# Adam adapts its per-parameter step sizes internally, so the learning-rate
# scheduler is intentionally left out of the callbacks passed to model.fit
#
LRS = tf.keras.callbacks.LearningRateScheduler(scheduler)
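
# The scheduler function referenced above is not shown in this snippet; a
# typical step-decay version might look like this (an assumption, not the
# original):
def scheduler_sketch(epoch, lr):
    # Halve the learning rate every 10 epochs.
    return lr * 0.5 if epoch > 0 and epoch % 10 == 0 else lr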

#
# run the model
#
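# integrate_with_tensorflow() comes from the neptune-tensorboard package; it
# patches TensorFlow's summary writers so TensorBoard logs are also sent to
# Neptune.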
neptune_tb.integrate_with_tensorflow()
model.fit(train_dataset,
          epochs=epochs,
          validation_data=val_dataset,
          callbacks=[tensorboard, checkpoint, NeptuneMonitor()])

#
# Send signal to neptune that the run is done
#
neptune.stop()

#
# save the model from the last epoch
#
t = time.strftime("%Y_%m_%d_%H_%M", time.localtime())
model.save(output_model.format(t))
# Quickstart

## Step 1: Create an Experiment

neptune.init(api_token='ANONYMOUS', project_qualified_name='shared/tensorflow-keras-integration')
neptune.create_experiment('tensorflow-keras-quickstart')

## Step 2: Add NeptuneMonitor Callback to model.fit()

from neptunecontrib.monitoring.keras import NeptuneMonitor

model.fit(x_train, y_train,
          epochs=5,
          batch_size=64,
          callbacks=[NeptuneMonitor()])

## Step 3: Explore results in the Neptune UI

## Step 4: Stop logging

neptune.stop()

# More Options

## Log hardware consumption
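
# Hardware metrics (CPU, GPU, memory) are collected automatically while an
# experiment is running, provided psutil is installed (an assumption based on
# legacy neptune-client behavior):
# pip install psutil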

## Log hyperparameters

PARAMS = {'lr': 0.005,
          'momentum': 0.9,