Example #1
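The constructor of a segmentation model: SigmoidFocalCrossEntropy is registered in a loss dictionary under the key "focal" next to an Adam optimiser. `ShallowEncoder`, `ShallowDecoder`, and `config` are project-local names defined elsewhere in the source file.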
    def __init__(self):
        super().__init__()
        self.stop_training = False  # Toggled by callbacks to halt training.
        self.z = config.EXPERIMENT_Z

        # Optimiser and losses.
        self.optimiser = Adam()
        self.model_losses = {"focal": SigmoidFocalCrossEntropy()}

        # Segmentation layers.
        self.encoder = ShallowEncoder()
        self.decoder = ShallowDecoder()
Example #2
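A unittest-style check that the loss object stores the `name` and `reduction` arguments it was constructed with.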
    def test_config(self):
        bce_obj = SigmoidFocalCrossEntropy(
            reduction=tf.keras.losses.Reduction.NONE, name="sigmoid_focal_crossentropy"
        )
        self.assertEqual(bce_obj.name, "sigmoid_focal_crossentropy")
        self.assertEqual(bce_obj.reduction, tf.keras.losses.Reduction.NONE)
Example #3
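The same configuration check written as a plain pytest test.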
def test_config():
    bce_obj = SigmoidFocalCrossEntropy(
        reduction=tf.keras.losses.Reduction.NONE,
        name="sigmoid_focal_crossentropy")
    assert bce_obj.name == "sigmoid_focal_crossentropy"
    assert bce_obj.reduction == tf.keras.losses.Reduction.NONE
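
Both tests pin down the same two constructor arguments. As a minimal sketch of what that configuration does at call time (assuming `tensorflow_addons` is installed; the tensors below are made-up dummies), `Reduction.NONE` makes the loss return one value per example instead of a scalar:

import tensorflow as tf
from tensorflow_addons.losses import SigmoidFocalCrossEntropy

loss_fn = SigmoidFocalCrossEntropy(
    reduction=tf.keras.losses.Reduction.NONE,
    name="sigmoid_focal_crossentropy")

# Dummy multi-label targets and sigmoid probabilities (from_logits defaults to False).
y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])
y_pred = tf.constant([[0.9, 0.1], [0.2, 0.8]])

per_example = loss_fn(y_true, y_pred)
print(per_example.shape)  # (2,): one focal-loss value per example, summed over classes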
Example #4
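An EfficientDet training script for heatmap regression: the active configuration compiles with sigmoid focal loss, while commented-out blocks keep softmax (categorical focal loss) and linear (mean absolute error) alternatives for reference.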
def main():
    dir_path = sys.argv[1]
    phi = 0
    cont_training = False
    weighted_bifpn = True
    freeze_backbone = False
    tf.compat.v1.keras.backend.set_session(get_session())

    # create the generators
    # train_generator = trainGenerator(dir_path)
    images, heatmaps = get_trainData(dir_path, multi_dim=True)
    print("Number of images: %s and heatmaps: %s\n" %
          (len(images), len(heatmaps)))
    model = efficientdet(phi,
                         weighted_bifpn=weighted_bifpn,
                         freeze_bn=freeze_backbone)

    # model_name = 'efficientnet-b{}'.format(phi)
    # file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(model_name)
    # file_hash = WEIGHTS_HASHES[model_name][1]
    # weights_path = keras.utils.get_file(file_name,
    #                                     BASE_WEIGHTS_PATH + file_name,
    #                                     cache_subdir='models',
    #                                     file_hash=file_hash)
    # model.load_weights(weights_path, by_name=True)

    # freeze backbone layers
    if freeze_backbone:
        # 227, 329, 329, 374, 464, 566, 656
        for i in range(1, [227, 329, 329, 374, 464, 566, 656][phi]):
            model.layers[i].trainable = False

    # compile model
    print("Compiling model ... \n")
    # SOFTMAX ACTIVATION (commented-out alternative)
    # model.compile(optimizer=Adam(learning_rate=1e-3),
    #               loss=[categorical_focal_loss(gamma=2, alpha=0.25)])

    # SIGMOID ACTIVATION
    focalloss = SigmoidFocalCrossEntropy(
        reduction=Reduction.SUM_OVER_BATCH_SIZE)
    model.compile(optimizer=Adam(learning_rate=1e-3), loss=focalloss)

    # LINEAR ACTIVATION (commented-out alternative)
    # model.compile(optimizer=Adam(learning_rate=1e-3),
    #               loss='mean_absolute_error')

    # print(model.summary())

    # start training
    # return model.fit_generator(
    #     generator=train_generator,
    #     steps_per_epoch=10,
    #     initial_epoch=0,
    #     epochs=10,
    #     verbose=1
    # validation_data=validation_generator
    # )

    # Load the saved 'efficientdet2' weights when continuing training on the first stacked heatmaps.
    if cont_training:
        model.load_weights('efficientdet2')
        model.fit(images, heatmaps, batch_size=16, epochs=60, verbose=1)
    else:
        model.fit(images, heatmaps, batch_size=16, epochs=10, verbose=1)
    model.save_weights('efficientdet2')
    preds = model.predict(images[0:3])
    # save_preds(dir_path, preds)

    # fig = plt.figure()

    plt.subplot(1, 2, 1)
    plt.imshow(np.sum(preds[0], axis=-1))

    plt.subplot(1, 2, 2)
    plt.imshow(np.sum(heatmaps[0], axis=-1))

    plt.savefig("testres.png")  # save before show(), which can clear the current figure
    plt.show()
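
Example #4 compiles with `Reduction.SUM_OVER_BATCH_SIZE`. A small comparison sketch (reusing the dummy tensors from the sketch above) shows that this reduction is simply the mean of the per-example values `Reduction.NONE` would return:

import tensorflow as tf
from tensorflow.keras.losses import Reduction
from tensorflow_addons.losses import SigmoidFocalCrossEntropy

y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])
y_pred = tf.constant([[0.9, 0.1], [0.2, 0.8]])

per_example = SigmoidFocalCrossEntropy(reduction=Reduction.NONE)(y_true, y_pred)
averaged = SigmoidFocalCrossEntropy(reduction=Reduction.SUM_OVER_BATCH_SIZE)(y_true, y_pred)
# averaged equals tf.reduce_mean(per_example): the scalar Keras hands to the optimizer.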
Example #5
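Training setup for a wandb-style `config` object (the snippet opens mid-way through an if/else that resolves label-file paths): SigmoidFocalCrossEntropy is chosen over binary cross-entropy, with an Adam optimizer and a set of augmentation flags.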
        lcl_chkpt_dir = "./checkpoints"

    else:

        label_file_path_train = os.path.join(data_dir, files[0])
        label_file_path_val = os.path.join(data_dir, files[1])
        label_mapping_path = os.path.join(data_dir, files[2])

    # Variables definition
    config.batch_size = batch_size
    config.learning_rate = 0.001
    config.label_file_path_train = label_file_path_train  # labels_1_4_train_v2
    config.label_file_path_val = label_file_path_val
    config.loss = SigmoidFocalCrossEntropy()  # alternative: tf.keras.losses.BinaryCrossentropy(from_logits=False)
    config.optimizer = keras.optimizers.Adam(config.learning_rate)
    config.input_shape = (100, 100, 18)
    config.numclasses = 10

    config.augment = True
    config.random_flip_up_down = False
    config.random_flip_left_right = False
    config.flip_left_right = True
    config.flip_up_down = True
    config.rot90 = True
    config.transpose = False
    config.enable_shuffle = True

    print(f"batch size {config.batch_size}, learning_rate {config.learning_rate}, augment {config.augment}")

Example #6
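A spreadsheet-driven hyperparameter search: each row of an Excel sheet names the model type, optimizer, and loss, and `SigmoidFocalCrossEntropy()` is instantiated whenever a row's `loss` column reads 'SigmoidFocal'.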
def run_2d_model(batch_size=24):
    tf.random.set_seed(3141)
    epochs = 100001
    base_path, morfeus_drive, excel_path = return_paths()
    iterations = [0, 1]
    if base_path.startswith('H'):
        write_to_excel(excel_path=excel_path,
                       iterations=iterations,
                       batch_size=batch_size)
    channel_keys = {3: 2, 4: 3, 5: 3, 7: 4, 8: 2, 9: 4, 10: 3, 11: 5, 12: 3}
    base_df = pd.read_excel(excel_path, engine='openpyxl')
    base_df.set_index('Model_Index')  # NOTE: no-op as written; set_index returns a new DataFrame unless assigned or inplace=True
    potentially_not_run = base_df.loc[~pd.isnull(base_df.Iteration)
                                      & (base_df['Optimizer'] == 'Adam')
                                      & (base_df['run?'] == -10)
                                      #& (base_df['Model_Type'] == 7)
                                      & pd.isnull(base_df['epoch_loss'])]
    indexes_for_not_run = potentially_not_run.index.values
    np.random.shuffle(indexes_for_not_run)
    for index in indexes_for_not_run:
        run_df = base_df.loc[[index]]
        model_key = run_df.loc[index, 'Model_Type']
        model_index = run_df.loc[index, 'Model_Index']
        tensorboard_path = os.path.join(morfeus_drive, 'Tensorflow',
                                        'Model_Key_{}'.format(model_key),
                                        'Model_Index_{}'.format(model_index))
        if os.path.exists(tensorboard_path):
            continue
        os.makedirs(tensorboard_path)
        features_list = ('Model_Type', 'step_factor', 'Optimizer', 'min_lr',
                         'max_lr', 'loss')
        if model_key > 2:
            features_list = ('Model_Type', 'step_factor', 'blocks_in_dense',
                             'dense_conv_blocks', 'dense_layers',
                             'num_dense_connections', 'filters', 'growth_rate',
                             'Optimizer', 'min_lr', 'max_lr', 'loss',
                             'Dropout', 'global_max')
        _, _, train_generator, validation_generator = return_generators(
            batch_size=batch_size,
            cache_add='main_{}'.format(model_key),
            cache=True,
            model_key=model_key)
        model_base = return_model(model_key=model_key)
        model_parameters = run_df.squeeze().to_dict()
        model_parameters['channels'] = channel_keys[model_key]
        for key in model_parameters.keys():  # cast numpy scalars to native Python types
            if type(model_parameters[key]) is np.int64:
                model_parameters[key] = int(model_parameters[key])
            elif type(model_parameters[key]) is np.float64:
                model_parameters[key] = float(model_parameters[key])
        if model_parameters['loss'] == 'CosineLoss':
            loss = CosineLoss()
        elif model_parameters['loss'] == 'SigmoidFocal':
            loss = SigmoidFocalCrossEntropy()
        elif model_parameters['loss'] == 'CategoricalCrossEntropy':
            loss = tf.keras.losses.CategoricalCrossentropy()
        if model_parameters['Optimizer'] == 'SGD':
            opt = tf.keras.optimizers.SGD()
        elif model_parameters['Optimizer'] == 'Adam':
            opt = tf.keras.optimizers.Adam()
        if isinstance(model_base, types.FunctionType):
            model = model_base(**model_parameters)
        else:
            model = model_base
        model_path = os.path.join(base_path, 'Models',
                                  'Model_Type_{}'.format(model_key),
                                  'Model_Index_{}'.format(model_index))

        print('Saving model to {}\ntensorboard at {}'.format(
            model_path, tensorboard_path))
        hparams = return_hparams(model_parameters,
                                 features_list=features_list,
                                 excluded_keys=[])
        run_model(model=model,
                  train_generator=train_generator,
                  validation_generator=validation_generator,
                  min_lr=model_parameters['min_lr'],
                  max_lr=model_parameters['max_lr'],
                  model_path=model_path,
                  tensorboard_path=tensorboard_path,
                  trial_id=model_index,
                  optimizer=opt,
                  hparams=hparams,
                  step_factor=model_parameters['step_factor'],
                  epochs=epochs,
                  loss=loss)
    return None
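
Example #7

This trailing snippet appears to continue the script from Example #5: it opens mid-way through a commented-out checkpoint definition (hence the stray `mode='max'` / `save_best_only` fragment below), then compiles the model with SigmoidFocalCrossEntropy plus binary-accuracy, precision, recall, and F1 metrics, and trains with early stopping on validation precision.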
    #     mode='max',
    #     save_best_only=True)

    # reducelronplateau = tf.keras.callbacks.ReduceLROnPlateau(
    #   monitor='val_loss', factor=0.1, patience=5, verbose=1,
    #   mode='min', min_lr=0.000001)

    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_precision', mode='max', patience=20, verbose=1)

    callbacks_list = [save_checkpoint_wandb, early_stop]

    model = define_model(10, (100, 100, 18))

    # loss=tf.keras.losses.BinaryCrossentropy(from_logits=False), # Computes the cross-entropy loss between true labels and predicted labels.
    # Focal loss instead of class weights: https://www.dlology.com/blog/multi-class-classification-with-focal-loss-for-imbalanced-datasets/
    model.compile(loss=SigmoidFocalCrossEntropy(),
                  # https://www.tensorflow.org/addons/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy
                  optimizer=keras.optimizers.Adam(0.001),
                  metrics=[tf.metrics.BinaryAccuracy(name='accuracy'),
                           tf.keras.metrics.Precision(name='precision'),
                           # Computes the precision of the predictions with respect to the labels.
                           tf.keras.metrics.Recall(name='recall'),
                           # Computes the recall of the predictions with respect to the labels.
                           F1Score(num_classes=10, name="f1_score")
                           # https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/F1Score
                           ]
                  )

    # model = Simple_CNN(10, input_shape=(100, 100, 18))
    epochs = 2
    # The source snippet is cut off mid-call; epochs and callbacks_list from above are the assumed remaining arguments.
    history = model.fit(gen.training_dataset, validation_data=gen.validation_dataset,
                        epochs=epochs, callbacks=callbacks_list)