Example 1
def detailnet_retinanet(num_classes, inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a modified ResNet backbone.

    Args
        num_classes: Number of classes to predict.
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function that modifies the backbone before it is used in retinanet (e.g. to freeze backbone layers).

    Returns
        RetinaNet model with a DenseNet backbone.
    """
    if inputs is None:
        inputs = keras.layers.Input(shape=(None, None, 3))

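    # number of layers in each of the four dense blocks (a small custom DenseNet)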
    blocks = [1, 2, 2, 2]
    backbone = DenseNet(blocks=blocks,
                        input_tensor=inputs,
                        include_top=False,
                        pooling=None,
                        weights=None)

    # invoke modifier if given
    if modifier:
        backbone = modifier(backbone)

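    # get the last concat layer from the end of each dense block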
    layer_outputs = [
        backbone.get_layer(
            name='conv{}_block{}_concat'.format(idx + 2, block_num)).output
        for idx, block_num in enumerate(blocks)
    ]

    # create the full model
    return retinanet(inputs=inputs,
                     num_classes=num_classes,
                     backbone_layers=layer_outputs[1:],
                     **kwargs)
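The modifier hook from the docstring is typically used to freeze the backbone. A minimal sketch of such a call (the freeze_backbone helper and num_classes=80 are illustrative, not from the source):

def freeze_backbone(backbone):
    # hypothetical modifier: freeze every backbone layer so that only
    # the RetinaNet submodels are trained
    for layer in backbone.layers:
        layer.trainable = False
    return backbone

model = detailnet_retinanet(num_classes=80, modifier=freeze_backbone)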
Example 2
def densenet_retinanet(num_classes,
                       backbone='densenet121',
                       inputs=None,
                       modifier=None,
                       **kwargs):
    """ Constructs a retinanet model using a densenet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('densenet121', 'densenet169', 'densenet201')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function that modifies the backbone before it is used in retinanet (e.g. to freeze backbone layers).

    Returns
        RetinaNet model with a DenseNet backbone.
    """
    # choose default input
    if inputs is None:
        inputs = keras.layers.Input((None, None, 3))

    blocks = allowed_backbones[backbone]
    backbone = DenseNet(blocks=blocks,
                        input_tensor=inputs,
                        include_top=False,
                        pooling=None,
                        weights=None)

    # get last conv layer from the end of each dense block
    layer_outputs = [
        backbone.get_layer(
            name='conv{}_block{}_concat'.format(idx + 2, block_num)).output
        for idx, block_num in enumerate(blocks)
    ]

    # create the densenet backbone
    backbone = keras.models.Model(inputs=inputs,
                                  outputs=layer_outputs[1:],
                                  name=backbone.name)

    # invoke modifier if given
    if modifier:
        backbone = modifier(backbone)

    # create the full model
    model = retinanet.retinanet(inputs=inputs,
                                num_classes=num_classes,
                                backbone_layers=backbone.outputs,
                                **kwargs)

    return model
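Example 2 references a module-level allowed_backbones mapping that is not shown in the snippet. Based on the docstring and the standard keras-applications DenseNet block configurations, it presumably looks like the following sketch:

# assumed mapping from backbone name to DenseNet block configuration
allowed_backbones = {
    'densenet121': [6, 12, 24, 16],
    'densenet169': [6, 12, 32, 32],
    'densenet201': [6, 12, 48, 32],
}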
Example 3
    '''Neural network training
    '''

    # list all CPUs and GPUs
    device_list = K.get_session().list_devices()

    # number of GPUs
    gpu_number = np.count_nonzero(['GPU' in str(x) for x in device_list])

    # instantiate model
    with tf.device('/cpu:0'):
        # The stock DenseNet top is a Dense layer with a softmax activation, which does not work for a
        # single-class output, so we take the DenseNet without its top (include_top=False) and manually
        # add our own Dense layer with a sigmoid activation as the final output
        base_model = DenseNet(blocks=[6, 12, 24, 16], include_top=False, weights=None, input_shape=(401, 401, 3),
                              pooling='avg')
        x = Dense(units=1, activation='sigmoid', name='fc1')(base_model.output)
        model = Model(inputs=base_model.input, outputs=x)

    saved_model_filename = os.path.join(saved_models_dir, experiment_id + '_model_fold_' + str(i_fold) + '.h5')

    if gpu_number > 1:  # compile and train model: Multiple GPUs

        # checkpoint to save model after each epoch
        checkpointer = cytometer.model_checkpoint_parallel.ModelCheckpoint(filepath=saved_model_filename,
                                                                           verbose=1, save_best_only=True)
        # compile model
        parallel_model = multi_gpu_model(model, gpus=gpu_number)
        parallel_model.compile(loss={'fc1': 'binary_crossentropy'},
                               optimizer='Adadelta',
                               metrics={'fc1': ['acc']})
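Only the multi-GPU branch is shown above. A single-GPU/CPU fallback would presumably compile the template model directly; a sketch under that assumption, reusing the loss and metrics from the parallel branch:

    else:  # compile and train model: single GPU or CPU
        checkpointer = keras.callbacks.ModelCheckpoint(filepath=saved_model_filename,
                                                       verbose=1, save_best_only=True)
        model.compile(loss={'fc1': 'binary_crossentropy'},
                      optimizer='Adadelta',
                      metrics={'fc1': ['acc']})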
Example 4
    test_cell_in = np.concatenate((test_cell_im, 255 * test_cell_lab), axis=3)
    del test_cell_im
    del test_cell_lab

    '''Neural network training
    '''

    # list all CPUs and GPUs
    device_list = K.get_session().list_devices()

    # number of GPUs
    gpu_number = np.count_nonzero(['GPU' in str(x) for x in device_list])

    # instantiate model
    with tf.device('/cpu:0'):
        model = DenseNet(blocks=[6, 12, 24, 16], include_top=True, weights=None, input_shape=(401, 401, 4),
                         classes=1)

    saved_model_filename = os.path.join(saved_models_dir, experiment_id + '_model_fold_' + str(i_fold) + '.h5')

    if gpu_number > 1:  # compile and train model: Multiple GPUs

        # checkpoint to save model after each epoch
        checkpointer = cytometer.model_checkpoint_parallel.ModelCheckpoint(filepath=saved_model_filename,
                                                                           verbose=1, save_best_only=True)
        # compile model
        parallel_model = multi_gpu_model(model, gpus=gpu_number)
        parallel_model.compile(loss={'fc1000': 'mse'},
                               optimizer='Adadelta',
                               metrics={'fc1000': ['mse', 'mae']})

        # train model
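The snippet is truncated at the training call. Under the same pattern it would presumably continue along these lines (train_cell_in, train_cell_out, test_cell_out and the hyperparameters are assumptions, not from the source):

        parallel_model.fit(train_cell_in, train_cell_out,  # hypothetical training arrays
                           batch_size=16, epochs=100,      # assumed hyperparameters
                           validation_data=(test_cell_in, test_cell_out),
                           callbacks=[checkpointer])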
Example 5
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training DenseNet')
    print('with lr = ' + str(learningRate) + ', batchSize = ' + str(batchSize))

    # save names
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)
    model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_lr_' + str(
        learningRate) + '_bs_' + str(batchSize)
    weight_name = model_name + '_weights.h5'
    model_json = model_name + '.json'
    model_all = model_name + '_model.h5'
    model_mat = model_name + '.mat'

    if os.path.isfile(model_mat):  # no training if output file exists
        return

    # create model
    cnn = DenseNet(nb_classes=11, img_dim=(1, patchSize[0, 0], patchSize[0, 1]), nb_dense_block=2, depth=19,
                   growth_rate=16,
                   nb_filter=64)

    # opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True)  # alternative: Adagrad(lr=0.01, epsilon=1e-06)
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1),
                 ModelCheckpoint(sOutPath + os.sep + 'checkpoints' + os.sep + 'checker.hdf5', monitor='val_acc',
                                 verbose=0, period=1, save_best_only=True)]

    cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    cnn.summary()
    result = cnn.fit(X_train,
                     y_train,
                     validation_data=(X_test, y_test),
                     epochs=iEpochs,
                     batch_size=batchSize,
                     callbacks=callbacks,
                     verbose=1)

    score_test, acc_test = cnn.evaluate(X_test, y_test, batch_size=batchSize)

    prob_test = cnn.predict(X_test, batch_size=batchSize, verbose=0)
    y_pred = np.argmax(prob_test, axis=1)
    y_test = np.argmax(y_test, axis=1)
    confusion_mat = confusion_matrix(y_test, y_pred)

    # save model architecture, weights and full model
    json_string = cnn.to_json()
    with open(model_json, 'w') as f:
        f.write(json_string)
    cnn.save_weights(weight_name, overwrite=True)
    cnn.save(model_all)  # keras > v0.7
    model_png_path = sOutPath + os.sep + 'model.png'
    from keras.utils import plot_model
    plot_model(cnn, to_file=model_png_path, show_shapes=True, show_layer_names=True)

    # matlab
    acc = result.history['acc']
    loss = result.history['loss']
    val_acc = result.history['val_acc']
    val_loss = result.history['val_loss']

    print('Saving results: ' + model_name)
    sio.savemat(model_name, {'model_settings': model_json,
                             'model': model_all,
                             'weights': weight_name,
                             'acc': acc,
                             'loss': loss,
                             'val_acc': val_acc,
                             'val_loss': val_loss,
                             'score_test': score_test,
                             'acc_test': acc_test,
                             'prob_test': prob_test,
                             'confusion_mat': confusion_mat})
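A hypothetical invocation of fTrainInner, just to show the expected argument shapes (all names, shapes and hyperparameters below are assumptions; patchSize is a 1x2 array and the patches are channel-first with one-hot labels for the 11 classes):

import numpy as np

# note: fTrainInner expects a 'checkpoints' subdirectory under sOutPath
patchSize = np.array([[40, 40]])                      # assumed 40x40 patches
X_train = np.random.rand(128, 1, 40, 40)              # channel-first image patches
y_train = np.eye(11)[np.random.randint(0, 11, 128)]   # one-hot labels, 11 classes
X_test = np.random.rand(32, 1, 40, 40)
y_test = np.eye(11)[np.random.randint(0, 11, 32)]

fTrainInner(X_train, y_train, X_test, y_test,
            sOutPath='./output', patchSize=patchSize,
            batchSize=32, learningRate=0.01, iEpochs=5)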