Example #1
def thigh_incremental_mem(modelObj: DynamicDLModel, trainingData: dict, trainingOutputs,
                          bs=5, minTrainImages=5):
    import dl.common.preprocess_train as pretrain
    from dl.common.DataGenerators import DataGeneratorMem
    import os
    from keras.callbacks import ModelCheckpoint
    from keras import optimizers
    
    import numpy as np

    # Output label map: index -> thigh muscle abbreviation.
    LABELS_DICT = {
        1: 'VL',
        2: 'VM',
        3: 'VI',
        4: 'RF',
        5: 'SAR',
        6: 'GRA',
        7: 'AM',
        8: 'SM',
        9: 'ST',
        10: 'BFL',
        11: 'BFS',
        12: 'AL'
    }

    MODEL_RESOLUTION = np.array([1.037037, 1.037037])
    MODEL_SIZE = (432, 432)
    BAND = 49
    BATCH_SIZE = bs
    CHECKPOINT_PATH = os.path.join("..", "Weights_incremental")

    os.makedirs(CHECKPOINT_PATH, exist_ok=True)

    image_list, mask_list = pretrain.common_input_process(LABELS_DICT, MODEL_RESOLUTION, MODEL_SIZE,
                                                          trainingData, trainingOutputs)

    # Skip training when too few annotated images are available.
    if len(image_list) < minTrainImages:
        print("Not enough images for training")
        return

    output_data_structure = pretrain.input_creation_mem(image_list, mask_list, BAND)

    card = len(image_list)
    steps = card // BATCH_SIZE  # number of full batches per epoch

    netc = modelObj.model
    checkpoint_files = os.path.join(CHECKPOINT_PATH, "weights_thigh-{epoch:02d}-{loss:.2f}.hdf5")
    training_generator = DataGeneratorMem(output_data_structure, list_X=list(range(1, steps * BATCH_SIZE + 1)), batch_size=BATCH_SIZE)
    # Save (weights-only) checkpoints after every epoch.
    check = ModelCheckpoint(filepath=checkpoint_files, monitor='loss', verbose=0, save_best_only=False,
                            save_weights_only=True, mode='auto', save_freq='epoch')
    adamlr = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    # fit_generator() is deprecated; fit() accepts a Sequence directly.
    history = netc.fit(x=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check],
                       verbose=1)
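
A minimal invocation sketch for the function above. The DynamicDLModel import path and loader call, and the exact layout of trainingData/trainingOutputs, are assumptions for illustration, not an API confirmed by this code.

import numpy as np
from dl.DynamicDLModel import DynamicDLModel  # assumed import path

with open('thigh.model', 'rb') as f:
    model_obj = DynamicDLModel.Load(f)  # assumed loader API

# One synthetic 432x432 slice plus binary masks keyed by muscle label;
# the 'image' and 'resolution' keys are assumptions.
training_data = {
    'image': np.zeros((432, 432), dtype=np.float32),
    'resolution': (1.037037, 1.037037),
}
training_outputs = {name: np.zeros((432, 432), dtype=np.uint8)
                    for name in ('VL', 'VM', 'VI', 'RF')}

thigh_incremental_mem(model_obj, training_data, training_outputs,
                      bs=1, minTrainImages=1)
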
Example #2
def thigh_incremental_mem(modelObj: DynamicDLModel,
                          trainingData: dict,
                          trainingOutputs,
                          bs=5,
                          minTrainImages=5):
    import dl.common.preprocess_train as pretrain
    from dl.common.DataGenerators import DataGeneratorMem
    import os
    from keras.callbacks import ModelCheckpoint
    from keras import optimizers
    import time
    import numpy as np

    from dl.labels.thigh import inverse_labels

    MODEL_RESOLUTION = np.array([1.037037, 1.037037])
    MODEL_SIZE = (432, 432)
    BAND = 49
    BATCH_SIZE = bs
    CHECKPOINT_PATH = os.path.join(".", "Weights_incremental", "thigh")
    MIN_TRAINING_IMAGES = minTrainImages

    os.makedirs(CHECKPOINT_PATH, exist_ok=True)

    t = time.time()
    print('Image preprocess')

    image_list, mask_list = pretrain.common_input_process(
        inverse_labels, MODEL_RESOLUTION, MODEL_SIZE, trainingData,
        trainingOutputs)

    print('Done. Elapsed', time.time() - t)
    nImages = len(image_list)

    if nImages < MIN_TRAINING_IMAGES:
        print("Not enough images for training")
        return

    print("image shape", image_list[0].shape)
    print("mask shape", mask_list[0].shape)

    print('Weight calculation')
    t = time.time()

    output_data_structure = pretrain.input_creation_mem(
        image_list, mask_list, BAND)

    print('Done. Elapsed', time.time() - t)

    card = len(image_list)
    steps = card // BATCH_SIZE  # number of full batches per epoch

    print(f'Incremental learning for thigh with {nImages} images')
    t = time.time()

    netc = modelObj.model
    checkpoint_files = os.path.join(
        CHECKPOINT_PATH, "weights-{epoch:02d}-{loss:.2f}.hdf5")
    training_generator = DataGeneratorMem(output_data_structure,
                                          list_X=list(range(steps *
                                                            BATCH_SIZE)),
                                          batch_size=BATCH_SIZE,
                                          dim=MODEL_SIZE)
    # save_freq='epoch' supersedes the deprecated period= argument.
    check = ModelCheckpoint(filepath=checkpoint_files,
                            monitor='loss',
                            verbose=0,
                            save_best_only=False,
                            save_freq='epoch',
                            save_weights_only=True,
                            mode='auto')
    adamlr = optimizers.Adam(learning_rate=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    # fit_generator() is deprecated; fit() accepts a Sequence directly.
    history = netc.fit(x=training_generator,
                       steps_per_epoch=steps,
                       epochs=5,
                       callbacks=[check],
                       verbose=1)
    print('Done. Elapsed', time.time() - t)
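
The call netc.fit(x=training_generator, ...) above only requires that DataGeneratorMem behave like a keras.utils.Sequence yielding (batch_inputs, batch_targets) tuples. A schematic stand-in under that assumption (the real class lives in dl.common.DataGenerators; the field names below are invented for illustration):

import numpy as np
from keras.utils import Sequence

class DataGeneratorMemSketch(Sequence):  # schematic stand-in, not the real class
    def __init__(self, data, list_X, batch_size=5, dim=(432, 432)):
        self.data = data              # output of pretrain.input_creation_mem
        self.list_X = list_X          # sample indices to draw from
        self.batch_size = batch_size
        self.dim = dim

    def __len__(self):
        # Batches per epoch; must be >= the steps_per_epoch passed to fit().
        return len(self.list_X) // self.batch_size

    def __getitem__(self, index):
        ids = self.list_X[index * self.batch_size:(index + 1) * self.batch_size]
        # 'images' and 'weighted_masks' are assumed field names.
        X = np.stack([self.data['images'][i] for i in ids])
        y = np.stack([self.data['weighted_masks'][i] for i in ids])
        return X, y
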
Example #3
def leg_incremental_mem(modelObj: DynamicDLModel, trainingData: dict, trainingOutputs,
                        bs=5, minTrainImages=5):
    import dl.common.preprocess_train as pretrain
    from dl.common.DataGenerators import DataGeneratorMem
    import os, time
    from keras.callbacks import ModelCheckpoint
    from keras import optimizers
    import numpy as np

    from dl.labels.leg import inverse_labels

    MODEL_RESOLUTION = np.array([1.037037, 1.037037])
    MODEL_SIZE = (432, 432)
    MODEL_SIZE_SPLIT = (216, 216)
    BAND = 64
    BATCH_SIZE = bs
    CHECKPOINT_PATH = os.path.join(".", "Weights_incremental_split", "leg")
    MIN_TRAINING_IMAGES = minTrainImages

    os.makedirs(CHECKPOINT_PATH, exist_ok=True)

    t = time.time()
    print('Image preprocess')
    classification = trainingData.get('classification', '')

    single_side = False
    swap = False

    # 'right'/'left' refer to the anatomical side. Anatomical right appears on
    # the image left and is used as-is; anatomical-left (image-right) scans are
    # mirrored (swap=True) before entering the single-side pipeline.
    if classification.lower().strip().endswith('right'):
        single_side = True
        swap = False
    elif classification.lower().strip().endswith('left'):
        single_side = True
        swap = True

    if single_side:
        image_list, mask_list = pretrain.common_input_process_single(inverse_labels, MODEL_RESOLUTION, MODEL_SIZE,
                                                                     MODEL_SIZE_SPLIT, trainingData,
                                                                     trainingOutputs, swap)
    else:
        image_list, mask_list = pretrain.common_input_process_split(inverse_labels, MODEL_RESOLUTION, MODEL_SIZE,
                                                                    MODEL_SIZE_SPLIT, trainingData,
                                                                    trainingOutputs)

    print('Done. Elapsed', time.time() - t)
    nImages = len(image_list)

    if nImages < MIN_TRAINING_IMAGES:
        print("Not enough images for training")
        return

    print("image shape", image_list[0].shape)
    print("mask shape", mask_list[0].shape)

    print('Weight calculation')
    t = time.time()

    output_data_structure = pretrain.input_creation_mem(image_list, mask_list, BAND)

    print('Done. Elapsed', time.time() - t)

    card = len(image_list)
    steps = card // BATCH_SIZE  # number of full batches per epoch

    print(f'Incremental learning for leg with {nImages} images')
    t = time.time()

    netc = modelObj.model
    checkpoint_files = os.path.join(CHECKPOINT_PATH, "weights-{epoch:02d}-{loss:.2f}.hdf5")
    training_generator = DataGeneratorMem(output_data_structure, list_X=list(range(steps * BATCH_SIZE)),
                                          batch_size=BATCH_SIZE, dim=MODEL_SIZE_SPLIT)
    # save_freq='epoch' supersedes the deprecated period= argument; unlike the
    # thigh example, only the lowest-loss weights are kept (save_best_only=True).
    check = ModelCheckpoint(filepath=checkpoint_files, monitor='loss', verbose=0, save_best_only=True,
                            save_freq='epoch', save_weights_only=True, mode='auto')
    adamlr = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    history = netc.fit(x=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check], verbose=1)
    print('Done. Elapsed', time.time() - t)
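
The side-detection branch above is compact enough to restate as a helper; this hedged sketch reproduces its logic and expected outcomes (the classification strings in the asserts are illustrative assumptions, not values confirmed by this code):

def detect_side(classification: str):
    """Return (single_side, swap) for a leg scan's classification label."""
    c = classification.lower().strip()
    if c.endswith('right'):    # anatomical right: image left, no mirroring
        return True, False
    if c.endswith('left'):     # anatomical left: image right, mirrored
        return True, True
    return False, False        # both legs present: use the split pipeline

# Illustrative labels; the real classification strings may differ.
assert detect_side('Leg - Right') == (True, False)
assert detect_side('LEG LEFT') == (True, True)
assert detect_side('') == (False, False)
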