Example no. 1
def keras_weighted_average(lhs: DynamicDLModel,
                           rhs: DynamicDLModel,
                           lhs_weight=0.5):
    if lhs.model_id != rhs.model_id:
        raise IncompatibleModelError
    lhs_weights = lhs.get_weights()
    rhs_weights = rhs.get_weights()
    newWeights = []
    for depth in range(len(lhs_weights)):
        average = lhs_weights[depth] * lhs_weight + rhs_weights[depth] * (
            1 - lhs_weight)
        newWeights.append(average)
    outputObj = lhs.get_empty_copy()
    outputObj.set_weights(newWeights)
    return outputObj
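A minimal usage sketch for the helper above, assuming two compatible DynamicDLModel files already saved on disk (the file names are placeholders, not files from this project):

# Hedged usage sketch: blend a locally fine-tuned model back into the base
# model, keeping 80% of the original weights. Paths are placeholders.
base = DynamicDLModel.Load(open('models/base.model', 'rb'))
update = DynamicDLModel.Load(open('models/client_update.model', 'rb'))
blended = keras_weighted_average(base, update, lhs_weight=0.8)
with open('models/blended.model', 'wb') as f:
    blended.dump(f)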
Example no. 2
def testClassification(modelPath, dicomPath):
    classModel = DynamicDLModel.Load(open(modelPath, 'rb'))
    
    ima, info = loadDicomFile(dicomPath)
    
    resolution = info.PixelSpacing
    out = classModel({'image': ima, 'resolution': resolution})
    print(out)
Example no. 3
def run_evaluation():
    print('Loading model...')
    model = DynamicDLModel.Load(open(model_path, 'rb'))

    print('Evaluating model...')
    t = time.time()
    dice = evaluate_model(model_type_or_dir, model, save_log=False)
    elapsed = time.time() - t
    print('Dice score:', dice)
    print('Elapsed time', elapsed)
Example no. 4
def testSegmentation(modelPath, dicomPath):
    thighModel = DynamicDLModel.Load(open(modelPath, 'rb'))
    
    ima, info = loadDicomFile(dicomPath)
    
    resolution = info.PixelSpacing
    out = thighModel({'image': ima, 'resolution': resolution})
    
    plotSegmentations(ima, out)
    
    plt.show()
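A hedged invocation sketch for the segmentation test helper above; the model file and DICOM slice paths are placeholders only:

# Any single-slice DICOM plus a matching segmentation model saved with
# DynamicDLModel.dump() should work the same way.
testSegmentation('models/Thigh_1603281020.model', 'dicom/slice_0001.dcm')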
Example no. 5
                            mode='auto',
                            period=5)
    adamlr = optimizers.Adam(learning_rate=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             amsgrad=True)
    training_generator = DataGenerator(path=path,
                                       list_X=list(
                                           range(1, steps * batch_size + 1)),
                                       batch_size=batch_size)
    netc.compile(loss=weighted_loss, optimizer=adamlr)
    history = netc.fit_generator(generator=training_generator,
                                 steps_per_epoch=steps,
                                 epochs=5,
                                 callbacks=[check],
                                 verbose=1)


model = gamba_unet()
model.load_weights('weights/weights_gamba_split.hdf5')
weights = model.get_weights()

modelObject = DynamicDLModel('ba333b4d-90e7-4108-aca5-9216f408d91e',
                             gamba_unet,
                             incremental_learn_function=leg_incremental,
                             weights=weights)

with open('models/incremental_leg_split.model', 'wb') as f:
    modelObject.dump(f)
Example no. 6
    os.makedirs(CHECKPOINT_PATH, exist_ok=True)

    image_list, mask_list = pretrain.common_input_process(LABELS_DICT, MODEL_RESOLUTION, MODEL_SIZE, trainingData, trainingOutputs)
    output_data_structure = pretrain.input_creation_mem(image_list, mask_list, BAND)
    
    card = len(image_list)
    steps = int(float(card) / BATCH_SIZE)

    netc = modelObj.model
    checkpoint_files = os.path.join(CHECKPOINT_PATH, "weights_thigh - {epoch:02d} - {loss:.2f}.hdf5")
    training_generator = DataGeneratorMem(output_data_structure, list_X=list(range(1, steps * BATCH_SIZE + 1)), batch_size=BATCH_SIZE)
    check = ModelCheckpoint(filepath=checkpoint_files, monitor='loss', verbose=0, save_best_only=False,
                            save_weights_only=True, mode='auto', period=10)
    adamlr = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    history = netc.fit_generator(generator=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check],
                                 verbose=1)

model = coscia_unet()
model.load_weights('weights/weights_coscia.hdf5')
weights = model.get_weights()

modelObject = DynamicDLModel('210e2a21-1984-4e6f-8675-bf57bbabef2f',
                             coscia_unet,
                             incremental_learn_function=thigh_incremental,
                             weights=weights)

with open('models/incremental_thigh.model', 'wb') as f:
    modelObject.dump(f)
Example no. 7
    MODEL_SIZE = (128,128)
    netc = modelObj.model
    resolution = np.array(data['resolution'])
    zoomFactor = resolution/MODEL_RESOLUTION
    img = data['image']
    img = zoom(img, zoomFactor) # resample the image to the model resolution
    img = padorcut(img, MODEL_SIZE)
    categories = netc.predict(np.expand_dims(img,axis=0))
    value = categories[0].argmax()
    try:
        return LABELS_DICT[value]
    except KeyError:
        return None
    

model = class_unet()
model.load_weights('weights/weights_cosciagamba.hdf5')
weights = model.get_weights()

modelObject = DynamicDLModel('3f2a8066-007d-4c49-96b0-5fb7a703f6d0',
                             class_unet,
                             class_apply,
                             weights=weights,
                             timestamp_id=None)

filename = f'models/Classifier_{modelObject.timestamp_id}.model'
with open(filename, 'wb') as f:
    modelObject.dump(f)

print('Saved', filename)
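A quick, assumed round-trip check that the dumped classifier can be read back with the loader used elsewhere in these examples:

# Hedged sketch: reload the file written above and confirm its model id.
with open(filename, 'rb') as f:
    reloaded = DynamicDLModel.Load(f)
print('Reloaded model id:', reloaded.model_id)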
Example no. 8
                            save_weights_only=True,
                            mode='auto')
    adamlr = optimizers.Adam(learning_rate=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    #history = netc.fit_generator(generator=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check], verbose=1)
    history = netc.fit(x=training_generator,
                       steps_per_epoch=steps,
                       epochs=5,
                       callbacks=[check],
                       verbose=1)
    print('Done. Elapsed', time.time() - t)


model = coscia_unet()
model.load_weights('weights/weights_coscia.hdf5')
weights = model.get_weights()

modelObject = DynamicDLModel('210e2a21-1984-4e6f-8675-bf57bbabef2f',
                             coscia_unet,
                             coscia_apply,
                             incremental_learn_function=thigh_incremental_mem,
                             weights=weights,
                             timestamp_id="1603281020")

with open('models/Thigh_1603281020.model', 'wb') as f:
    modelObject.dump(f)
Example no. 9
def evaluate_model_thread(model_type, model_file):
    model = DynamicDLModel.Load(open(model_file, 'rb'))
    utils_evaluate_model(model_type, model, cleanup=True)
    del model
    gc.collect()
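The function name suggests it is meant to be launched from a worker thread; a hedged sketch of one way it could be invoked (model type and path are placeholders):

# Hedged sketch only; the real caller may schedule this differently.
from threading import Thread

t = Thread(target=evaluate_model_thread,
           args=('Thigh', 'models/Thigh_1603281020.model'))
t.start()
t.join()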
Example no. 10
    print(f"latest_timestamp: {latest_timestamp}")
    print(r.json())
else:
    print(f"status code: {r.status_code}")
    print(f"message: {r.json()['message']}")

print("------------- Get model ------------------")

r = requests.post(url_base + "get_model",
                  json={
                      "model_type": model_type,
                      "timestamp": latest_timestamp,
                      "api_key": "abc123"
                  })
if r.ok:
    model = DynamicDLModel.Loads(r.content)
    model.dump(open('new_model.model', 'wb'))
else:
    print(f"status code: {r.status_code}")
    print(f"message: {r.json()['message']}")

print("------------- Upload model ------------------")

model = DynamicDLModel.Load(open('new_model.model', 'rb'))
files = {'model_binary': model.dumps()}
r = requests.post(url_base + "upload_model",
                  files=files,
                  data={
                      "model_type": model_type,
                      "api_key": "abc123",
                      "dice": 0.3
Example no. 11
    # check = ModelCheckpoint(filepath=checkpoint_files, monitor='loss', verbose=0, save_best_only=False,save_weights_only=True, mode='auto', period=10)
    check = ModelCheckpoint(filepath=checkpoint_files, monitor='loss', verbose=0, save_best_only=True, # save_freq='epoch',
                            save_weights_only=True, mode='auto')
    adamlr = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    # history = netc.fit_generator(generator=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check], verbose=1)
    history = netc.fit(x=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check], verbose=1)
    print('Done. Elapsed', time.time() - t)


if len(sys.argv) > 1:
    # convert an existing model
    print("Converting model", sys.argv[1])
    old_model_path = sys.argv[1]
    filename = old_model_path
    old_model = DynamicDLModel.Load(open(old_model_path, 'rb'))
    shutil.move(old_model_path, old_model_path + '.bak')
    weights = old_model.get_weights()
    timestamp = old_model.timestamp_id
    model_id = old_model.model_id
else:
    model_id = 'ba333b4d-90e7-4108-aca5-9216f408d91e'
    timestamp = 1610001000
    model = gamba_unet()
    model.load_weights('weights/weights_gamba_split.hdf5')
    weights = model.get_weights()
    filename = f'models/Leg_{timestamp}.model'

modelObject = DynamicDLModel(model_id,
                             gamba_unet,
                             gamba_apply,
Example no. 12
    parser.add_argument('-o',
                        dest='output_path',
                        metavar='path',
                        type=str,
                        required=True,
                        help='Output folder')

    args = parser.parse_args()

    config = json.load(open(config_file))

    base_model = args.base_model
    new_model_list = args.new_models
    output_dir = args.output_path

    original_model_weight = config['original_model_weight']

    print('Original model weight setting', original_model_weight)

    merged_model = DynamicDLModel.Load(open(base_model, 'rb'))
    for new_model in new_model_list:
        print('Loading', new_model)
        new_model = DynamicDLModel.Load(open(new_model, 'rb'))
        merged_model = merged_model * original_model_weight + new_model * (
            1 - original_model_weight)

    merged_model.reset_timestamp()
    merged_model.dump(
        open(os.path.join(output_dir, f'{merged_model.timestamp_id}.model'),
             'wb'))
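The * and + operators used in the merge above imply that DynamicDLModel scales and sums its weight arrays element-wise, consistent with keras_weighted_average in Example no. 1. A rough, assumed sketch of that arithmetic (not the library's actual implementation):

# Assumed behaviour only; the real DynamicDLModel operators may differ.
def scale_model(model, factor):
    out = model.get_empty_copy()
    out.set_weights([w * factor for w in model.get_weights()])
    return out

def add_models(lhs, rhs):
    out = lhs.get_empty_copy()
    out.set_weights([a + b for a, b in zip(lhs.get_weights(), rhs.get_weights())])
    return out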
Example no. 13
def merge_model(model_type, new_model_path):
    """
    This will take a (weighted) average of the weights of two models.
    If the new_model or the resulting merged model have a lower validation dice score
    than dice_thr then the merged model will be discarded. Otherwise it will become the
    new default model.
    """
    print("Merging...")
    config = json.load(open("db/server_config.json"))

    latest_timestamp = get_models(model_type)[-1]
    latest_model = DynamicDLModel.Load(
        open(f"{MODELS_DIR}/{model_type}/{latest_timestamp}.model", 'rb'))
    new_model = DynamicDLModel.Load(open(new_model_path, 'rb'))

    # Check that model_ids are identical
    if latest_model.model_id != new_model.model_id:
        log(
            f"WARNING: Model IDs do not match. Cannot merge models. " +
            f"({latest_model.model_id} vs {new_model.model_id})", True)
        return

    # Validate dice of uploaded model
    if evaluate_model(
            model_type, new_model, comment='(Uploaded model)',
            cleanup=False) < config["dice_threshold"]:
        log("Score of new model is below threshold.", True)
        return

    # The following is only valid if we are applying a difference between two models. However, we are sending a full model
    # merged_model = latest_model.apply_delta(new_model)

    #merged_model = keras_weighted_average(latest_model, new_model, lhs_weight=ORIGINAL_MODEL_WEIGHT)
    # The following is slower but more general (not limited to keras models, using the internal multiplication/sum functionality)
    original_weight = config["original_model_weight"]
    merged_model = latest_model * original_weight + new_model * (
        1 - original_weight)

    merged_model.reset_timestamp()

    # Validate dice of merged model
    if evaluate_model(
            model_type, merged_model, comment='(Merged model)',
            cleanup=False) < config["dice_threshold"]:
        log("Score of the merged model is below threshold.")
        return

    print("Saving merged model as new main model...")
    new_model_path = f"{MODELS_DIR}/{model_type}/{merged_model.timestamp_id}.model"
    temp_model_path = new_model_path + '.tmp'
    merged_model.dump(
        open(temp_model_path, 'wb')
    )  # write to a tmp file to avoid serving an incompletely written model
    os.rename(temp_model_path, new_model_path)
    log(f"Saved merged model with timestamp: {merged_model.timestamp_id}",
        p=True)

    # cleaning up
    try:
        tf.keras.backend.clear_session(
        )  # this should clear the memory leaks by tensorflow
    except:
        print("Error cleaning keras session")

    del latest_model
    del merged_model
    del new_model
    gc.collect()

    print("Deleting old models...")
    delete_older_models(model_type, keep_latest=config["nr_models_to_keep"])

    return
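merge_model reads three settings from db/server_config.json; the assumed shape of that file, inferred from the keys accessed above and shown as the equivalent Python dict with purely illustrative values:

# Assumed structure; the real server configuration may contain more fields.
example_server_config = {
    "original_model_weight": 0.5,  # weight of the current main model in the merge
    "dice_threshold": 0.05,        # minimum validation Dice to accept a model
    "nr_models_to_keep": 5,        # passed to delete_older_models as keep_latest
}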
Example no. 14
def evaluate_model(model_type_or_dir: Union[str, Path],
                   model: DynamicDLModel,
                   save_log=True,
                   comment='',
                   cleanup=True) -> float:
    """
    This will evaluate model on all subjects in TEST_DATA_DIR/model_type.
    Per subject all slices which have ground truth annotations will be evaluated (only a subset of all slices
    per subject).
    """

    t = time.time()

    if os.path.isdir(model_type_or_dir):
        test_files = Path(model_type_or_dir).glob("*.npz")
    else:
        test_files = Path(TEST_DATA_DIR).glob(f"{model_type_or_dir}/*.npz")

    dice_scores = []
    n_voxels = []
    for file in test_files:
        print(f"Processing subject: {file.name}")
        # actually load the data. It speeds things up dramatically, otherwise the file is kept memory-mapped from disk.
        img = {}
        with np.load(file) as npz_file:
            for label in npz_file:
                img[label] = npz_file[label]

        print("Data loaded")

        # find slices where any mask is defined
        slices_idxs = set()
        for dataset_name, dataset in img.items():
            if dataset_name.startswith('mask_'):
                n_slices = dataset.shape[2]
                slices_idxs = slices_idxs.union(_get_nonzero_slices(dataset))

        if len(slices_idxs) != n_slices:
            print('Reducing stored dataset')
            new_img = {}
            for dataset_name, dataset in img.items():
                if dataset_name.startswith('mask_') or dataset_name == 'data':
                    new_img[dataset_name] = dataset[:, :, list(slices_idxs)]
                else:
                    new_img[dataset_name] = dataset
            os.rename(file, f'{file}.orig')
            np.savez_compressed(file, **new_img)
            del new_img

        scores = defaultdict(list)
        for idx in tqdm(slices_idxs):
            #print('Running pred')
            pred = model.apply({
                "image": img["data"][:, :, idx],
                "resolution": np.abs(img["resolution"][:2]),
                "split_laterality": False
            })
            for label in pred:
                #print('Evaluating', label)
                mask_name = f"mask_{label}"
                if mask_name in img:
                    gt = img[mask_name][:, :, idx]
                else:  # if the validation set had split laterality
                    gt_L = None
                    if mask_name + '_L' in img:
                        gt_L = img[mask_name + '_L'][:, :, idx]
                    gt_R = None
                    if mask_name + '_R' in img:
                        gt_R = img[mask_name + '_R'][:, :, idx]
                    gt = np.logical_or(
                        gt_L, gt_R)  #Note: logical_or(None, None) == None

                if gt is None:
                    print(f'Warning: {label} not found in validation')
                    continue

                nr_voxels = gt.sum()
                dice = calc_dice_score(gt, pred[label])
                dice_scores.append(dice)
                n_voxels.append(nr_voxels)
                scores[label].append([dice, nr_voxels])
            del pred

        del img
        if cleanup:
            try:
                tf.keras.backend.clear_session(
                )  # this should clear the memory leaks by tensorflow
            except:
                print("Error cleaning keras session")
        scores_per_label = {
            k: np.array(v)[:, 0].mean()
            for k, v in scores.items()
        }
        print('Unweighted scores per label:', scores_per_label)

    try:
        mean_score = np.average(np.array(dice_scores),
                                weights=np.array(n_voxels))
    except ZeroDivisionError:
        mean_score = -1.0
    elapsed = time.time() - t
    if save_log:
        log(f"evaluating model {model_type_or_dir}/{model.timestamp_id}.model: Dice: {mean_score:.6f} (time: {elapsed:.2f}) {comment})",
            p=True)
        log_dice_to_csv(f"{model_type_or_dir}/{model.timestamp_id}.model",
                        mean_score)
    return mean_score
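calc_dice_score is used above but not shown in these examples; a minimal implementation of the Dice coefficient for two binary masks, included here only as an assumed reference:

import numpy as np

# Dice = 2 * |A ∩ B| / (|A| + |B|); returns 0.0 when both masks are empty.
def calc_dice_score(gt, pred):
    gt = np.asarray(gt).astype(bool)
    pred = np.asarray(pred).astype(bool)
    denom = gt.sum() + pred.sum()
    if denom == 0:
        return 0.0
    return 2.0 * np.logical_and(gt, pred).sum() / denom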
Example no. 15
    del latest_model
    del merged_model
    del new_model
    gc.collect()

    print("Deleting old models...")
    delete_older_models(model_type, keep_latest=config["nr_models_to_keep"])

    return


def log(text, p=False):
    if p:
        print(text)
    with open("db/log.txt", "a") as f:
        f.write(f"{datetime.datetime.now()} {text}\n")


def log_dice_to_csv(model_name, dice, comment=''):
    with open("db/dice.csv", "a") as f:
        f.write(
            f"{datetime.datetime.now()};{model_name};{dice:.6f};{comment}\n")


if __name__ == '__main__':
    ####### For testing #######
    model = DynamicDLModel.Load(
        open(f"{MODELS_DIR}/Thigh/1610001000.model", 'rb'))
    r = evaluate_model("Thigh", model)
Example no. 16
                            save_weights_only=True,
                            mode='auto')
    adamlr = optimizers.Adam(learning_rate=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             amsgrad=True)
    netc.compile(loss=pretrain.weighted_loss, optimizer=adamlr)
    # history = netc.fit_generator(generator=training_generator, steps_per_epoch=steps, epochs=5, callbacks=[check], verbose=1)
    history = netc.fit(x=training_generator,
                       steps_per_epoch=steps,
                       epochs=5,
                       callbacks=[check],
                       verbose=1)
    print('Done. Elapsed', time.time() - t)


model = gamba_unet()
model.load_weights('weights/weights_gamba.hdf5')
weights = model.get_weights()

modelObject = DynamicDLModel('ba333b4d-90e7-4108-aca5-9216f408d91e',
                             gamba_unet,
                             gamba_apply,
                             incremental_learn_function=leg_incremental_mem,
                             weights=weights,
                             timestamp_id="1603281013")

with open('models/Leg_1603281013.model', 'wb') as f:
    modelObject.dump(f)