Example #1
# Imports assumed from the original module; raw, gt, aff, and the checks
# callback are module-level names defined elsewhere in that script.
import numpy as np

import custom_loss
import process


def train_from_scratch(saving_loc,
                       loss=None,
                       initial_lr=0.0025,
                       gpus=1,
                       model_type="heavy paralell UNET",
                       image_interval=10,
                       check_interval=100,
                       validation_interval=200):

    # Default loss: weighted cross-entropy with per-axis class weights
    # (presumably [positive, negative] for each of the three affinity axes).
    if loss is None:
        l = custom_loss.loss()

        weights = np.array([[2.6960856, 0.61383891],
                            [4.05724285, 0.57027915],
                            [4.09752934, 0.56949214]])

        l.set_weight(weights)

        loss = l.weighted_cross

    proc = process.process(loss,
                           raw,
                           gt,
                           aff,
                           model_type=model_type,
                           precision="half",
                           save_loc=saving_loc,
                           saving_sched=[[0, 100], [1000, 10000],
                                         [100000, 20000]],
                           image_loc="tiffs/",
                           image_interval=image_interval,
                           check_interval=check_interval,
                           conf_coordinates=[[200, 216], [200, 328],
                                             [200, 328]],
                           learning_rate=initial_lr,
                           validation_frac=.2,
                           validation_interval=validation_interval,
                           check_function=checks,
                           gpus=gpus)

    # Train until proc.train reports success; on failure, reset the
    # iteration counter and retry with a 10x smaller learning rate.
    flag = proc.train(500000)
    while not flag:
        proc.iteration = 0
        proc.learning_rate *= .1
        print("\n\tNEW LR = %f" % proc.learning_rate)
        flag = proc.train(500000)

    print("training complete using LR=" + str(proc.learning_rate))
Example #2
# Assumed imports (sys, numpy, keras, plus the project's make and
# custom_loss modules); raw comes from the surrounding script.
import sys

import numpy as np
import keras as k

import custom_loss as cl
import make

# conf_raw is allocated here so the snippet runs; its shape mirrors
# conf_aff below (one batch, one channel, a 16x128x128 crop).
conf_raw = np.zeros((1, 1, 16, 128, 128))
conf_raw[0, 0] = raw[200:216, 200:328, 200:328]
conf_raw = np.einsum("bczxy->bzxyc", conf_raw)

conf_aff = np.zeros((1, 3, 16, 128, 128))
conf_aff[0] = aff[:, 200:216, 200:328, 200:328]
conf_aff = np.einsum("bczxy->bzxyc", conf_aff)
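# The einsum "bczxy->bzxyc" only reorders axes, moving channels last
# (channels-first -> channels-last); conf_aff goes from
# (1, 3, 16, 128, 128) to (1, 16, 128, 128, 3).
assert conf_aff.shape == (1, 16, 128, 128, 3)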

#################
#   setup model #
#################

model = make.make()

adam = k.optimizers.Adam(lr=.0000025)

WCE = cl.loss()

WCE.set_weight(.5)

# Multi-GPU setup: replicate the model when more than one GPU is
# requested on the command line (argv must be cast to int for both the
# print format and multi_gpu_model).
if len(sys.argv) > 1:
    GPU_N = int(sys.argv[1])
    if GPU_N > 1:
        print("\nRunning on %i GPUs...\n" % GPU_N)
        model = k.utils.multi_gpu_model(model, gpus=GPU_N)
    else:
        print("\nRunning in single GPU mode...\n")
else:
    print("\nNo GPU num argument, running in single GPU mode...\n")

model.compile(loss=WCE.weighted_cross, optimizer=adam, metrics=['accuracy'])
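
A quick smoke test after compiling, using the confirmation crop prepared above; the expected output shape assumes the network predicts three affinity channels, channels-last:

preds = model.predict(conf_raw)
print(preds.shape)  # e.g. (1, 16, 128, 128, 3)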
Example #3
# Assumed imports: os, math, numpy, and the project's custom_loss and
# process modules; raw/gt/aff and the watershed helpers are module-level.
import os
import math

import numpy as np

import custom_loss
import process


def find_best_segmentation(model_funct,
                           folder,
                           validation_frac,
                           metric="choose",
                           raw_vol=raw,
                           gt_vol=gt,
                           aff_vol=aff,
                           top_n_valid=3,
                           gpus=1):

    # Same per-axis class weights as the training examples; they must be
    # applied with set_weight before weighted_cross is used.
    l = custom_loss.loss()

    weights = np.array([[2.6960856, 0.61383891],
                        [4.05724285, 0.57027915],
                        [4.09752934, 0.56949214]])

    l.set_weight(weights)

    models = []
    model_keys = []

    print("\nGrabbing models...\n")
    for file in os.listdir(folder):

        model = model_funct(verbose=0)

        try:
            model.load_weights(folder + "/" + file)
            models.append(model)
            model_keys.append(file)
        except OSError:
            print("\n%s is invalid\n" % file)
            continue

    print("\nLoaded %i models.\n" % len(models))
    for key in model_keys:
        print("\t" + key)

    proc = process.process(l.weighted_cross,
                           raw_vol,
                           gt_vol,
                           aff_vol,
                           model=models[0],
                           validation_frac=validation_frac,
                           gpus=gpus)

    valid_loss = []

    print("\nGetting validation loss for all models...\n")
    for model in models:

        proc.model = model

        valid_loss.append(proc.calc_validation_loss())

    top_models = []
    top_model_keys = []

    # Lower validation loss is better: keep the top_n_valid smallest losses.
    top_indices = np.asarray(valid_loss).argsort()[:top_n_valid]

    for index in top_indices:
        top_models.append(models[index])
        top_model_keys.append(model_keys[index])

    print("\nFound top %i models.\n" % top_n_valid)

    for key in top_model_keys:
        print("\t%s" % key)

    print("\nWatershed sweep...\n")
    segs, metrics = predict_and_watershed_on_list_of_models(top_models,
                                                            metric=metric)

    if metric != "choose":

        index = np.where(metrics == np.max(metrics))

        index = index[0]

        print("BEST PERFORMANCE WAS =" + str(np.max(metrics)))

        seg = segs[int(index)]

    else:

        n = 0
        for metric in metrics:
            print("\tSegmentation %i: " % n)
            print("\t\tSplit: " + str(metric[0]))
            print("\t\tMerge: " + str(metric[1]))
            n += 1

        index = input("\n\nWhich model?")

        seg = segs[int(index)]

    # Map the chosen segmentation index back to the model that produced it
    # (each model contributes an equal share of the watershed variants).
    model_key = top_model_keys[math.floor(
        (int(index) / len(segs)) * len(top_models))]

    print("Model %s is the choice" % model_key)

    save_segmentation_tifs(gt_vol, seg)
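
A minimal usage sketch; make.make as the model factory and the checkpoint folder are assumptions carried over from the other examples (the factory must accept verbose=0):

find_best_segmentation(make.make, "malis_heavy_net/",
                       validation_frac=.2, top_n_valid=3)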
Example #4
# Assumed imports, as in the earlier examples: numpy, custom_loss, process;
# raw/gt/aff, checks, and the watershed helper are module-level names.
import numpy as np

import custom_loss
import process


def run_process_with_checks_saved_model(iteration,
                                        raw_vol=raw,
                                        gt_vol=gt,
                                        aff_vol=aff,
                                        initial_lr=0.00025,
                                        check_function=checks,
                                        validation_interval=200,
                                        model_type="heavy paralell UNET",
                                        save_loc="malis_heavy_net/",
                                        check_interval=100,
                                        saving_schedule=[[0, 100],
                                                         [1000, 10000]],
                                        loss=None,
                                        image_interval=10,
                                        gpus=1,
                                        random_contrast=False):
    # Default loss: the same weighted cross-entropy as train_from_scratch.
    if loss is None:

        l = custom_loss.loss()

        weights = np.array([[2.6960856, 0.61383891],
                            [4.05724285, 0.57027915],
                            [4.09752934, 0.56949214]])

        l.set_weight(weights)

        loss = l.weighted_cross

    # Resume from the checkpoint saved at `iteration` (pickup_file /
    # pickup_iteration) rather than training from scratch.
    proc = process.process(
        loss,
        raw_vol,
        gt_vol,
        aff_vol,
        model_type=model_type,
        precision="half",
        save_loc=save_loc,
        saving_sched=saving_schedule,
        image_loc="tiffs/",
        check_interval=check_interval,
        image_interval=image_interval,
        conf_coordinates=[[200, 216], [200, 328], [200, 328]],
        learning_rate=initial_lr,
        validation_frac=.2,
        validation_interval=validation_interval,
        check_function=check_function,
        pickup_file="malis_heavy_net/model%i" % iteration,
        pickup_iteration=iteration,
        gpus=gpus,
        random_contrast=random_contrast,
    )
    try:
        flag = proc.train(500000)
        while not flag:
            proc.iteration = 0
            proc.learning_rate *= .1
            flag = proc.train(500000)
    except KeyboardInterrupt:
        # On manual interrupt, segment a small sub-volume with the current
        # weights so the run still produces inspectable tiffs.
        raw_vol = raw_vol[0:32]
        gt_vol = gt_vol[0:32]
        predict_and_watershed_on_vol_with_gt_conf_and_save_tiffs_with_model(
            proc.model, raw_vol, gt_vol, .5, 1)

    print("training complete using LR=" + str(proc.learning_rate))