Example #1
def sensitivity_specificity(path_to_model_weights,
                            crop_shape=(64,64),
                            threshold=0.5,
                            batch_size=32,
                            nb_gpus=1,
                            ):
    path_to_validation_data = "../IntermediateData/validation_data.npy"
    path_to_validation_label = "../IntermediateData/validation_label.npy"
    data = np.load(path_to_validation_data)
    labels = np.load(path_to_validation_label)
    
    # Rebuild the network for the given crop size (3-channel input, 1-channel output)
    # and load the trained weights; wrap it for multi-GPU inference if requested.
    img_dims, output_dims = crop_shape + (3,), crop_shape + (1,)
    model_single_gpu = seunet_model.seunet(img_dims, output_dims)
    model_single_gpu.load_weights(path_to_model_weights)
    if int(nb_gpus) > 1:
        model_multi_gpu = multi_gpu_model(model_single_gpu, gpus=nb_gpus)
    else:
        model_multi_gpu = model_single_gpu

    # Binarise the sigmoid outputs at the given threshold.
    predicted = model_multi_gpu.predict(data, batch_size=batch_size)
    predicted[predicted > threshold] = 1
    predicted[predicted <= threshold] = 0

    # Sensitivity = TP / (TP + FN); specificity = TN / (TN + FP),
    # computed with boolean masks over the binarised prediction map.
    sensitivity = predicted[(predicted == 1) & (labels == 1)].size / float(labels[labels == 1].size)
    specificity = predicted[(predicted == 0) & (labels == 0)].size / float(labels[labels == 0].size)

    return sensitivity, specificity
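
The metric computation above relies only on boolean masking of the binarised prediction map against the label map. A minimal, self-contained NumPy sketch of the same masking trick on toy arrays (values are illustrative, not from the validation set):

import numpy as np

# Toy binary masks standing in for thresholded predictions and ground-truth labels.
predicted = np.array([1, 1, 0, 0, 1, 0], dtype=float)
labels = np.array([1, 0, 0, 1, 1, 0], dtype=float)

# Sensitivity = TP / (TP + FN); specificity = TN / (TN + FP).
sensitivity = predicted[(predicted == 1) & (labels == 1)].size / float(labels[labels == 1].size)
specificity = predicted[(predicted == 0) & (labels == 0)].size / float(labels[labels == 0].size)
print(sensitivity, specificity)  # 0.666..., 0.666... for this toy example
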
Example #2
def load_model(path_to_model_weights, crop_shape=(64, 64), nb_gpus=1):
    # Rebuild the seunet architecture, restore the saved weights and, if more
    # than one GPU is requested, wrap the model for multi-GPU use.
    # (crop_shape and nb_gpus are taken as arguments here; defaults mirror Example #1.)
    img_dims, output_dims = crop_shape + (3,), crop_shape + (1,)
    model_single_gpu = seunet_model.seunet(img_dims, output_dims)
    model_single_gpu.load_weights(path_to_model_weights)
    if int(nb_gpus) > 1:
        model_multi_gpu = multi_gpu_model(model_single_gpu, gpus=nb_gpus)
    else:
        model_multi_gpu = model_single_gpu
    return model_multi_gpu
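
A minimal usage sketch, assuming seunet_model is importable as in the other examples; the weight path, crop size and input data below are illustrative, not values from the original project:

import numpy as np

# Hypothetical weight file following the naming pattern used in Example #6.
model = load_model("../output/mm01dd01_00/weights_epoch=010.h5",
                   crop_shape=(64, 64),
                   nb_gpus=1)

# Predict segmentation masks for a batch of 64x64 3-channel crops (random data here).
crops = np.random.rand(4, 64, 64, 3)
predictions = model.predict(crops, batch_size=4)
print(predictions.shape)  # (4, 64, 64, 1)
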
Example #3
def train(path_to_image, path_to_target, model_path, batch_size, nb_epoch,
          nb_gpus):

    # X_sketch is the original pathology image: an np array normalised to [0, 1].
    X_sketch_train = np.load(path_to_image)
    # X_full is the segmented target image: a 4-channel np array normalised to [0, 1].
    X_full_train = np.load(path_to_target)

    # Per-sample shapes (height, width, channels); axis 0 is the sample axis.
    img_dim = X_full_train.shape[-3:]
    img_dim0 = X_sketch_train.shape[-3:]
    train_size = X_full_train.shape[0]

    if img_dim[:-1] != img_dim0[:-1]:
        raise ValueError("output shape must be the same as that of input")

    opt_generator = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    generator_model = seunet(img_dim0, img_dim)

    print(nb_gpus)
    if int(nb_gpus) > 1:
        unet = multi_gpu_model(generator_model, gpus=nb_gpus)
    else:
        unet = generator_model

    unet.compile(loss=mean_dice_coef_loss, optimizer=opt_generator)

    print("Start training")
    for e in range(nb_epoch):

        e_shift = e + 1

        unet.fit(X_sketch_train,
                 X_full_train,
                 batch_size=batch_size,
                 epochs=1,
                 validation_split=0.1)

        print('Epoch %s/%s done' % (e_shift, nb_epoch))
        print("")

        #        model_path = pix2pix_path + '/models'

        # Save the single-GPU weights every 10 epochs.
        if e % 10 == 9:

            if not os.path.exists(model_path):
                os.mkdir(model_path)

            gen_weights_path = model_path + '/seunet_weights_%s.h5' % (1 + e)
            generator_model.save_weights(gen_weights_path, overwrite=True)
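
mean_dice_coef_loss is defined elsewhere in the repository and is not shown here; a common soft-Dice formulation for the old Keras backend API, given as an assumption rather than the project's actual definition, looks like this:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened batch.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def mean_dice_coef_loss(y_true, y_pred):
    # Minimising 1 - Dice maximises the overlap between prediction and target.
    return 1.0 - dice_coef(y_true, y_pred)
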
Example #4
def make_cnn(hp_value, nb_gpus=1):
    crop_shape = hp_value["crop_shape"]
    # Set up our model: 3-channel input crops, 1-channel output masks, with
    # encoder/decoder filter counts taken from the hyperparameter dictionary.
    img_dims, output_dims = crop_shape + (3, ), crop_shape + (1, )
    model_single_gpu = seunet_model.seunet(
        img_dims,
        output_dims,
        filter_list_encoding=hp_value["filter_list_encoding"],
        filter_list_decoding=hp_value["filter_list_decoding"])
    print(nb_gpus)
    if int(nb_gpus) > 1:
        model_multi_gpu = multi_gpu_model(model_single_gpu, gpus=nb_gpus)
    else:
        model_multi_gpu = model_single_gpu

    return model_multi_gpu
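
A sketch of the hyperparameter dictionary shape make_cnn expects; the crop size and filter counts below are placeholders, not tuned values from the original search:

import numpy as np

hp_value = {
    "crop_shape": (128, 128),
    "filter_list_encoding": np.array([16, 32, 64, 128]),
    "filter_list_decoding": np.array([64, 32, 16]),
}
model = make_cnn(hp_value, nb_gpus=1)
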
Example #5
def load_trained_seunet(path_to_cnn,
                        epoch,
                        crop_shape,
                        nb_gpus,
                        ):
    # Reconstruct the file names written by the training run in the given folder
    # (same layout as the train() function in Example #6).
    path_to_model_weights = path_to_cnn + "weights_epoch=%03d.h5" % epoch
    path_to_save_filter_list = path_to_cnn + "filter_list_%s.npy"
    filter_list_encoding = np.load(path_to_save_filter_list % "encoding")
    filter_list_decoding = np.load(path_to_save_filter_list % "decoding")
    
    img_dims, output_dims = crop_shape+(3,), crop_shape+(1,)
    model_single_gpu = seunet_model.seunet(img_dims, output_dims, filter_list_encoding, filter_list_decoding)
    model_single_gpu.load_weights(path_to_model_weights)
    if int(nb_gpus) > 1:
        model_multi_gpu = multi_gpu_model(model_single_gpu, gpus=nb_gpus)
    else:
        model_multi_gpu = model_single_gpu
    return model_multi_gpu
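
A minimal usage sketch, assuming a run folder laid out by the train() function in Example #6; the folder name and epoch are illustrative:

model = load_trained_seunet(path_to_cnn="../output/mm01dd01_00/",
                            epoch=100,
                            crop_shape=(128, 128),
                            nb_gpus=1)
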
Example #6
def train(
        train_ids=np.arange(20, 38),
        validation_ids=np.arange(39, 41),
        val_data_size=2048,
        batch_size=32,
        data_size_per_epoch=2**14,
        #          steps_per_epoch=2**14,
        epochs=256,
        data_shape=(584, 565),
        crop_shape=(128, 128),
        filter_list_encoding=np.array([]),
        filter_list_decoding=np.array([]),
        if_save_img=True,
        nb_gpus=1,
):

    steps_per_epoch = data_size_per_epoch // batch_size
    # set our model
    img_dims, output_dims = crop_shape + (3, ), crop_shape + (1, )
    model_single_gpu = seunet_model.seunet(img_dims, output_dims,
                                           filter_list_encoding,
                                           filter_list_decoding)
    print(nb_gpus)
    if int(nb_gpus) > 1:
        model_multi_gpu = multi_gpu_model(model_single_gpu, gpus=nb_gpus)
    else:
        model_multi_gpu = model_single_gpu

    # load data
    train_images, train_manuals = load_image_manual(image_ids=train_ids,
                                                    data_shape=data_shape)
    #    validation_images, validation_manuals = \
    #        load_image_manual(image_ids=validation_ids,data_shape=data_shape,crop_shape=crop_shape)
    val_data, val_label = make_validation_dataset(
        validation_ids=validation_ids,
        load=True,
        val_data_size=val_data_size,
        data_shape=data_shape,
        crop_shape=crop_shape,
    )

    train_gen = batch_iter(
        images=train_images,
        manuals=train_manuals,
        crop_shape=crop_shape,
        steps_per_epoch=steps_per_epoch,
        batch_size=batch_size,
    )

    #    path_to_save_model = "../output/ep{epoch:04d}-valloss{val_loss:.4f}.h5"
    path_to_cnn_format = "../output/mm%02ddd%02d_%02d/"
    # Make a fresh, date-stamped folder to save history and models (up to 10 runs per day).
    now = datetime.datetime.now()
    for count in range(10):
        path_to_cnn = path_to_cnn_format % (now.month, now.day, count)
        if not os.path.exists(path_to_cnn):
            os.mkdir(path_to_cnn)
            break
    path_to_code = "./train_main.py"
    path_to_code_moved = path_to_cnn + "train_main_used.py"
    path_to_save_model = path_to_cnn + "model_epoch=%03d.h5"
    path_to_save_weights = path_to_cnn + "weights_epoch=%03d.h5"
    path_to_save_filter_list = path_to_cnn + "filter_list_%s.npy"  # % encoding or decoding

    # Keep a copy of the training script alongside the run for reproducibility.
    shutil.copyfile(path_to_code, path_to_code_moved)

    np.save(path_to_save_filter_list % "encoding", filter_list_encoding)
    np.save(path_to_save_filter_list % "decoding", filter_list_decoding)

    #    callbacks = []
    #    callbacks.append(ModelCheckpoint(path_to_save_model, monitor='val_loss', save_best_only=False))
    #    callbacks.append(CSVLogger("log%03d.csv" % counter))
    #    callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0.0001 , patience=patience))
    opt_generator = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    #    model_multi_gpu.compile(loss='binary_crossentropy', optimizer=opt_generator)
    model_multi_gpu.compile(loss=seunet_main.mean_dice_coef_loss,
                            optimizer=opt_generator)

    for epoch in range(1, epochs + 1):
        model_multi_gpu.fit_generator(
            train_gen,
            steps_per_epoch=steps_per_epoch,
            epochs=1,
            #                                      epochs=epochs,
            #                                      callbacks=callbacks,
            validation_data=(val_data, val_label))
        print('Epoch %s/%s done' % (epoch, epochs))
        print("")

        # Checkpoint and evaluate after every epoch (epoch % 1 == 0 is always true).
        if epoch > 0 and epoch % 1 == 0:
            print(epoch)
            model_single_gpu.save(path_to_save_model % (epoch))
            model_single_gpu.save_weights(path_to_save_weights % (epoch))
            validation_accuracy = evaluation.whole_slide_accuracy(
                path_to_cnn=path_to_cnn,
                epoch=epoch,
                model=model_multi_gpu,
                image_ids=validation_ids,
                data_shape=data_shape,
                crop_shape=crop_shape,
                if_save_img=if_save_img,
                nb_gpus=nb_gpus,
                batch_size=batch_size,
            )
            print("validation_accuracy = ", validation_accuracy)