Example #1
0
File: main_keras.py | Project: DIDSR/dldp
        # NOTE(review): incomplete excerpt — the enclosing function starts before
        # this fragment and the model_train(...) call is cut off at the end.
        # Train one model per configured training type (e.g. with/without color noise).
        for training_type in training_types:
            # Collect file paths (and tumor/normal labels) for the training and
            # validation patch sets; returns a 2-element sequence:
            # [0] training patches, [1] validation patches.
            training_validation_patches = tpp.training_patch_paths(
                image_patch_dir, exclude_normal_list, validation_slides_normal,
                validation_slides_tumor, path_to_save_model, IIIdhistech_only,
                hnm_dir, pnt_dir)
            # A python dataframe is created to include the paths of
            # all the training and validation patches and their labels (tumor or not)
            training_patches = training_validation_patches[0]
            validation_patches = training_validation_patches[1]
            print('number of validation patches: %d ' %
                  len(validation_patches))
            # set the path and name of the best trained models to be saved.
            # One sub-folder per (training_type, patches_category) combination.
            sub_folder = '%s/%s_%s' % (path_to_save_model, training_type,
                                       patches_category)

            fm.creat_folder(sub_folder)
            # os.mkdir(sub_folder)
            # Checkpoint filename template; {epoch:02d}/{val_accuracy:.4f} are
            # filled in by Keras at save time.
            # NOTE(review): the leading '/%s/' prepends a slash to sub_folder,
            # which already looks like an absolute path — this yields a
            # double slash (harmless on POSIX) — confirm intent.
            model_name_path = '/%s/googlenetv1_keras_no_color_no_norm_%s_%s_%s-{epoch:02d}-{val_accuracy:.4f}.hdf5' % (
                sub_folder, model_name, current_time, patches_category)
            if training_type == 'no_color_noise':
                # Build batch generators over the patch path lists.
                # Presumably tpp.gen_imgs yields (image_batch, label_batch)
                # tuples for Keras — confirm against tpp's definition.
                train_generator = tpp.gen_imgs(training_patches, BATCH_SIZE,
                                               crop_size, folder_to_save)
                validation_generator = tpp.gen_imgs(validation_patches,
                                                    BATCH_SIZE, crop_size,
                                                    folder_to_save)
                ###################################################################
                # train the neural network
                ########
                # NOTE(review): call continues past the end of this excerpt.
                history = model_train(train_generator,
                                      validation_generator,
                                      model_name_path,
Example #2
0
    # NOTE(review): incomplete excerpt — the enclosing function starts before
    # this fragment and the inner `with` block continues past the end.
    # Hard-coded CAMELYON16 dataset locations for lesion annotations and
    # ground-truth tumor masks.
    anno_path = '/raida/wjc/CAMELYON16/training/lesion_annotations'
    mask_dir = '/raida/wjc/CAMELYON16/training/masking'
    # All whole-slide images (WSIs) under slide_path, in deterministic order.
    slide_paths_total = glob.glob(osp.join(slide_path, '*.tif'))
    slide_paths_total.sort()

    # Patch size (height, width) in pixels.
    crop_size = [224, 224]

    # Output root: one sub-folder per slide will be created here.
    patch_near_tumor_dir = '/raidb/wli/testing_1219/patch_near_tumor'

    for i in tqdm(range(0, len(slide_paths_total))):
        # sampletotal = pd.DataFrame([])

        # Open the WSI; the context manager releases the slide handle on exit.
        with openslide.open_slide(slide_paths_total[i]) as slide:

            # Per-slide output folder, named after the slide file (no extension).
            # NOTE(review): here fm.creat_folder takes (name, parent_dir) —
            # elsewhere in this project it is called with a single full path;
            # verify the helper's signature.
            new_folder = fm.creat_folder(
                osp.splitext(osp.basename(slide_paths_total[i]))[0],
                patch_near_tumor_dir)
            # Matching ground-truth mask: tumor_xxx.tif -> tumor_xxx_mask.tif.
            truth_slide_path = osp.join(
                mask_dir,
                osp.basename(slide_paths_total[i]).replace(
                    '.tif', '_mask.tif'))
            # Matching lesion-annotation XML for the same slide.
            Anno_path_xml = osp.join(
                anno_path,
                osp.basename(slide_paths_total[i]).replace('.tif', '.xml'))

            with openslide.open_slide(str(truth_slide_path)) as truth:

                #slide = openslide.open_slide(slide_paths_total[i])
                # Parse the annotation XML into a DataFrame with 'X'/'Y' columns.
                annotations = convert_xml_df(str(Anno_path_xml))
                # NOTE(review): Series.get_values() was deprecated in pandas
                # 0.25 and removed in 1.0 — .to_numpy() / .tolist() is the
                # modern equivalent; this code requires an old pandas.
                x_values = list(annotations['X'].get_values())
                y_values = list(annotations['Y'].get_values())
Example #3
0
    # NOTE(review): incomplete excerpt — enclosing function starts before this
    # fragment, and the index_paths dict literal is cut off at the end.
    # SGE_TASK_ID will not be used here.
    # origin_taskid = int(os.environ['SGE_TASK_ID'])
    # Supported stain color-normalization methods.
    color_norm_methods = ['Vahadane', 'Reinhard', 'Macenko']
    # Reference tile used as the normalization target.
    template_image_path = '/home/wli/DeepLearningCamelyon/dldp/data/tumor_st.png'
    # Toggle: run with or without stain normalization.
    color_norm = False
    if color_norm:
        # Index 2 selects 'Macenko'.
        color_norm_method = color_norm_methods[2]
        # Fit the normalizer to the template image once, reused downstream.
        fit = pswh.color_normalization(template_image_path, color_norm_method)
    else:
        # 'baseline' marks the un-normalized run in output paths/log names.
        color_norm_method = 'baseline'
        fit = None
    # Results folder encodes slide category and normalization method.
    path_for_results = '/raidb/wli/testing_1219/hnm/%s_%s' % (
        slide_category, color_norm_method)
    log_path = '/raidb/wli/testing_1219/hnm/log_files'
    fm.creat_folder(log_path)
    # NOTE(review): log_file is opened without a context manager; whether it
    # is closed depends on code past this excerpt.
    log_file = open('%s/%s.txt' % (log_path, color_norm_method), 'w')
    IIIdhistech_only = False
    # the WSI images from Camelyon16 challenge.
    slide_paths = {
        "normal": '/raida/wjc/CAMELYON16/training/normal/',
        "tumor": '/raida/wjc/CAMELYON16/training/tumor/',
        "test": '/raida/wjc/CAMELYON16/testing/images'
    }

    # the index_path is place to store all the coordinate of tiled patches
    ####################################################################################################
    # the slide and dimension information retrievaled based on the name of index_paths to make sure all
    # dimension, index_paths, slide are all matched
    ####################################################################################################
    # NOTE(review): dict literal continues beyond the end of this excerpt.
    index_paths = {
Example #4
0
        # NOTE(review): incomplete excerpt — these first lines are the tail of
        # a dict literal (presumably `dimensions = {...}` keyed by slide
        # category) whose opening and "normal" key are outside this view.
        '/raidb/wli/Final_Results/Display/pred_dim_0314/training-updated/normal/dimensions',
        "tumor":
        '/raidb/wli/Final_Results/Display/pred_dim_0314/training-updated/tumor/dimensions',
        "test": '/raidb/wli/Final_Results/Display/pred_dim_0314/testing',
    }
    # the dimension of the rectangle tissue region from WSI images was stored individually as npy file in the folder of pred_dim_0314
    Folder_dimension = dimensions[slide_category]

    # One .npy dimension file per slide, sorted for deterministic pairing.
    dimension_files = glob.glob(osp.join(Folder_dimension, '*.npy'))
    dimension_files.sort()
    print(dimension_files)

    # Here is the folder for prediction results.The prediction results are organized into folders. Each folder corresponds to a WSI image.
    # Inside folder, there are large number of npy files. each file
    # is a 14x14x160 array.
    Folder_Heatmap = '/raidb/wli/testing_1219/heat_map_build/%s' % slide_category
    fm.creat_folder(Folder_Heatmap)
    # Patch-sampling stride (pixels) used when the predictions were tiled.
    Stride = 56
    # Stride = 16

    # Emulates SGE array-task ids 1..159 in a single process; taskid is
    # converted to a 0-based index into `dirs`.
    # NOTE(review): `dirs` and `Folder_Prediction_Results` are defined outside
    # this excerpt — presumably the per-slide prediction folders.
    for taskid in range(1, 160):
        i = taskid - 1

        # Guard against task ids beyond the number of prediction folders.
        if i < len(dirs):
            pred_folder = osp.join(Folder_Prediction_Results[slide_category],
                                   dirs[i])

            print(pred_folder)
            # Stitch the per-patch prediction arrays into one heat map per WSI.
            stitch_preded_patches(dimension_files, pred_folder, Folder_Heatmap,
                                  Stride)