Example #1
import csv
import ntpath

import cv2
import numpy

# helpers, settings and the module-level constant SCALE_SIZE come from the
# surrounding project and are not shown here.

def prepare_patient_images(patient_id, intermediate_crop=0):
    # Collect all preprocessed slices for this patient; file names start with
    # the zero-padded patient id.
    file_lst = []
    prefix = str(patient_id).rjust(4, '0')
    src_files = helpers.get_files(settings.BASE_PREPROCESSEDIMAGES_DIR, prefix + "*.png")

    patient_dir = helpers.get_pred_patient_dir(patient_id)
    helpers.create_dir_if_not_exists(patient_dir)
    patient_img_dir = helpers.get_pred_patient_img_dir(patient_id)
    helpers.create_dir_if_not_exists(patient_img_dir)
    helpers.delete_files(patient_img_dir, "*.png")

    # Every entry in pred.lst needs an overlay column; write one all-black
    # dummy overlay that every image can point to.
    dummy = numpy.zeros((settings.TARGET_SIZE, settings.TARGET_SIZE))
    cv2.imwrite(patient_img_dir + "dummy_overlay.png", dummy)

    for src_path in src_files:
        file_name = ntpath.basename(src_path)
        org_img = cv2.imread(src_path, cv2.IMREAD_GRAYSCALE)
        # Crop to the SAX region with CLAHE contrast equalisation and no rotation.
        cropped_img = helpers.prepare_cropped_sax_image(org_img, clahe=True, intermediate_crop=intermediate_crop, rotate=0)
        if SCALE_SIZE is not None:
            cropped_img = cv2.resize(cropped_img, (SCALE_SIZE, SCALE_SIZE), interpolation=cv2.INTER_AREA)

        cv2.imwrite(patient_img_dir + file_name, cropped_img)
        file_lst.append([file_name, "dummy_overlay.png"])

    # Write the image/overlay pairs as a tab-separated list file
    # ("wb" is the Python 2 csv idiom used by the original project).
    with open(patient_img_dir + "pred.lst", "wb") as f:
        writer = csv.writer(f, delimiter='\t')
        writer.writerows(file_lst)
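
A minimal usage sketch (assuming the project's helpers/settings modules are importable and preprocessed PNGs already exist on disk; the patient ids below are hypothetical):

if __name__ == "__main__":
    # Hypothetical patient ids; the real list would come from the dataset.
    for pid in [1, 2, 3]:
        prepare_patient_images(pid, intermediate_crop=0)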
Example #2
import ntpath

import cv2
import mxnet as mx
import numpy

# helpers, settings, FileIter and the constants CROP_SIZE / INPUT_SIZE come
# from the surrounding project and are not shown here.

def predict_overlays_patient(patient_id, pred_model_name, pred_model_iter, save_transparents=False, threshold_value=-1):
    src_image_dir = helpers.get_pred_patient_img_dir(patient_id)
    overlay_dir = helpers.get_pred_patient_overlay_dir(patient_id)
    helpers.delete_files(overlay_dir, "*.png")
    transparent_overlay_dir = helpers.get_pred_patient_transparent_overlay_dir(patient_id)
    helpers.delete_files(transparent_overlay_dir, "*.png")

    # Pick the largest batch size in [2, 5] that evenly divides the number of
    # images so every batch is full; fall back to 1 otherwise.
    with open(src_image_dir + "pred.lst") as f:
        num_lines = sum(1 for _ in f)
    batch_size = 1
    for try_size in [2, 3, 4, 5]:
        if num_lines % try_size == 0:
            batch_size = try_size

    pred_model = mx.model.FeedForward.load(pred_model_name, pred_model_iter, ctx=mx.gpu(), numpy_batch_size=batch_size)

    if not settings.QUICK_MODE:
        # Test-time augmentation: predict on 5 crop positions
        # (four corners plus the centre) and keep one overlay per image.
        predictions_list = []
        predictions = []
        for crop_indents in [[1, 1], [1, CROP_SIZE - 1], [CROP_SIZE - 1, 1], [CROP_SIZE - 1, CROP_SIZE - 1], [CROP_SIZE // 2, CROP_SIZE // 2]]:
            # for crop_indents in [[CROP_SIZE / 2, CROP_SIZE / 2], [CROP_SIZE / 2, 1], [CROP_SIZE / 2, CROP_SIZE - 1]]:
            pred_iter = FileIter(root_dir=src_image_dir, flist_name="pred.lst", batch_size=batch_size, augment=False, mean_image=None, crop_size=INPUT_SIZE, crop_indent_x=crop_indents[0], crop_indent_y=crop_indents[1])
            tmp_predictions = pred_model.predict(pred_iter)
            predictions_list.append(tmp_predictions)

        averaged_overlays = []
        for image_index in range(0, predictions_list[0].shape[0]):
            # Find the crop whose predicted overlay has the largest total
            # activation (the min_* bookkeeping is computed but never used).
            min_pixels = 99999999.
            min_index = -1
            max_pixels = -99999999.
            max_index = -1
            for crop_index in range(0, len(predictions_list)):
                pred_overlay = predictions_list[crop_index][image_index]
                pixel_sum = pred_overlay.sum()
                if pixel_sum < min_pixels:
                    min_pixels = pixel_sum
                    min_index = crop_index

                if pixel_sum > max_pixels:
                    max_pixels = pixel_sum
                    max_index = crop_index

            # Only the crop with the maximum activation survives the filter
            # below, so the "average" is effectively that single overlay.
            sum_overlay = None
            sum_item_count = 0
            min_index = -1
            for crop_index in range(0, len(predictions_list)):
                if crop_index != max_index:
                    continue
                pred_overlay = predictions_list[crop_index][image_index]
                if sum_overlay is None:
                    # Copy so the in-place division below cannot touch predictions_list.
                    sum_overlay = pred_overlay.copy()
                    sum_item_count += 1
                else:
                    sum_overlay += pred_overlay
                    sum_item_count += 1
            sum_overlay /= sum_item_count
            averaged_overlays.append(sum_overlay)

        predictions = numpy.vstack(averaged_overlays)
    else:
        pred_iter = FileIter(root_dir=src_image_dir, flist_name="pred.lst", batch_size=batch_size, augment=False, mean_image=None, crop_size=INPUT_SIZE)
        predictions = pred_model.predict(pred_iter)

    for i in range(len(predictions)):
        # Reshape the flat prediction back to a 2-D mask, pad it so it lines
        # up with the uncropped source image, and scale it to 0-255.
        y = predictions[i]
        y = y.reshape(INPUT_SIZE, INPUT_SIZE)
        border_size = CROP_SIZE // 2
        y = cv2.copyMakeBorder(y, border_size, border_size, border_size, border_size, cv2.BORDER_CONSTANT, value=0)
        y *= 255
        if threshold_value >= 0:
            # Optional hard threshold: binarise the overlay.
            y[y <= threshold_value] = 0
            y[y > threshold_value] = 255

        file_name = ntpath.basename(pred_iter.image_files[i])
        cv2.imwrite(overlay_dir + file_name, y)

        if save_transparents:
            channels = cv2.split(y)
            # Build a 4-channel BGRA overlay (OpenCV channel order): blue and
            # green carry the mask, red stays empty, alpha makes it see-through.
            empty = numpy.zeros(channels[0].shape, dtype=numpy.float32)
            alpha = channels[0].copy()
            alpha[alpha == 255] = 75
            channels = (channels[0], channels[0], empty, alpha)

            transparent_overlay = cv2.merge(channels)
            cv2.imwrite(transparent_overlay_dir + file_name, transparent_overlay)
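
A minimal usage sketch (the model prefix "lv_segmenter" and epoch number are hypothetical; it assumes prepare_patient_images has already been run for the patient and an MXNet checkpoint exists under that prefix):

# Hypothetical call: load checkpoint "lv_segmenter" at epoch 30, save
# semi-transparent copies of the overlays, and binarise at pixel value 100.
predict_overlays_patient(42, "lv_segmenter", 30, save_transparents=True, threshold_value=100)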