Example #1
def ED_ES_histogram_matching(reference_subject, target_subject):
    reference_ed_nim = nib.load(str(reference_subject.ed_path))
    reference_ed_image = reference_ed_nim.get_fdata()
    # reference_ed_image = rescale_intensity(reference_ed_image)

    reference_es_nim = nib.load(str(reference_subject.es_path))
    reference_es_image = reference_es_nim.get_fdata()
    # reference_es_image = rescale_intensity(reference_es_image)

    target_ed_nim = nib.load(str(target_subject.ed_path))
    target_ed_image = target_ed_nim.get_fdata()
    # target_ed_image = rescale_intensity(target_ed_image)

    target_es_nim = nib.load(str(target_subject.es_path))
    target_es_image = target_es_nim.get_fdata()
    # target_es_image = rescale_intensity(target_es_image)

    matched_ed = match_histograms(target_ed_image,
                                  reference_ed_image,
                                  multichannel=False)
    matched_es = match_histograms(target_es_image,
                                  reference_es_image,
                                  multichannel=False)

    nim2 = nib.Nifti1Image(matched_ed, affine=target_ed_nim.affine)
    nim2.header['pixdim'] = target_ed_nim.header['pixdim']
    nib.save(nim2, str(target_subject.ed_path))

    nim2 = nib.Nifti1Image(matched_es, affine=target_es_nim.affine)
    nim2.header['pixdim'] = target_es_nim.header['pixdim']
    nib.save(nim2, str(target_subject.es_path))
Example #2
def image_normalise(style,
                    content,
                    gamma=1,
                    sharpen=1,
                    S_TYPE=None,
                    C_TYPE=None):
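    """Optionally normalise the style and content images (histogram matching,
    contrast stretching, histogram equalisation, or CLAHE via S_TYPE/C_TYPE),
    then apply gamma correction and sharpening, returning both as PIL Images."""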

    style = np.array(style)
    content = np.array(content)

    #DIFFERENT TYPES OF NORMALISATION FOR STYLE IMAGE
    if (S_TYPE == 'MATCH'):
        style = match_histograms(style, content, multichannel=True)
    elif (S_TYPE == 'CONTRAST'):
        p2, p98 = np.percentile(style, (2, 98))
        style = rescale_intensity(style, in_range=(p2, p98))
    elif (S_TYPE == 'HISTO'):
        style = equalize_hist(style)
        style = (style * 255).astype(np.uint8)
    elif (S_TYPE == 'ADAPT'):
        style = equalize_adapthist(style, clip_limit=0.01)
        style = (style * 255).astype(np.uint8)

    #DIFFERENT TYPES OF NORMALISATION FOR CONTENT IMAGE
    if (C_TYPE == 'MATCH'):
        content = match_histograms(content, style, multichannel=True)
    elif (C_TYPE == 'CONTRAST'):
        p2, p98 = np.percentile(content, (2, 98))
        content = rescale_intensity(content, in_range=(p2, p98))
    elif (C_TYPE == 'HISTO'):
        content = equalize_hist(content)
        content = (content * 255).astype(np.uint8)
    elif (C_TYPE == 'ADAPT'):
        content = equalize_adapthist(content, clip_limit=0.01)
        content = (content * 255).astype(np.uint8)

    #GAMMA ADJUSTMENT
    if (gamma != 1):
        style = adjust_gamma(style, gamma)
        content = adjust_gamma(content, gamma)

    style = Image.fromarray(style)
    content = Image.fromarray(content)

    #SHARPENING
    if (sharpen != 1):
        style = ImageEnhance.Sharpness(style).enhance(sharpen)
        content = ImageEnhance.Sharpness(content).enhance(sharpen)

    if (VERBOSE):
        plt.title('Style: Norm={}, {}, {}'.format(S_TYPE, gamma, sharpen))
        plt.imshow(style)
        plt.show()

        plt.title('Content: Norm={}, {}, {}'.format(C_TYPE, gamma, sharpen))
        plt.imshow(content)
        plt.show()

    return style, content
Example #3
    def test_match_histograms_consistency(self):
        """ensure equivalent results for float and integer-based code paths"""
        image_u8 = self.image_rgb
        reference_u8 = self.template_rgb
        image_f64 = self.image_rgb.astype(np.float64)
        reference_f64 = self.template_rgb.astype(np.float64, copy=False)
        matched_u8 = exposure.match_histograms(image_u8, reference_u8)
        matched_f64 = exposure.match_histograms(image_f64, reference_f64)
        assert_array_almost_equal(matched_u8.astype(np.float64), matched_f64)
Example #4
def histogram_match(g_source, seed):
    # The original nested ifs picked the reference frame for the highest
    # threshold exceeded by `seed`; an if/elif chain expresses the same
    # selection directly.
    if seed <= 0.0:
        # Guard: the original code left g_reference undefined for seed <= 0,
        # so return the source unchanged in that case.
        return g_source.astype('int16')
    if seed > 0.9:
        reference_path = './data/test/real2016/1483196401.Sat.Dec.31_15_00_01.GMT.2016.argus02b.cx.timex.merge.png'
    elif seed > 0.7:
        reference_path = './data/test/real2016/1477328401.Mon.Oct.24_17_00_01.GMT.2016.argus02b.cx.timex.merge.png'
    elif seed > 0.5:
        reference_path = './data/test/real2016/1452709801.Wed.Jan.13_18_30_01.GMT.2016.argus02b.cx.timex.merge.png'
    elif seed > 0.3:
        reference_path = './data/test/real2016/1453491001.Fri.Jan.22_19_30_01.GMT.2016.argus02b.cx.timex.merge.png'
    else:
        reference_path = './data/test/real2016/1452274201.Fri.Jan.08_17_30_01.GMT.2016.argus02b.cx.timex.merge.png'
    g_reference = np.asarray(Image.open(reference_path))
    g_matched = match_histograms(g_source, g_reference)
    image = g_matched.astype('int16')
    return image
Example #5
    def test_match_histograms_channel_axis(self, channel_axis):
        """Assert that pdf of matched image is close to the reference's pdf for
        all channels and all values of matched"""

        image = np.moveaxis(self.image_rgb, -1, channel_axis)
        reference = np.moveaxis(self.template_rgb, -1, channel_axis)
        matched = exposure.match_histograms(image,
                                            reference,
                                            channel_axis=channel_axis)
        assert matched.dtype == image.dtype
        matched = np.moveaxis(matched, channel_axis, -1)
        reference = np.moveaxis(reference, channel_axis, -1)
        matched_pdf = self._calculate_image_empirical_pdf(matched)
        reference_pdf = self._calculate_image_empirical_pdf(reference)

        for channel in range(len(matched_pdf)):
            reference_values, reference_quantiles = reference_pdf[channel]
            matched_values, matched_quantiles = matched_pdf[channel]

            for i, matched_value in enumerate(matched_values):
                closest_id = (np.abs(reference_values -
                                     matched_value)).argmin()
                assert_almost_equal(matched_quantiles[i],
                                    reference_quantiles[closest_id],
                                    decimal=1)
Example #6
    def test_match_histograms(self, image, reference, multichannel):
        """Assert that pdf of matched image is close to the reference's pdf for
        all channels and all values of matched"""

        # when
        matched = exposure.match_histograms(image,
                                            reference,
                                            multichannel=multichannel)

        matched_pdf = self._calculate_image_empirical_pdf(matched)
        reference_pdf = self._calculate_image_empirical_pdf(reference)

        # then
        for channel in range(len(matched_pdf)):
            reference_values, reference_quantiles = reference_pdf[channel]
            matched_values, matched_quantiles = matched_pdf[channel]

            for i, matched_value in enumerate(matched_values):
                closest_id = (np.abs(reference_values -
                                     matched_value)).argmin()
                assert_almost_equal(
                    matched_quantiles[i],
                    reference_quantiles[closest_id],
                    decimal=1,
                )
Example #7
def run(params):
    RTimageLocation = params['inputRTImagePath']
    GTimageLocation = params['inputGTImagePath']
    resultLocation = params['resultPath']
    resultLocationAdj = params['resultPathAdj']

    # Checking existence of temporary files (individual channels)
    if not os.path.exists(RTimageLocation):
        print(f'Error: {RTimageLocation} does not exist')
        return
    if not os.path.exists(GTimageLocation):
        print(f'Error: {GTimageLocation} does not exist')
        return

    # Loading input images
    RTData = imread(RTimageLocation)
    GTData = imread(GTimageLocation)
    print(f'Dimensions of Restored image: {RTData.shape}')
    print(f'Dimensions of GT image: {GTData.shape}')

    # Checking dtype is the same for both input channels
    if GTData.dtype != RTData.dtype:
        error_mes = "The bit depth of your input channels is not the same. Convert one of them and retry."
        ctypes.windll.user32.MessageBoxW(0, error_mes, 'Error', 0)
        sys.exit(error_mes)

    # Histogram matching
    matched_GTData = match_histograms(GTData, RTData).astype(RTData.dtype)

    # MSE measurement
    # valMSE = skimage.measure.compare_mse(RTData, GTData) # deprecated in scikit-image 0.18
    valMSE = mean_squared_error(RTData, matched_GTData)
    print(
        f'___ MSE = {valMSE} ___'
    )  # Value appears in the log if Verbosity option is set to 'Everything'

    # SSIM measurement
    outFullSSIM = structural_similarity(RTData, matched_GTData, full=True)

    # Extracting mean value (first item)
    outMeanSSIM = outFullSSIM[0]
    print(f'___ Mean SSIM = {outMeanSSIM} ___')

    # Extracting map (second item)
    outSSIM = outFullSSIM[1]
    print(f'Bit depth of SSIM array: {outSSIM.dtype}')

    # Convert output array whose range is [0-1] to adjusted bit range (8- or 16-bit) if necessary
    if RTData.dtype != np.dtype('float64') and RTData.dtype != np.dtype(
            'float32'):
        outputData = rescale_intensity(outSSIM,
                                       in_range=(0, 1),
                                       out_range=(0,
                                                  np.iinfo(RTData.dtype).max))
        outputData = outputData.astype(RTData.dtype)
    else:
        outputData = outSSIM

    imsave(resultLocation, outputData)
    imsave(resultLocationAdj, matched_GTData)
Example #8
def evaluate_individual_metrics_with_hm(load_y,
                                        load_x,
                                        predict,
                                        metrics: dict,
                                        test_ids,
                                        train_ids,
                                        results_path,
                                        exist_ok=False):
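    """For every test id, histogram-match the test image to the pooled
    intensities of all training images, run `predict`, evaluate each metric
    against the target, and save one JSON file per metric in `results_path`."""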
    assert len(metrics) > 0, 'No metric provided'
    os.makedirs(results_path, exist_ok=exist_ok)

    all_train_img = np.float16([])
    for _id in train_ids:
        all_train_img = np.concatenate(
            (all_train_img, np.float16(load_x(_id)).ravel()))

    results = defaultdict(dict)
    for _id in tqdm(test_ids):
        target = load_y(_id)
        image = load_x(_id)
        prediction = predict(
            np.float32(
                np.reshape(match_histograms(image.ravel(), all_train_img),
                           image.shape)))

        for metric_name, metric in metrics.items():
            try:
                results[metric_name][_id] = metric(target, prediction, _id)
            except TypeError:
                results[metric_name][_id] = metric(target, prediction)

    for metric_name, result in results.items():
        save_json(result,
                  os.path.join(results_path, metric_name + '.json'),
                  indent=0)
Example #9
def hist_mtch_cs(im1, im2, color_space='RGB', transfer_channels=[0, 1, 2]):
    trans_1 = trans_2 = None

    if color_space == 'LAB':
        trans_1, trans_2 = cv2.COLOR_RGB2LAB, cv2.COLOR_LAB2RGB
    elif color_space == 'HSV':
        trans_1, trans_2 = cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2RGB
    elif color_space == 'HLS':
        trans_1, trans_2 = cv2.COLOR_RGB2HLS, cv2.COLOR_HLS2RGB

    im1_2 = im1.copy()
    im2_2 = im2.copy()

    if trans_1 is not None:
        im1_2 = cv2.cvtColor(im1, trans_1)
        im2_2 = cv2.cvtColor(im2, trans_1)

    matched = match_histograms(im1_2, im2_2, multichannel=True)

    for c in transfer_channels:
        im1_2[:, :, c] = matched[:, :, c]

    if trans_2 is not None:
        im1_2 = cv2.cvtColor(im1_2, trans_2)

    return im1_2
Example #10
def transform_image(src_img,
                    transform,
                    match_img=None,
                    ndvi=False,
                    enhance_contrast=True,
                    downscale=5,
                    denoise=True):
    """Applies some transformations to an image useful before segmentation."""
    if ndvi:
        img, multichannel = calc_ndvi(src_img), False
        if match_img is not None:
            match_img = calc_ndvi(match_img)
    else:
        img, multichannel = img_as_float(src_img[:, :, 0:3]), True
        if match_img is not None:
            match_img = img_as_float(match_img[:, :, 0:3])

    if match_img is not None:
        img = match_histograms(img, match_img, multichannel=multichannel)

    if enhance_contrast:
        img = equalize_adapthist(img)

    if downscale is not None:
        factors = (downscale, downscale, 1) if multichannel else \
                  (downscale, downscale)
        img = downscale_local_mean(img, factors=factors)
        a, b, c, d, e, f, _, _, _ = transform
        trf = Affine(downscale, b, c, d, -downscale, f)
    else:
        # No downscaling requested; keep the original transform so `trf`
        # is always defined before it is returned.
        trf = transform

    if denoise:
        img = denoise_tv_chambolle(img, multichannel=multichannel)

    return img, trf
Example #11
def matchhist(file, reference):
    img_ref = img_as_float(io.imread(reference))
    img_comp = img_as_float(io.imread(file))
    outfile = os.path.splitext(file)[0] + '_hist.jpg'
    matched = match_histograms(img_comp, img_ref, multichannel=True)
    io.imsave(outfile, img_as_ubyte(matched))
    return outfile
Example #12
def insert(blank_path, track_path, mask=None):
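    """Composite each histogram-matched track frame onto the corresponding
    blank frame through a mask; the loop stops when either movie runs out of
    frames (IndexError)."""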
    blank_movie, blank_index = load_and_getDelSq(blank_path)
    track_movie, track_index = load_and_getDelSq(track_path)
    dif = track_index - blank_index
    track_i, blank_j = assign_ij(dif)
    end = []
    #track_matched = match_histograms(track_movie, blank_movie, multichannel = True)
    while True:
        try:
            b = blank_movie[blank_j]
            t = match_histograms(track_movie[track_i], b, multichannel=True)
            #t = track_matched[track_i]
            just_track = get_just_track(track_path[track_path.find("fm"):], t)
            sub_blank = b[:150, :150]
            mask = make_mask(track_path[track_path.find("fm"):],
                             just_track.shape)
            mask = np.expand_dims(mask, 2)
            mask = np.repeat(mask, 4, 2)
            s1, s2 = get_s1_s2(just_track.shape)
            sub_blank[75 - s1:75 + s1,
                      75 - s2:75 + s2] = just_track * mask + sub_blank[
                          75 - s1:75 + s1, 75 - s2:75 + s2] * (1 - mask)
            end.append(sub_blank)
            blank_j += 1
            track_i += 1
        except IndexError:
            break
    return np.array(end)
Example #13
    def normalize_frames(self,
                         frames=None,
                         inplace=True,
                         reference_frame=None):
        if frames is None and len(self.frames) == 0:
            raise ValueError("You did not specify any frames")

        if frames is None:
            frames = self.frames

        if reference_frame is None:
            print("No reference frame provided; using the first frame "
                  "as reference")
            reference_frame = 0

        adjusted_frames = []
        for i in range(len(frames)):
            if i == reference_frame:
                # Keep the reference frame unchanged so the frame count and
                # ordering are preserved.
                adjusted_frames.append(frames[i])
            else:
                matched = match_histograms(frames[i],
                                           frames[reference_frame],
                                           multichannel=False)
                adjusted_frames.append(matched)
        if inplace:
            self.frames = adjusted_frames
        else:
            return adjusted_frames
Example #14
def process(pre_path, post_path, patient_folder):

    pre_reg_file = os.path.join(patient_folder, 'pre_reg.nii.gz')
    post_new = os.path.join(patient_folder, 'post.nii.gz')
    sub_file = os.path.join(patient_folder, 'sub.nii.gz')

    aladin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "niftyreg/bin/reg_aladin")

    reg_call = aladin_path + " -rigOnly -ref " + post_path + " -flo " + pre_path + " -res " + pre_reg_file + " -pad 0"
    subprocess.run(shlex.split(reg_call), stdout=subprocess.PIPE, shell=False)

    pre_nib = nib.load(pre_reg_file)
    post_nib = nib.load(post_path)

    pre = pre_nib.get_fdata()
    post = post_nib.get_fdata()

    pre_histmatch = match_histograms(pre, post)

    sub = post - pre_histmatch

    nib.save(nib.Nifti1Image(sub, pre_nib.affine, pre_nib.header), sub_file)

    shutil.copyfile(post_path, post_new)
Example #15
def colorMatchImage(imageH, imageW, imageBaseAdd, matchH, matchW, matchBaseAdd,
                    vm):
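    """Read the source and reference images pixel by pixel from the VM's
    memory, histogram-match the source to the reference, and write the
    matched pixels back to the source image's memory region."""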
    image = []
    rangeX = range(imageH)
    rangeY = range(imageW)
    for row in rangeX:
        image.append([])
        for cell in rangeY:
            position = imageBaseAdd + row * imageW + cell
            color = vm.getValue(position)
            image[-1].append(colorToRGB(color))
    image = np.array(image)

    match = []
    rangeX = range(matchH)
    rangeY = range(matchW)
    for row in rangeX:
        match.append([])
        for cell in rangeY:
            position = matchBaseAdd + row * matchW + cell
            color = vm.getValue(position)
            match[-1].append(colorToRGB(color))
    match = np.array(match)

    matched = match_histograms(image, match, multichannel=True)

    rangeX = range(imageH)
    rangeY = range(imageW)
    for row in rangeX:
        for cell in rangeY:
            color = rgbToColor(matched[row][cell])
            position = imageBaseAdd + row * imageW + cell
            vm.setValue(color, position)
Example #16
    def processIdx(idx):
        ref_idx = ref_idxes[idx % len(ref_idxes)]
        source_img, _ = ds_source[idx]
        reference_img, _ = ds_reference[ref_idx]
        matched_img = match_histograms(img_as_float(source_img),
                                       img_as_float(reference_img),
                                       multichannel=True)
        ds_source[idx] = img_as_ubyte(matched_img)
Example #17
def apply_histogram(img, reference_image, blend_ratio):
    reference_image = cv2.resize(reference_image,
                                 dsize=(img.shape[1], img.shape[0]))
    matched = match_histograms(np.squeeze(img),
                               np.squeeze(reference_image),
                               multichannel=True)
    img = cv2.addWeighted(matched, blend_ratio, img, 1 - blend_ratio, 0)
    return img
Example #18
def cargarImagen(imagen, filename):
    rawImage = plt.imread(filename)
    reference = plt.imread("images/reference.jpg")
    matched = match_histograms(rawImage, reference, multichannel=True)
    #imagen.imageRGB = plt.imread(filename)
    imagen.imageRGB = matched
    imagen.imageCIE = color.rgb2lab(imagen.imageRGB)
    del rawImage
Example #19
    def apply(self, img, reference_image=None, blend_ratio=0.5, **params):
        from skimage.exposure import match_histograms

        if random.random() < self.p:
            reference_image = cv2.resize(reference_image, dsize=(img.shape[1], img.shape[0]))
            matched = match_histograms(img, reference_image, multichannel=True)
            img = cv2.addWeighted(matched, blend_ratio, img, 1 - blend_ratio, 0)

        return img
Example #20
def hist_match(source, target):
    try:
        matched = match_histograms(source, target,
                                   multichannel=True).astype(np.uint8)
        matched = Image.fromarray(matched)
        return matched
    except Exception:
        # Fall back to the unmodified source image if matching fails.
        matched = Image.fromarray(source)
        return matched
Example #21
def match_color(pre_name, ref_img, target_img):
    from skimage.io import imread, imsave
    from skimage.exposure import match_histograms

    reference = imread(ref_img)
    image = imread(target_img)

    matched = match_histograms(image, reference, multichannel=True)
    print(f'match color to {pre_name}')
    imsave(pre_name, matched)
Example #22
def create_converted_imgs(min_num_features=10, max_num_features=50):
    for i, file in enumerate(os.listdir(foreign_imgs_folder.path)):
        if file.endswith(".jp2"):
            continue
        foreign_img = cv2.imread(os.path.join(foreign_imgs_folder.path, file))
        merge_img = image_pipeline(foreign_img, num_of_features=np.random.randint(min_num_features, max_num_features),
                                   alpha=0.0)
        result_img = match_histograms(merge_img, kazakh_img, multichannel=True)
        cv2.imwrite(os.path.join(converted_imgs_folder.path, file), result_img)
        print("Finished processing:", i + 1, "images")
Example #23
def _adjust_lumination(sample_image, image):
	if isinstance(sample_image, str):
		sample_image = cv2.imread(sample_image)

	assert isinstance(image, np.ndarray)

	new_image = exposure.match_histograms(image, sample_image, multichannel=True)

	return new_image
Example #24
def improve_scenebkp(sub_templateg, alg=1):
    
    ref = np.asarray(PIL.Image.fromarray(np.asarray(pd.read_pickle('./AuxFiles/refhist.pkl'))))
    
    #if alg==1:
        #sub_templateg = cv2.fastNlMeansDenoising(sub_templateg,None,10,7,21)
    
    if is_low_contrast(sub_templateg, 0.35):
        
        temp_img = np.copy(sub_templateg).astype(float)
        temp_img[temp_img <= 30] = np.nan
        temp_img_std = np.nanstd(temp_img)
        del temp_img
                
        if temp_img_std <= 30:
                    
            print('Scene is low contrasted, improving...')
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.7)
            sub_templateg = adjust_sigmoid(sub_templateg)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.2)
                     
    hist1 = cv2.calcHist([sub_templateg], [0], None, [256], [0, 256])
    hist2 = cv2.calcHist([ref], [0], None, [256], [0, 256])
    sim = cv2.compareHist(hist1, hist2, 0)
            
    if sim < 0.8:

        if alg == 1:

            print('Computing large objects.')
            sub_templateg = cv2.medianBlur(sub_templateg, 5)
            sub_templateg = cv2.bilateralFilter(sub_templateg, 3, 3, 3)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=3.0)
            sub_templateg = adjust_sigmoid(sub_templateg)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.0)

        if alg == 2:

            print('Computing small objects')
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.5)
    else:

        if alg == 1:
            print('Computing large objects.')
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.5)
            sub_templateg = adjust_sigmoid(sub_templateg)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.0)
                    
        else:
            print('Computing small objects.')
            sub_templateg = match_histograms(sub_templateg, ref)
            sub_templateg = convert(sub_templateg, 0, 255, np.uint8)
            
    return sub_templateg
Example #25
def equalize_histogram(filename, filename2):
    image = Image.open(filename)
    image = asarray(image)

    reference = Image.open(filename2)
    reference = asarray(reference)

    matched_image = match_histograms(image, reference, multichannel=True)
    plot = plot_equalized_histogram(image, reference, matched_image)
    image = Image.fromarray(matched_image)
    image.save(filename)
    return filename, plot
Example #26
def matchHistograms(image_ref_path, image_target_path):
    reference = io.imread(image_ref_path)
    ic(reference.dtype, reference.shape, type(reference))
    target = io.imread(image_target_path)
    ic(target.dtype, target.shape, type(target))

    matched = match_histograms(target, reference)
    ic(matched.dtype, matched.shape, type(matched))
    data = Image.fromarray(matched)
    # saving the final output
    # as a TIFF file
    data.save('/media/nacho/Puzzles/gabriele_data/hippo_3/Round1/Round1_c1_maxIP_matched.tif')
Example #27
def match_histogram(source, reference):
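    """Rescale `source` so its maximum matches the reference's, histogram-match
    it to `reference`, and return a torch.Tensor if the source was one."""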
    isTorch = False
    source = source / source.max() * reference.max()
    if isinstance(source, torch.Tensor):
        source = source.cpu().numpy()
        isTorch = True
    if isinstance(reference, torch.Tensor):
        reference = reference[:source.shape[0], ...].cpu().numpy()

    matched = match_histograms(source, reference, multichannel=False)
    if isTorch:
        matched = torch.from_numpy(matched)
    return matched
Example #28
def checksim(reference, edge, crop, match_hist=False):
    img_ref = imgcrop(img_as_float(io.imread(reference)), crop)
    img_comp = imgcrop(img_as_float(io.imread(edge)), crop)
    if match_hist:
        matched = match_histograms(img_comp, img_ref, multichannel=True)
    else:
        matched = img_comp

    # shift, error, diffphase = phase_cross_correlation(img_ref, img_comp)
    # ssim_calc = ssim(img_ref, matched,  data_range=img_ref.max() - img_ref.min())
    mse = sqrt(mean_squared_error(img_ref, matched))
    sim = mse
    return sim
Example #29
def run(params):
	RTimageLocation = params['inputRTImagePath']
	GTimageLocation = params['inputGTImagePath']
	resultLocation = params['resultPath']
	resultLocationAdj = params['resultPathAdj']
	
	# Checking existence of temporary files (individual channels)
	if not os.path.exists(RTimageLocation):
		print(f'Error: {RTimageLocation} does not exist')
		return
	if not os.path.exists(GTimageLocation):
		print(f'Error: {GTimageLocation} does not exist')
		return
		
	# Loading input images
	RTData = imread(RTimageLocation)
	GTData = imread(GTimageLocation)
	print(f'Dimensions of Restored image: {RTData.shape}')
	print(f'Dimensions of GT image: {GTData.shape}')
	
	# Histogram matching
	matched_GTData = match_histograms(GTData, RTData).astype(RTData.dtype)
	
	# MSE measurement
	# valMSE = skimage.measure.compare_mse(RTData, GTData) # deprecated in scikit-image 0.18 
	valMSE = mean_squared_error(RTData, matched_GTData)
	print(f'___ MSE = {valMSE} ___')	# Value appears in the log if Verbosity option is set to 'Everything'
	
	# SSIM measurement
	outFullSSIM = structural_similarity(RTData, matched_GTData, full=True)
	
	# Extracting mean value (first item)
	outMeanSSIM = outFullSSIM[0]
	print(f'___ Mean SSIM = {outMeanSSIM} ___')
	
	# Extracting map (second item)
	outSSIM = outFullSSIM[1]
	print(f'Bit depth of SSIM array: {outSSIM.dtype}')
	
	# Convert output array whose range is [0-1] to adjusted bit range (8- or 16-bit)
	if RTData.dtype == np.dtype('u2'):
		outputData = img_as_uint(outSSIM)
	elif RTData.dtype == np.dtype('f4'):
		outputData = img_as_float32(outSSIM)	# necessary?
	else:
		outputData = img_as_ubyte(outSSIM)
	
	imsave(resultLocation, outputData)	
	imsave(resultLocationAdj, matched_GTData)
Example #30
def ScaleMatchDiff(filename, expmrc):
    Sim = mrcfile.open(filename)
    npdata = Sim.data
    Min = np.amin(npdata)
    Max = np.amax(npdata)
    # Min-max normalise the simulated map to [0, 1]
    CurrData = np.array(npdata)
    CurrData = CurrData - Min
    ReviData = CurrData / (Max - Min)
    ExpD = expmrc
    # Histogram-match the normalised map to the experimental map, then
    # return the sum of squared differences between the two
    Match = match_histograms(ReviData, ExpD.data)
    Match = Match.astype(np.float32)
    Diff = (ExpD.data - Match)**2
    Sum = np.sum(Diff)
    return Sum