Example #1
def compare_png(img1, img2, eps=0.99):
    """check whether img1 and img2 are similar

       we use structural similarity (SSIM) to compare them.

       SSIM generates values between 0 and 1, where 1 represents
       identical images

       If SSIM result is greater eps, then this method returns True.

    """
    im1 = imread(img1)
    im2 = imread(img2)
    if len(im1.shape) == 2 or im1.shape[-1] == 1:
        # only one color channel
        mssim = compare_ssim(im1, im2)
    elif HAVE_MULTICHANNEL_SSIM:
        # multi color channel
        mssim = compare_ssim(im1, im2, multichannel=True)
    else:
        # We have to do multichannel ssim ourselves
        nch = im1.shape[-1]
        mssim = np.empty(nch)
        for chan in range(nch):
            # use copy to generate contiguous array and avoid warning
            ch_result = compare_ssim(
                im1[..., chan].copy(), im2[..., chan].copy())
            mssim[chan] = ch_result
        mssim = mssim.mean()
    return mssim > eps
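
A note on portability, since these examples target the pre-0.16 scikit-image API: compare_ssim was renamed to skimage.metrics.structural_similarity in 0.16 and the old alias was removed in 0.18. A minimal setup sketch for the helper above (treating HAVE_MULTICHANNEL_SSIM as a plain module-level flag is an assumption inferred from the snippet):

import numpy as np
from skimage.io import imread

try:
    from skimage.measure import compare_ssim   # scikit-image < 0.18
except ImportError:
    from skimage.metrics import structural_similarity as compare_ssim

# assumption: the multichannel keyword is available in this skimage version
HAVE_MULTICHANNEL_SSIM = True

# hypothetical usage with placeholder paths:
# compare_png('reference.png', 'rendered.png', eps=0.95)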
Example #2
def image_similarity_index(image_1_path_name, image_2_path_name):
    """Calculates the similarity of two images. A structural similarity index of 1.0 means the images are identical."""
    image_1 = color.rgb2gray(imread(image_1_path_name))  # color-images are not supported in the version for Raspbian
    image_2 = color.rgb2gray(imread(image_2_path_name))

    similarity = compare_ssim(image_1, image_2)

    return similarity
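
A minimal call sketch (file names are placeholders; the imports mirror what the function uses):

from skimage import color
from skimage.io import imread
from skimage.measure import compare_ssim

score = image_similarity_index('frame_001.png', 'frame_002.png')
print('SSIM: {:.4f}'.format(score))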
Example #3
def test(model):
    
    print('Start testing on {}'.format(args.test_dir))
    out_dir = save_dir + args.test_dir.split('/')[-1] + '/'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
            
    name = []
    psnr = []
    ssim = []
    file_list = glob.glob('{}/*.png'.format(args.test_dir))
    for file in file_list:
        # read image
        img_clean = np.array(Image.open(file), dtype='float32') / 255.0
        img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape)
        img_test = img_test.astype('float32')
        # predict
        x_test = img_test.reshape(1, img_test.shape[0], img_test.shape[1], 1) 
        y_predict = model.predict(x_test)
        # calculate numeric metrics
        img_out = y_predict.reshape(img_clean.shape)
        img_out = np.clip(img_out, 0, 1)
        psnr_noise, psnr_denoised = compare_psnr(img_clean, img_test), compare_psnr(img_clean, img_out)
        ssim_noise, ssim_denoised = compare_ssim(img_clean, img_test), compare_ssim(img_clean, img_out)
        psnr.append(psnr_denoised)
        ssim.append(ssim_denoised)
        # save images
        filename = file.split('/')[-1].split('.')[0]    # get the name of image file
        name.append(filename)
        img_test = Image.fromarray((img_test*255).astype('uint8'))
        img_test.save(out_dir+filename+'_sigma'+'{}_psnr{:.2f}.png'.format(args.sigma, psnr_noise))
        img_out = Image.fromarray((img_out*255).astype('uint8')) 
        img_out.save(out_dir+filename+'_psnr{:.2f}.png'.format(psnr_denoised))
    
    psnr_avg = sum(psnr)/len(psnr)
    ssim_avg = sum(ssim)/len(ssim)
    name.append('Average')
    psnr.append(psnr_avg)
    ssim.append(ssim_avg)
    print('Average PSNR = {0:.2f}, SSIM = {1:.2f}'.format(psnr_avg, ssim_avg))
    
    pd.DataFrame({'name': np.array(name), 'psnr': np.array(psnr), 'ssim': np.array(ssim)}).to_csv(out_dir + 'metrics.csv', index=True)
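
The function relies on module-level args, save_dir, and model. A hedged sketch of the assumed argparse setup (argument names are inferred from the snippet; defaults are placeholders):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--test_dir', default='data/Test/Set68', help='directory with test PNGs')
parser.add_argument('--sigma', default=25, type=int, help='Gaussian noise level')
args = parser.parse_args()
save_dir = 'results/'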
Example #4
def get_ssim(actual, expected):
    im = Image.fromarray(actual)
    im2 = Image.fromarray(expected)

    if im.size[0] != im2.size[0] or im.size[1] != im2.size[1]:
        raise RuntimeError(
            "Can't calculate SSIM for images of different sizes (one is %dx%d, the other %dx%d)." % (
                im.size[0], im.size[1],
                im2.size[0], im2.size[1],
            )
        )
    return compare_ssim(np.array(im), np.array(im2), multichannel=True)
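
A quick sanity check (random arrays stand in for real screenshots; identical inputs should score 1.0):

import numpy as np

frame = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
print(get_ssim(frame, frame.copy()))  # identical frames -> 1.0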
Example #5
    def ssim(self, image1, image2):
        image1 = Image.open(image1).convert('RGB')
        if image1.size[0] > 300:
            new_size = (300, int(image1.size[1] / image1.size[0] * 300))
        else:
            new_size = image1.size
        print(image1.size, new_size)
        image1 = image1.resize(new_size)
        image2 = Image.open(image2).resize(new_size).convert('RGB')
        image1 = array(image1)
        image2 = array(image2)
        img1 = img_as_float(image1)
        img2 = img_as_float(image2)
        return compare_ssim(img1, img2, win_size=None, gradient=False, multichannel=True)
Example #6
def test_denoise_tv_chambolle_weighting():
    # make sure a specified weight gives consistent results regardless of
    # the number of input image dimensions
    rstate = np.random.RandomState(1234)
    img2d = astro_gray.copy()
    img2d += 0.15 * rstate.standard_normal(img2d.shape)
    img2d = np.clip(img2d, 0, 1)

    # generate 4D image by tiling
    img4d = np.tile(img2d[..., None, None], (1, 1, 2, 2))

    w = 0.2
    denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w)
    denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w)
    assert_(measure.compare_ssim(denoised_2d,
                                 denoised_4d[:, :, 0, 0]) > 0.99)
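
A hedged sketch of the fixtures this test assumes (the real skimage test suite builds astro_gray the same way from the bundled astronaut image):

import numpy as np
from numpy.testing import assert_
from skimage import color, data, measure, restoration

astro_gray = color.rgb2gray(data.astronaut())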
Example #7
def find_the_most_similar(images_data):
    from skimage.measure import compare_ssim

    n = len(images_data)

    sim = np.zeros((n, n))

    for i in range(n):
        image_i = images_data[i]
        for j in range(i, n):
            image_j = images_data[j]
            sim[i, j] = compare_ssim(image_i, image_j, multichannel=True)  # ,gaussian_weights=True)
            sim[j, i] = sim[i, j]

    ret = np.sum(sim, axis=0)

    ret = sorted(zip(ret, range(n)))

    return ret[-1][1]
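
A minimal usage sketch (random arrays stand in for real photos; np must already be imported in the enclosing module):

import numpy as np

images = [np.random.rand(32, 32, 3) for _ in range(4)]
best = find_the_most_similar(images)
print(best)  # index of the image most similar to all the others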
Example #8
def run_metrics(image_file_name1,image_file_name2 ):
    image_name1 = io.imread(image_file_name1)
    image_name2 = io.imread(image_file_name2)
    peak_signal_to_noise_ratio = measure.compare_psnr(image_name1, image_name2)
    print("PSNR Peak signal to noise ratio is %s" % peak_signal_to_noise_ratio)
    mse = measure.compare_mse(image_name1, image_name2)
    print("MSE Mean square error between the images is %s" % mse)
    rmse = measure.compare_nrmse(image_name1, image_name2)
    print("RMSE Normalised root mean square error between the images is %s" % rmse)
    ssim = measure.compare_ssim(image_name1, image_name2, multichannel=True)
    print("SSIM Structural Similarity Index is %s" % ssim)
    #[M3,M4] = minkowski_distance(image_name1,image_name2)
    #print ("Minkowski distance is %s %s"%(M3,M4))
    #AD = average_difference(image_name1,image_name2)
    #print ("AD Average difference is %s"%AD)
    #SC = structural_content(image_name1,image_name2)
    #print ("SC Structural Content is %s"%SC)
    #NK = normalised_cross_correlation(image_name1,image_name2)
    #print ("NK normalised cross correlation is %s"%NK)
    #MD = maximum_difference(image_name1,image_name2)
    #print ("Maximum difference is %s"%MD)
    return {'peaktonoise': peak_signal_to_noise_ratio, 'mse': mse, 'rmse': rmse, 'ssim': ssim, 'score': peak_signal_to_noise_ratio}
Example #9
print(image_name2.shape)

# estimate the standard deviation of the images

std_1 = numpy.std(numpy.std(numpy.array(image_name1)))
std_2 = numpy.std(numpy.std(numpy.array(image_name2)))

print("std is %2.10f" % std_1)

# print("Standard deviations of the images are %s %s" % (std_1, std_2))

# estimate the peak signal to noise ratio (PSNR) between the images

peak_signal_to_noise_ratio = measure.compare_psnr(image_name1, image_name2)

print("Peak signal to noise ratio is %s" % peak_signal_to_noise_ratio)

# estimate the mean square error between the images

mse = measure.compare_mse(image_name1, image_name2)

print("Mean square error between the images is %s" % mse)

# estimate the normalised root mean square error between the images

rmse = measure.compare_nrmse(image_name1, image_name2)
ssim = measure.compare_ssim(image_name1, image_name2)

print("Normalised root mean square error between the images is %s" % rmse)
print("SSIM is %s" % ssim)
Example #10
    for j in range(0, len(models)):
        predicted_img = models[j].predict(
            [np.expand_dims(image, 0),
             np.expand_dims(mask, 0)])[0] * 255

        # if you want to save inpainted result, please use the following 3 lines, else use the 4th line below:
        # result_name=result_img_folder+"\\"+filename.rstrip('.png') +"_"+str(j+1)+ ".png"
        # cv2.imwrite(result_name, cv2.cvtColor(predicted_img, cv2.COLOR_BGR2RGB))
        # result_img=cv2.imread(result_name)

        # if you don't want to save inpainted result, please use the following line, else use the above 3 lines:
        result_img = cv2.cvtColor(predicted_img, cv2.COLOR_BGR2RGB)

        current_mse = compare_mse(input_img, result_img)
        current_psnr = compare_psnr(input_img, result_img)
        current_ssim = compare_ssim(to_gray(input_img), to_gray(result_img))
        #print("MSE:{}".format(current_mse),"PSNR:{}".format(current_psnr),"SSIM:{}".format(current_ssim))
        print(f"Image Num:{image_num}({j+1})", f"MSE:{current_mse:4.4f}",
              f"PSNR:{current_psnr:3.4f}", f"SSIM:{current_ssim:3.4f}")
        mse[j] = mse[j] + current_mse
        psnr[j] = psnr[j] + current_psnr
        ssim[j] = ssim[j] + current_ssim

end_time = datetime.now()
total_time = (end_time - start_time).seconds

for k in range(0, len(models)):
    mse[k] = mse[k] / image_num
    psnr[k] = psnr[k] / image_num
    ssim[k] = ssim[k] / image_num
    #print("Average result of method:",k+1)
Example #11
    scale = 0.2
    # SCREENCAP  downscale only
    cropped_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
    cropped_prev_frame = cv2.resize(prev_frame, (0, 0), fx=scale, fy=scale)

    # #FOR iPHONE 4k60 far
    # cropped_frame = frame[800:1200, 1600:2400]
    # cropped_prev_frame = prev_frame[800:1200, 1600:2400]
    # # downscale
    # cropped_frame = cv2.resize(cropped_frame, (0,0), fx=0.5, fy=0.5)
    # cropped_prev_frame = cv2.resize(cropped_prev_frame, (0,0), fx=0.5, fy=0.5)

    cropped_frame = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)
    cropped_prev_frame = cv2.cvtColor(cropped_prev_frame, cv2.COLOR_BGR2GRAY)

    (score, diff) = compare_ssim(cropped_frame, cropped_prev_frame, full=True)
    # diff = (diff * 255).astype("uint8")
    # print("SSIM: {}".format(score))

    #FOR SCREENCAP
    threshold = 0.99
    if score > threshold:
        result = "Dropped"
    else:
        # result = "Unique"
        result = ""
        frametime_display = frametime
        frametime = 0

    # #FOR iphone recorded 4k60 close to screen
    # threshold = 0.9
Example #12
import cv2
import pywt
import numpy as np
from skimage.measure import compare_mse
from skimage.measure import compare_psnr
from skimage.measure import compare_ssim

img = cv2.imread("../crowd2.jpg", cv2.IMREAD_GRAYSCALE)
blur = cv2.GaussianBlur(img, (3, 3), 0)

for w in pywt.wavelist(kind="discrete"):
    wavelet = pywt.Wavelet(w)

    coeffs = pywt.wavedec2(blur, wavelet)
    #cA, (cH, cV, cD) = coeffs

    rec = pywt.waverec2(coeffs, wavelet)

    mse = compare_mse(blur, rec)
    psnr = compare_psnr(blur, rec)
    ssim = compare_ssim(blur, rec)

    print(w, ">>", mse, psnr, ssim)
Example #13
    def get_chara(self):
        """Retrieves chara name for current player in current frame.

        Compare cropped avatar with reference avatars, pick the best match as 
        the chara current player plays with. In OWL, currently observed player
        has a larger avatar. To differentiate between the two, comparison has
        to run twice and the better match gets chosen.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        all_avatars = self.avatars
        avatars_ref_observed = all_avatars["observed"]
        avatars_ref = all_avatars["normal"]
        team_color = avatars_ref_observed['ana'][0, 0]

        # Crop avatar from frame
        avatar_observed = ImageUtils.crop(
            self.image,
            OW.get_avatar_pos_observed(self.index, self.game_type,
                                       self.game_version))
        avatar = ImageUtils.crop(
            self.image,
            OW.get_avatar_pos(self.index, self.game_type, self.game_version))
        # if self.game_version == 1:
        #     cv2.imshow('t', avatar)
        #     cv2.waitKey(0)
        # If player is observed, not sure about this tho
        avatar_diff = ImageUtils.crop(
            self.image,
            OW.get_avatar_diff_pos(self.index, self.game_type,
                                   self.game_version))
        max_diff = 0
        for i in range(avatar_diff.shape[0]):
            for j in range(avatar_diff.shape[1]):
                if ImageUtils.color_distance(avatar_diff[i, j],
                                             team_color) > max_diff:
                    max_diff = ImageUtils.color_distance(
                        avatar_diff[i, j], team_color)
        if max_diff < 40 and self.is_ult_ready is False:
            self.is_observed = True
        score = 0
        for (name, avatar_ref_observed) in avatars_ref_observed.items():
            s_observed = cv2.matchTemplate(avatar_observed,
                                           avatar_ref_observed,
                                           cv2.TM_CCOEFF_NORMED)
            _, s_observed, _, loc_observed = cv2.minMaxLoc(s_observed)
            temp_avatar_observed = ImageUtils.crop(avatar_observed, [
                loc_observed[1], avatar_ref_observed.shape[0], loc_observed[0],
                avatar_ref_observed.shape[1]
            ])
            s_ssim_observed = measure.compare_ssim(temp_avatar_observed,
                                                   avatar_ref_observed,
                                                   multichannel=True)
            s = cv2.matchTemplate(avatar, avatars_ref[name],
                                  cv2.TM_CCOEFF_NORMED)
            _, s, _, loc = cv2.minMaxLoc(s)
            temp_avatar = ImageUtils.crop(avatar, [
                loc[1], avatars_ref[name].shape[0], loc[0],
                avatars_ref[name].shape[1]
            ])
            s_ssim = measure.compare_ssim(temp_avatar,
                                          avatars_ref[name],
                                          multichannel=True)
            s_ssim_final = s_ssim_observed if s_ssim_observed > s_ssim else s_ssim
            s_final = s_observed if s_observed > s else s
            loc_final = loc_observed if s_observed > s else loc

            if s_final * 0.4 + s_ssim_final * 0.6 > score:
                score = s_final * 0.4 + s_ssim_final * 0.6
                self.chara = name

        if self.chara is None:
            self.chara = "empty"
            self.is_dead = True
            return

        if self.chara == OW.DVA:
            self.dva_status = OW.IS_WITH_MEKA

        self.get_living_status(avatars_ref_observed[self.chara])
Example #14
def simScore(i1, i2):
    score = compare_ssim(i1, i2, full=False)
    # diff = (diff * 255).astype("uint8")
    return f'{score:0.5}'
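
With full=True the same call would also return the per-pixel similarity map, as the commented-out line hints. A hypothetical call on two equal-shape grayscale arrays:

import numpy as np
from skimage.measure import compare_ssim

a = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
print(simScore(a, a))  # '1.0000'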
Example #15
def main():
    startTime = time.time()
    outputDirPath = 'C:/ZCU/Diplomka/Dataset/04/RESULTS/Res_Conv'
    writer = sitk.ImageFileWriter()
    # moving = DataPreparation.readDICOMSerieToImage('C:/ZCU/DATA_FOR_TEST/MRI/TCGA-LIHC/TCGA-K7-AAU7/07-31-2001-MRI ABDOMEN WWO CONTRAST-59507/1201-C AX 3D LATE PHAS20CC MAGNEVISTE-50651')
    # fixedR = DataPreparation.readDICOMSerieToImage('C:/ZCU/3Dircadb1/3Dircadb1.1/PATIENT_DICOM')
    # movingR = DataPreparation.readDICOMSerieToImage('C:/ZCU/3Dircadb1/3Dircadb1.7/PATIENT_DICOM')
    # fixMask = DataPreparation.readDICOMSerieToImage('C:/ZCU/3Dircadb1/3Dircadb1.1/MASKS_DICOM/liver')
    # movMas =  DataPreparation.readDICOMSerieToImage('C:/ZCU/3Dircadb1/3Dircadb1.7/MASKS_DICOM/liver')

    fixedRMHD = 'C:/ZCU/Diplomka/Dataset/01/october-massachusetts-helium-queen_ack-wyoming_4_ca45536493525b615615f4d703c108e994b2dc6ec8b33e58d64c5cd6a92a12f2_v0.mhd'
    fixedRMHD = 'C:/ZCU/Diplomka/Dataset/02/summer-eight-blossom-table_diet-kitten_4_45280cdef17c50e470ef9f9990a3d6c9ed15c1e35e84bd43611cfa014abee817_v0.mhd'
    fixedRMHD = 'C:/ZCU/DATA_FOR_TEST/TCGA-LIHC/TCGA-BC-A10X/11-22-1992-MRI ABD WWO CONT-49239/11-LIVER-GAD-ENHANCEMENTT1F-68307'
    fixedRMHD = 'C:/ZCU/Diplomka/Dataset/02/summer-eight-blossom-table_diet-kitten_4_45280cdef17c50e470ef9f9990a3d6c9ed15c1e35e84bd43611cfa014abee817_v0.mhd'
    fixedRMHD = 'C:/ZCU/Diplomka/Dataset/04/hamper-carpet-earth-jersey_lake-fanta_601_447041836b6cf9ef3b328041cf99cac6c8308e90c46d437db62f6c8689fa6b58_v0.mhd'
    reader = sitk.ImageFileReader()
    reader.SetFileName(fixedRMHD)
    fixed = reader.Execute()

    movingRMHD = 'C:/ZCU/Diplomka/Dataset/01/october-massachusetts-helium-queen_ack-wyoming_7_ca45536493525b615615f4d703c108e994b2dc6ec8b33e58d64c5cd6a92a12f2_v0.mhd'
    movingRMHD = 'C:/ZCU/Diplomka/Dataset/02/summer-eight-blossom-table_diet-kitten_7_45280cdef17c50e470ef9f9990a3d6c9ed15c1e35e84bd43611cfa014abee817_v0.mhd'
    movingRMHD = 'C:/ZCU/DATA_FOR_TEST/TCGA-LIHC/TCGA-BC-A10X/03-29-1993-CT ABDOMEN  WCONTRAST-43286/4-150cc OMNIPAQUE-36663'
    movingRMHD = 'C:/ZCU/Diplomka/Dataset/02/summer-eight-blossom-table_diet-kitten_7_45280cdef17c50e470ef9f9990a3d6c9ed15c1e35e84bd43611cfa014abee817_v0.mhd'
    movingRMHD = 'C:/ZCU/Diplomka/Dataset/04/hamper-carpet-earth-jersey_lake-fanta_501_447041836b6cf9ef3b328041cf99cac6c8308e90c46d437db62f6c8689fa6b58_v0.mhd'
    # fixedR = DataPreparation.readNrrdToImage(fixedRMHD)
    # fixedR = DataPreparation.readNrrdToImage(fixedRMHD)
    reader.SetFileName(movingRMHD)
    moving = reader.Execute()

    # print movingBR.GetSize()
    # size = [512,512,101]
    # movingAr = sitk.GetArrayFromImage(movingBR)
    # M = cv2.getRotationMatrix2D((512./2.,512./2.), -7., 1.)
    # moving = movingAr.copy()
    #
    #
    # for slice in range(size[2]):
    #     moving[slice][:][:] = cv2.warpAffine(movingAr[slice][:][:], M, (movingAr.shape[1], movingAr.shape[2]))
    # moving = sitk.GetImageFromArray(moving)
    # ed = sed3.sed3(moving)
    # ed.show()
    # X = compare_ssim(sitk.GetArrayFromImage(fixed), sitk.GetArrayFromImage(moving), full=True)
    # # ed = sed3.sed3(sitk.GetImageFromArray(X[1]))
    # # ed.show()
    # print 'Difference Score BEFORE:'+str(X[0])
    # writer.SetFileName(outputDirPath + 'DifferenceBefore.nrrd')
    # writer.Execute(sitk.GetImageFromArray(X[1]))

    # fixed = DataPreparation.maskToLivers(fixedR, fixMask)
    # moving = DataPreparation.maskToLivers(movingR, movMas)
    def observer(method):
        print("{0:3} = {1:10.5f} : {2}".format(method.GetOptimizerIteration(),
                                               method.GetMetricValue(),
                                               method.GetOptimizerPosition()))

    print("====Image registrion DICOM files====")
    #
    # resampleFilter = sitk.ResampleImageFilter()
    # resampleFilter.SetSize(fixed.GetSize()*(1/3))
    # resampleFilter.SetInterpolator(sitk.sitkGaussian)
    # resampleFilter.SetOutputSpacing([1,1,1])
    # fixedResampled = resampleFilter.Execute(fixed)
    # resampleFilter.SetSize(moving.GetSize()/3)
    # movingResampled = resampleFilter.Execute(moving)

    print('Smoothing')
    fixImgSmooth = sitk.CurvatureFlow(image1=fixed,
                                      timeStep=0.35,
                                      numberOfIterations=10)
    movImgSmooth = sitk.CurvatureFlow(image1=moving,
                                      timeStep=0.35,
                                      numberOfIterations=10)

    print('Smoothing ENDED')
    # movImgSmooth = moving
    # fixImgSmooth = fixed
    resample = sitk.ResampleImageFilter()
    resample.SetReferenceImage(fixImgSmooth)
    initial_transform = sitk.CenteredTransformInitializer(
        sitk.Cast(fixImgSmooth, movImgSmooth.GetPixelID()), movImgSmooth,
        sitk.Euler3DTransform(),
        sitk.CenteredTransformInitializerFilter.GEOMETRY)

    registration_method = sitk.ImageRegistrationMethod()

    # registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=255)
    # registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
    # registration_method.SetMetricSamplingPercentage(0.01)
    registration_method.SetMetricAsCorrelation()

    registration_method.SetInterpolator(sitk.sitkGaussian)
    # registration_method.SetInterpolator(sitk.sitkLinear)

    registration_method.SetOptimizerAsGradientDescentLineSearch(
        learningRate=2.0, numberOfIterations=100)
    registration_method.SetOptimizerScalesFromPhysicalShift()
    # registration_method.SetOptimizerAsLBFGSB(gradientConvergenceTolerance=1e-5,
    #                        numberOfIterations=100,
    #                        maximumNumberOfCorrections=5,
    #                        maximumNumberOfFunctionEvaluations=1000,
    #                        costFunctionConvergenceFactor=1e+7)
    registration_method.AddCommand(sitk.sitkIterationEvent,
                                   lambda: observer(registration_method))

    registration_method.SetInitialTransform(initial_transform, inPlace=False)
    final_transform_v1 = registration_method.Execute(
        sitk.Cast(fixImgSmooth, sitk.sitkFloat32),
        sitk.Cast(movImgSmooth, sitk.sitkFloat32))

    print('Optimizer\'s stopping condition, {0}'.format(
        registration_method.GetOptimizerStopConditionDescription()))
    print('Final metric value: {0}'.format(
        registration_method.GetMetricValue()))

    print(final_transform_v1)

    writer.SetFileName(outputDirPath + 'Fixed_Smoothed.nrrd')
    writer.Execute(fixImgSmooth)
    writer.SetFileName(outputDirPath + 'Moving_Smoothed.nrrd')
    writer.Execute(movImgSmooth)

    resample = sitk.ResampleImageFilter()
    resample.SetReferenceImage(fixed)

    # SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
    resample.SetInterpolator(sitk.sitkLinear)
    resample.SetTransform(final_transform_v1)
    movingAfterTransform = resample.Execute(moving)
    sitk.WriteImage(movingAfterTransform,
                    outputDirPath + 'MovingAfterTransform' + '.nrrd')
    sitk.WriteTransform(final_transform_v1,
                        outputDirPath + 'transform' + '.tfm')

    X = compare_ssim(sitk.GetArrayFromImage(fixed),
                     sitk.GetArrayFromImage(movingAfterTransform),
                     full=True)
    # ed = sed3.sed3(sitk.GetImageFromArray(X[1]))
    # ed.show()
    # print 'Difference Score AFTER:'+str(X[0])
    # writer.SetFileName(outputDirPath + 'DifferenceAfter.nrrd')
    # writer.Execute(sitk.GetImageFromArray(X[1]))

    # writer.SetFileName(outputDirPath + '/' + '03.nrrd')
    # writer.Execute(resample.Execute(moving))

    simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)
    simg2 = sitk.Cast(sitk.RescaleIntensity(resample.Execute(moving)),
                      sitk.sitkUInt8)
    cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
    # sitk.Show(cimg, "RESULT")

    outFileName = 'ResultOfRegistration.nrrd'

    writer.SetFileName(outputDirPath + outFileName)
    writer.Execute(cimg)

    stopTime = time.time()
    print(stopTime - startTime)

    print("====END OF REGISTRATION=====")
Example #16
    def test(self):
        # make a network and load network weight to use later
        states = torch.load(os.path.join(self.args.log,
                                         'checkpoint_100000.pth'),
                            map_location=self.config.device)
        scorenet = CondRefineNetDilated(self.config).to(self.config.device)
        scorenet = torch.nn.DataParallel(scorenet, device_ids=[0])
        scorenet.load_state_dict(states[0])
        scorenet.eval()
        # prepare test data and undersample mask

        undersample_method = 'radial'
        undersample_factor = '030'
        # used for computing the degraded image and PSNR/SSIM/HFEN during iterations

        ori_complex = loadmat(self.args.load_path)["Img"]
        ori_complex = ori_complex / np.max(np.abs(ori_complex))

        kspace = np.fft.fft2(ori_complex)

        mask = loadmat("./mask/mask_" + undersample_method + "_" +
                       undersample_factor + ".mat")["mask_" +
                                                    undersample_method + "_" +
                                                    undersample_factor]
        mask = np.fft.fftshift(mask)
        self.write_images(255.0 * mask,
                          os.path.join(self.args.save_path, 'mask.png'))

        print('current undersample method is ' + undersample_method,
              np.sum(mask) / (256 * 256))

        undersample_kspace = np.multiply(mask, kspace)

        zero_filled = np.fft.ifft2(undersample_kspace)
        # get the original and degraded PNGs to compare
        self.write_images(
            np.abs(zero_filled),
            os.path.join(
                self.args.save_path, 'img_ZF_undersample_' +
                undersample_method + undersample_factor + '.png'))
        self.write_images(
            np.abs(ori_complex),
            os.path.join(
                self.args.save_path, 'img_GT_undersample_' +
                undersample_method + undersample_factor + '.png'))

        x0 = nn.Parameter(torch.Tensor(1, 6, 256, 256).uniform_(-1, 1)).cuda()
        x01 = x0.clone()

        # set parameters
        step_lr = 0.05 * 0.00003

        #number of inner iterations
        n_steps_each = 80

        # Noise amounts
        sigmas = np.array([
            1., 0.59948425, 0.35938137, 0.21544347, 0.12915497, 0.07742637,
            0.04641589, 0.02782559, 0.01668101, 0.01
        ])

        start_start = time.time()
        # the outer iteration loop
        for idx, sigma in enumerate(sigmas):

            start_out = time.time()
            lambda_recon = 1. / sigma**2
            labels = torch.ones(1, device=x0.device) * idx
            labels = labels.long()

            step_size = step_lr * (sigma / sigmas[-1])**2

            print('iteration {} uses sigma = {}'.format(idx, sigma))
            # the inner iteration loop
            for step in range(n_steps_each):
                start_in = time.time()
                # prior update by NCSN

                noise1 = torch.rand_like(x0) * np.sqrt(step_size * 2)
                grad1 = scorenet(x01, labels).detach()

                x0 = x0 + step_size * grad1
                x01 = x0 + noise1

                x0 = np.array(x0.cpu().detach(), dtype=np.float32)
                # channel mean
                x_real = (x0.real.squeeze()[0, :, :] +
                          x0.real.squeeze()[2, :, :] +
                          x0.real.squeeze()[4, :, :]) / 3
                x_imag = (x0.real.squeeze()[1, :, :] +
                          x0.real.squeeze()[3, :, :] +
                          x0.real.squeeze()[5, :, :]) / 3

                x_complex = x_real + x_imag * 1j
                # data consistency
                iterkspace = np.fft.fft2(x_complex)
                iterkspace = undersample_kspace + iterkspace * (1 - mask)
                x_complex = np.fft.ifft2(iterkspace)

                end_in = time.time()
                print("inner iteration cost time :%.2f s" %
                      (end_in - start_in))

                psnr = compare_psnr(255 * abs(x_complex),
                                    255 * abs(ori_complex),
                                    data_range=255)
                ssim = compare_ssim(abs(x_complex),
                                    abs(ori_complex),
                                    data_range=1)
                hfen = compare_hfen(abs(x_complex), abs(ori_complex))
                print("current {} step".format(step), 'PSNR :', psnr, 'SSIM :',
                      ssim, 'HFEN :', hfen)
                self.write_images(
                    np.abs(x_complex),
                    os.path.join(
                        self.args.save_path, 'img_rec_undersample_' +
                        undersample_method + undersample_factor + '.png'))
                x_real, x_imag = x_complex.real, x_complex.imag
                x_real, x_imag = x_real[np.newaxis, :, :], x_imag[
                    np.newaxis, :, :]

                x0 = np.stack([x_real, x_imag, x_real, x_imag, x_real, x_imag],
                              1)
                x0 = torch.tensor(x0, dtype=torch.float32).cuda()
            end_out = time.time()
            print("out inner iteration cost time :%.2f s" %
                  (end_out - start_out))

        end_end = time.time()
        print("one image reconstruction cost time :%.2f s" %
              (end_end - start_start))
Example #17
def ssim(gt, pred):
    """ Compute Structural Similarity Index Metric (SSIM). """
    return compare_ssim(gt.transpose(1, 2, 0),
                        pred.transpose(1, 2, 0),
                        multichannel=True,
                        data_range=gt.max() - gt.min())
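
A hypothetical call, with arrays in the channel-first (C, H, W) layout the transposes imply:

import numpy as np

gt = np.random.rand(3, 64, 64)
pred = gt + 0.05 * np.random.randn(3, 64, 64)
print(ssim(gt, pred))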
Example #18
def closure():

    global i, net_input, psnr_max, psnr_noisy_max, files_name, psnr_2_max, noisy_np
    global TRAIN_PLAN, noisy_np_norm, sigma_now, final_ssim, final_ssim_max, files_name
    global psnr_curve_max_record, ssim_curve_max_record, training_loss_record

    out_effect_np = []
    if DATA_AUG:
        for aug in range(len(img_noisy_torch)):
            noisy_torch = np_to_torch(img_noisy_noisy_np[aug] -
                                      img_noisy_np[aug])
            out = net(net_input[aug])
            total_loss = mse(out, noisy_torch.type(dtype))

            total_loss.backward()
            psrn_noisy = compare_psnr(
                np.clip(img_noisy_np[aug], 0, 1),
                (torch_to_np(net_input[aug]) - out.detach().cpu().numpy()[0]))
            # do_i_learned_noise = out.detach().cpu().numpy()[0]
            # mse_what_tf = MSE(noisy_np, do_i_learned_noise)

            if psnr_noisy_max == 0:
                psnr_noisy_max = psrn_noisy
            elif psnr_noisy_max < psrn_noisy:
                psnr_noisy_max = psrn_noisy

            if SAVE_DURING_TRAINING and i % save_every == 0:
                # output_dir
                out_test_np = torch_to_np(out)  # I +N1
                # out_test_name = f'{i}_test'
                # save_image(out_test_name, np.clip(out_test_np, 0, 1), output_path=output_dir)

                net.eval()
                loss_add = 0
                with torch.no_grad():
                    out_effect_np_ = torch_to_np(img_noisy_torch[aug] -
                                                 net(img_noisy_torch[aug]))
                    out_effect_np.append(out_effect_np_)
                    psnr_1 = compare_psnr(img_aug_np[aug],
                                          np.clip(out_effect_np_, 0, 1))
                    test_do_i_learned_noise = torch_to_np(
                        net(img_noisy_torch[aug]))

                    if psnr_max == 0:
                        psnr_max = psnr_1
                    elif psnr_max < psnr_1:
                        psnr_max = psnr_1

                    loss_add = loss_add + total_loss.item()

        training_loss_record.append(loss_add / len(img_noisy_torch))
        if i % 10 == 0:
            out_effect_np[0] = out_effect_np[0].transpose(1, 2, 0)
            for aug in range(1, 8):
                if aug < 4:
                    out_effect_np[aug] = np.rot90(
                        out_effect_np[aug].transpose(1, 2, 0), 4 - aug)
                else:
                    out_effect_np[aug] = np.flipud(
                        np.rot90(out_effect_np[aug].transpose(1, 2, 0),
                                 8 - aug))
            final_reuslt = np.mean(out_effect_np, 0)

            psnr_2 = compare_psnr(img_aug_np[0].transpose(1, 2, 0),
                                  np.clip(final_reuslt, 0, 1))
            final_ssim = compare_ssim(img_aug_np[0].transpose(1, 2, 0),
                                      np.clip(final_reuslt, 0, 1),
                                      data_range=1,
                                      multichannel=True)

            if psnr_2_max == 0:
                psnr_2_max = psnr_2
                tmp_name_p = f'{files_name[:-4]}_{sigma_now * 255:.2f}_{psnr_2:.2f}_final_{final_ssim:.4f}'
                save_image(tmp_name_p,
                           np.clip(final_reuslt.transpose(2, 0, 1), 0, 1),
                           output_path=output_dir)
            elif psnr_2_max < psnr_2:
                psnr_2_max = psnr_2
                tmp_name_p = f'{files_name[:-4]}_{sigma_now * 255:.2f}_{psnr_2:.2f}_final_{final_ssim:.4f}'
                save_image(tmp_name_p,
                           np.clip(final_reuslt.transpose(2, 0, 1), 0, 1),
                           output_path=output_dir)
            if final_ssim_max == 0:
                final_ssim_max = final_ssim
            elif final_ssim_max < final_ssim:
                final_ssim_max = final_ssim
                tmp_name = f'{files_name[:-4]}_{sigma_now * 255:.2f}_{final_ssim:.4f}_final_{psnr_2:.2f}'
                save_image(tmp_name,
                           np.clip(final_reuslt.transpose(2, 0, 1), 0, 1),
                           output_path=output_dir)

            print(
                '%s Iteration %05d, psnr 2: %f, psnr 2 max: %f, final ssim: %f, final ssim max: %f'
                % (files_name, i, psnr_2, psnr_2_max, final_ssim,
                   final_ssim_max))
            writer.add_scalar('final_test_psnr', psnr_2, i)
            writer.add_scalar('final_max_test_psnr', psnr_2_max, i)
            psnr_curve_max_record.append(psnr_2_max)
            ssim_curve_max_record.append(final_ssim_max)

    else:
        noisy_torch = np_to_torch(img_noisy_noisy_np - img_noisy_np)
        out = net(net_input)
        total_loss = mse(out, noisy_torch.type(dtype))

        total_loss.backward()
        psrn_noisy = compare_psnr(
            np.clip(img_noisy_np, 0, 1),
            (torch_to_np(net_input) - out.detach().cpu().numpy()[0]))
        do_i_learned_noise = out.detach().cpu().numpy()[0]
        mse_what_tf = MSE(noisy_np, do_i_learned_noise)
        if psnr_noisy_max == 0:
            psnr_noisy_max = psrn_noisy
        elif psnr_noisy_max < psrn_noisy:
            psnr_noisy_max = psrn_noisy

        if SAVE_DURING_TRAINING and i % save_every == 0:
            # output_dir
            out_test_np = torch_to_np(out)  # I +N1
            # out_test_name = f'{i}_test'
            # save_image(out_test_name, np.clip(out_test_np, 0, 1), output_path=output_dir)

        net.eval()
        loss_add = 0
        with torch.no_grad():
            out_effect_np = torch_to_np(img_noisy_torch - net(img_noisy_torch))
            psnr_1 = compare_psnr(img_np, np.clip(out_effect_np, 0, 1))
            test_do_i_learned_noise = torch_to_np(net(img_noisy_torch))

            if psnr_max == 0:
                psnr_max = psnr_1
            elif psnr_max < psnr_1:
                psnr_max = psnr_1

            loss_add = loss_add + total_loss.item()

        training_loss_record.append(loss_add / len(img_noisy_torch))
        if i % 10 == 0:
            final_reuslt = out_effect_np.transpose(1, 2, 0)
            psnr_2 = compare_psnr(img_np.transpose(1, 2, 0),
                                  np.clip(final_reuslt, 0, 1))
            final_ssim = compare_ssim(img_np.transpose(1, 2, 0),
                                      np.clip(final_reuslt, 0, 1),
                                      data_range=1,
                                      multichannel=True)
            if psnr_2_max == 0:
                psnr_2_max = psnr_2
                tmp_name_p = f'{files_name[:-4]}_{sigma_now*255:.2f}_{psnr_2:.2f}_final_{final_ssim:.4f}'
                save_image(tmp_name_p,
                           np.clip(final_reuslt.transpose(2, 0, 1), 0, 1),
                           output_path=output_dir)
            elif psnr_2_max < psnr_2:
                psnr_2_max = psnr_2
                tmp_name_p = f'{files_name[:-4]}_{sigma_now*255:.2f}_{psnr_2:.2f}_final_{final_ssim:.4f}'
                save_image(tmp_name_p,
                           np.clip(final_reuslt.transpose(2, 0, 1), 0, 1),
                           output_path=output_dir)
            if final_ssim_max == 0:
                final_ssim_max = final_ssim
            elif final_ssim_max < final_ssim:
                final_ssim_max = final_ssim
                tmp_name = f'{files_name[:-4]}_{sigma_now*255:.2f}_{final_ssim:.4f}_final_{psnr_2:.2f}'
                save_image(tmp_name,
                           np.clip(final_reuslt.transpose(2, 0, 1), 0, 1),
                           output_path=output_dir)

            print(
                '%s, sigma %f, Epoch %05d, psnr 2: %f, psnr 2 max: %f, final ssim : %f, final ssim max: %f'
                % (files_name, sigma_now * 255, i, psnr_2, psnr_2_max,
                   final_ssim, final_ssim_max))
            writer.add_scalar('final_test_psnr', psnr_2, i)
            writer.add_scalar('final_max_test_psnr', psnr_2_max, i)
            psnr_curve_max_record.append(psnr_2_max)
            ssim_curve_max_record.append(final_ssim_max)

    i += 1

    return total_loss
Example #19
def ssim(img1, img2, multichannel=False):
    assert img1.dtype == img2.dtype == np.uint8, 'np.uint8 is expected.'
    return compare_ssim(img1, img2, multichannel=multichannel)
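
A usage sketch; the assert makes the uint8 contract explicit:

import numpy as np

gray = np.random.randint(0, 256, (48, 48), dtype=np.uint8)
rgb = np.random.randint(0, 256, (48, 48, 3), dtype=np.uint8)
print(ssim(gray, gray))                   # 1.0
print(ssim(rgb, rgb, multichannel=True))  # 1.0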
Example #20
    list_time_proc.append(dict_1['time_proc'])
    list_title.append(dict_1['title'])
    list_adj_bool.append(dict_1['adj'])

    for j in range(i, n):
        if j != i:
            # print(i,j)
            dict_2 = np.load('datas/' + argv[j])[()]
            y_2 = dict_2['y']
            y_1_abs, y_2_abs = np.absolute(y_1).astype(
                np.float64), np.absolute(y_2).astype(np.float64)

            mse = compare_mse(y_1_abs / np.max(y_1_abs),
                              y_2_abs / np.max(y_2_abs))
            ssim = compare_ssim(y_1_abs,
                                y_2_abs,
                                data_range=y_1_abs.max() - y_1_abs.min())

            ## Plot
            axes[cnt, 0].plot(y_1_abs)
            axes[cnt, 0].set_title(dict_1['title'] + '\n MSE:' + str(mse),
                                   fontsize=10)

            axes[cnt, 1].plot(y_2_abs)
            axes[cnt, 1].set_title(dict_2['title'] + '\n SSIM:' + str(ssim),
                                   fontsize=10)

            axes[cnt, 2].set_title('K-space')
            axes[cnt, 2].plot(np.real(y_1), np.imag(y_1), 'r.')
            axes[cnt, 2].plot(np.real(y_2), np.imag(y_2), 'b.')
            # axes[cnt,2].set_axis_bgcolor('k')# WARNING set_axis_bgcolor is deprecated in matplotlib 2.0, use next line instead
Example #21
def main(argv=None):
    if tf.gfile.Exists(FLAGS.save_dir):
        tf.gfile.DeleteRecursively(FLAGS.save_dir)
    tf.gfile.MakeDirs(FLAGS.save_dir)
    if tf.gfile.Exists(FLAGS.gen_frm_dir):
        tf.gfile.DeleteRecursively(FLAGS.gen_frm_dir)
    tf.gfile.MakeDirs(FLAGS.gen_frm_dir)

    # load data
    train_input_handle, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name, FLAGS.train_data_paths, FLAGS.valid_data_paths,
        FLAGS.batch_size, FLAGS.img_width, FLAGS.seq_length)

    print("Initializing models")
    model = Model()
    lr = FLAGS.lr

    # Prepare tensorboard logging
    logger = Logger(os.path.join(FLAGS.gen_frm_dir, 'board'), model.sess)
    logger.define_item("loss", Logger.Scalar, ())
    logger.define_item("lr", Logger.Scalar, ())
    logger.define_item("mse", Logger.Scalar, ())
    logger.define_item("psnr", Logger.Scalar, ())
    logger.define_item("fmae", Logger.Scalar, ())
    logger.define_item("ssim", Logger.Scalar, ())
    logger.define_item("sharp", Logger.Scalar, ())
    logger.define_item(
        "image",
        Logger.Image,
        (1, 2 * FLAGS.img_width, FLAGS.img_width, FLAGS.img_channel),
        dtype='uint8')

    for itr in range(1, FLAGS.max_iterations + 1):
        if train_input_handle.no_batch_left():
            train_input_handle.begin(do_shuffle=True)
        ims = train_input_handle.get_batch()
        ims = preprocess.reshape_patch(ims, FLAGS.patch_size)

        logger.add('lr', lr, itr)
        cost = model.train(ims, lr)
        if FLAGS.reverse_input:
            ims_rev = ims[:, ::-1]
            cost += model.train(ims_rev, lr)
            cost = cost / 2
        logger.add('loss', cost, itr)

        if itr % FLAGS.display_interval == 0:
            print('itr: ' + str(itr))
            print('training loss: ' + str(cost))

        if itr % FLAGS.test_interval == 0:
            print('test...')
            test_input_handle.begin(do_shuffle=False)
            res_path = os.path.join(FLAGS.gen_frm_dir, str(itr))
            os.mkdir(res_path)
            avg_mse = 0
            batch_id = 0
            img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                img_mse.append(0)
                ssim.append(0)
                psnr.append(0)
                fmae.append(0)
                sharp.append(0)
            while not test_input_handle.no_batch_left():
                batch_id = batch_id + 1
                test_ims = test_input_handle.get_batch()
                test_dat = preprocess.reshape_patch(test_ims, FLAGS.patch_size)
                img_gen = model.test(test_dat)

                # concat outputs of different gpus along batch
                # img_gen = np.concatenate(img_gen)
                img_gen = preprocess.reshape_patch_back(
                    img_gen[:, np.newaxis, :, :, :], FLAGS.patch_size)
                # MSE per frame
                for i in range(1):
                    x = test_ims[:, -1, :, :, 0]
                    gx = img_gen[:, :, :, 0]
                    fmae[i] += metrics.batch_mae_frame_float(gx, x)
                    gx = np.maximum(gx, 0)
                    gx = np.minimum(gx, 1)
                    mse = np.square(x - gx).sum()
                    img_mse[i] += mse
                    avg_mse += mse

                    real_frm = np.uint8(x * 255)
                    pred_frm = np.uint8(gx * 255)
                    psnr[i] += metrics.batch_psnr(pred_frm, real_frm)
                    for b in range(FLAGS.batch_size):
                        sharp[i] += np.max(
                            cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
                        score, _ = compare_ssim(pred_frm[b],
                                                real_frm[b],
                                                full=True)
                        ssim[i] += score

                # save prediction examples
                if batch_id == 1:
                    sel = np.random.randint(FLAGS.batch_size)
                    img_seq_pd = img_gen[sel]
                    img_seq_gt = test_ims[sel, -1]
                    h, w = img_gen.shape[1:3]
                    out_img = np.zeros((1, h * 2, w * 1, FLAGS.img_channel),
                                       dtype='uint8')
                    for i, img_seq in enumerate([img_seq_gt, img_seq_pd]):
                        img = img_seq
                        img = np.maximum(img, 0)
                        img = np.uint8(img * 10)
                        img = np.minimum(img, 255)
                        out_img[0, (i * h):(i * h + h), :] = img
                    logger.add("image", out_img, itr)

                test_input_handle.next()
            avg_mse = avg_mse / (batch_id * FLAGS.batch_size)
            logger.add('mse', avg_mse, itr)
            print('mse per seq: ' + str(avg_mse))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(img_mse[i] / (batch_id * FLAGS.batch_size))
            psnr = np.asarray(psnr, dtype=np.float32) / batch_id
            fmae = np.asarray(fmae, dtype=np.float32) / batch_id
            ssim = np.asarray(ssim, dtype=np.float32) / \
                (FLAGS.batch_size * batch_id)
            sharp = np.asarray(sharp, dtype=np.float32) / \
                (FLAGS.batch_size * batch_id)
            print('psnr per frame: ' + str(np.mean(psnr)))
            logger.add('psnr', np.mean(psnr), itr)
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(psnr[i])
            print('fmae per frame: ' + str(np.mean(fmae)))
            logger.add('fmae', np.mean(fmae), itr)
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(fmae[i])
            print('ssim per frame: ' + str(np.mean(ssim)))
            logger.add('ssim', np.mean(ssim), itr)
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(ssim[i])
            print('sharpness per frame: ' + str(np.mean(sharp)))
            logger.add('sharp', np.mean(sharp), itr)
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(sharp[i])

        if itr % FLAGS.snapshot_interval == 0:
            model.save(itr)

        train_input_handle.next()
Example #22
index_1 = 7
index_2 = 10
img_arr_1 = train_data[0][index_1].reshape((28, 28))
img_val_1 = train_data[1][index_1]
img_arr_2 = train_data[0][index_2].reshape((28, 28))
img_val_2 = train_data[1][index_2]
ret, thresh_1 = cv2.threshold(np.uint8(img_arr_1 * 255).copy(), 127, 255, cv2.THRESH_BINARY)
ret, thresh_2 = cv2.threshold(np.uint8(img_arr_2 * 255).copy(), 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh_1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt1 = contours[0]
contours, hierarchy = cv2.findContours(thresh_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt2 = contours[0]
formatt = "{:4.2f}"
match_I1 = formatt.format(cv2.matchShapes(cnt1, cnt2, cv2.CONTOURS_MATCH_I1, 0))
match_I2 = formatt.format(cv2.matchShapes(cnt1, cnt2, cv2.CONTOURS_MATCH_I2, 0))
match_I3 = formatt.format(cv2.matchShapes(cnt1, cnt2, cv2.CONTOURS_MATCH_I3, 0))
plt.suptitle("Comparing two images - match_I1 = "+ match_I1+ " - match_I2 = "+ match_I2
             + " - match_I3 = " + match_I3 +'\n Structural similarity = ' + formatt.format(compare_ssim(img_arr_1, img_arr_2)))
plt.subplot(1, 2, 1)
plt.title(str(img_val_1))
fig = plt.imshow(img_arr_1, cmap=cm.binary)
fig.axes.get_xaxis().set_ticks([])
fig.axes.get_yaxis().set_ticks([])
plt.subplot(1, 2, 2)
plt.title(str(img_val_2))
fig = plt.imshow(img_arr_2 , cmap=cm.binary)
fig.axes.get_xaxis().set_ticks([])
fig.axes.get_yaxis().set_ticks([])
plt.tight_layout()
plt.show()
Example #23
label = 'NRMSE = {:.4f}, PSNR = {:.4f}, SSIM = {:.4f}'

fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
ax[0, 0].imshow(image, cmap="gray", vmin=0, vmax=255)
ax[0, 0].set_title("Исходное изображение")
ax[0, 0].scatter(240, 125, color="red", s=0.5)

ax[0, 1].imshow(res_image2, cmap="gray", vmin=0, vmax=255)
ax[0, 1].set_title("Средняя интенсивность")
ax[0, 1].scatter(240, 125, color="red", s=0.5)

nrmse = measure.compare_nrmse(im_true=image,
                              im_test=res_image2,
                              norm_type='Euclidean')
psnr = measure.compare_psnr(im_true=image, im_test=res_image2, data_range=255)
ssim = measure.compare_ssim(image, res_image2, data_range=255)
ax[0, 1].set_xlabel(label.format(nrmse, psnr, ssim))

ax[1, 0].imshow(res_image3, cmap="gray", vmin=0, vmax=255)
ax[1, 0].set_title("Градиент")
ax[1, 0].scatter(240, 125, color="red", s=0.5)

ax[1, 1].imshow(res_image, cmap="gray", vmin=0, vmax=255)
ax[1, 1].set_title("Восстановленное изображение")
ax[1, 1].scatter(240, 125, color="red", s=0.5)

nrmse = measure.compare_nrmse(im_true=image,
                              im_test=res_image,
                              norm_type='Euclidean')
psnr = measure.compare_psnr(im_true=image, im_test=res_image, data_range=255)
ssim = measure.compare_ssim(image, res_image, data_range=255)
Example #24
def dssim(p0, p1, range=255.):
    return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
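
Since SSIM lies in [-1, 1], DSSIM = (1 - SSIM) / 2 turns it into a distance in [0, 1], where 0 means identical. A hypothetical check:

import numpy as np

p = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
print(dssim(p, p))  # 0.0 for identical inputs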
Example #25
import matplotlib.pyplot as plt
import numpy as np
from skimage import measure


for i in range(1, 6):
    f1 = 'true_' + str(i) + '.jpg'
    f2 = 'predict_' + str(i) + '.jpeg'
    a = plt.imread(f1)
    b = plt.imread(f2)
    ssim = measure.compare_ssim(a, b, multichannel=True)
    print(ssim)

Example #26
            Iw_batch = embedding_net.predict_on_batch(
                [Im_32x32_patchs, W, np.array([alpha])])
            # reconstruct Iw
            Iw = vf.tiling(Iw_batch, rec_size=img_rows)
            Iw *= std_normalize
            Iw += mean_normalize if Is_mean_normalized else 0
            Iw[Iw > 255] = 255
            Iw[Iw < 0] = 0
            Iw = np.uint8(Iw.squeeze())
            # PSNR
            #psnr = 10*np.log10(255**2/np.mean((im_gray - Iw)**2))
            psnr = compare_psnr(im_gray, Iw, data_range=255)
            tmp_psnr.append(psnr)
            # SSIM
            tmp_ssim.append(
                compare_ssim(im_gray, Iw, win_size=9, data_range=255))

            # Save sample image
            if n == 0 and save_samples == True:
                cv2.imwrite(
                    os.path.join(sampled_embeded_folder,
                                 '{}_[{}].png'.format(test_img[:-4], alpha)),
                    Iw)

        psnr_values_per_alpha_mean.append(np.mean(tmp_psnr))
        psnr_values_per_alpha_std.append(np.std(tmp_psnr))
        ssim_values_per_alpha_mean.append(np.mean(tmp_ssim))

    psnr_means.append(psnr_values_per_alpha_mean)
    psnr_stds.append(psnr_values_per_alpha_std)
    ssim_means.append(ssim_values_per_alpha_mean)
Example #27
def ssim(gt, pred):

    return compare_ssim(gt.transpose(1, 2, 0),
                        pred.transpose(1, 2, 0),
                        multichannel=True,
                        data_range=gt.max())
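
Note the subtle difference from the near-identical helper in Example #17: here data_range is gt.max() rather than gt.max() - gt.min(), which only matches when the minimum is zero (e.g. magnitude images). A hypothetical call:

import numpy as np

gt = np.abs(np.random.randn(1, 64, 64))  # nonnegative magnitude image
print(ssim(gt, gt.copy()))  # 1.0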
Example #28
    def show_frame(self):
        # Capture frame-by-frame
        frame_a = self.capture.read()[1]
        frame_a = cv2.flip(frame_a, 1)
        # Gray out our first frame
        grayA = cv2.cvtColor(frame_a, cv2.COLOR_BGR2GRAY)
        # Capture second frame
        frame_b = self.capture.read()[1]
        frame_b = cv2.flip(frame_b, 1)
        # Gray out second frame
        grayB = cv2.cvtColor(frame_b, cv2.COLOR_BGR2GRAY)

        # compare Structural Similarity Index (SSIM) between the two frames
        (score, diff) = compare_ssim(grayA, grayB, full=True)
        diff = (diff * 255).astype("uint8")
        self.score = score
        # debug print
        if self.debug:
            # Debug output
            # Text part
            self.debug_output = tk.Text(self.debug_frame,
                                        borderwidth=3,
                                        relief="sunken")
            self.debug_output.config(font=("consolas", 12),
                                     undo=True,
                                     wrap="word")
            self.debug_output.grid(row=0,
                                   column=0,
                                   sticky="nsew",
                                   padx=2,
                                   pady=2)
            self.score_history.append(score)
            # Calculate average
            total_entity = len(self.score_history)
            total_score = 0

            for i in self.score_history:
                total_score += i
            average = total_score / total_entity
            self.debug_output.insert(
                tk.INSERT,
                f'Current SSIM: {score}{" "*int(20-len(str(score)))}'
                f'Average SSIM: {average}\n'
                f'Minimal SSIM: {min(self.score_history)}{" "*int(20-len(str(score)))}'
                f'Max     SSIM: {max(self.score_history)}')

            #create scrollbar for debug output
            self.debug_scrollbar = tk.Scrollbar(
                self.debug_frame, command=self.debug_output.yview)
            self.debug_scrollbar.grid(row=0, column=1, sticky="nsew")
            self.debug_output["yscrollcommand"] = self.debug_scrollbar.set
            self.debug_scrollbar.config(command=self.debug_output.yview)

            print("SSIM: {}".format(self.score))

        # If image score is lower or equal to sensitivity to take action.
        # This means the difference between the two frames are major enough
        # To call it a detection of motion.
        if score <= self.sensitivity:
            # threshold the difference in the frames, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0] if imutils.is_cv2() else cnts[1]

            # compute the bounding box of the contour and then draw the
            # bounding box on both input frames to represent where the two
            # frames differ
            for c in cnts:
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame_a, (x, y), (x + w, y + h), (0, 0, 255), 1)
                cv2.rectangle(frame_b, (x, y), (x + w, y + h), (0, 0, 255), 1)

        # # Display the frame A
        # cv2.imshow("Frame: A", frame_a)

        cv2image = cv2.cvtColor(frame_a, cv2.COLOR_BGR2RGBA)
        img = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        self.vid_frame.imgtk = imgtk
        self.vid_frame.configure(image=imgtk)
        self.vid_frame.after(10, self.show_frame)

        if score <= self.sensitivity:
            # Create a picture and store it in the img folder.
            # The file is named the current date and time.
            if self.save:
                cv2.imwrite(
                    'img/{}.png'.format(
                        datetime.datetime.now().strftime("%d-%B-%Y-%I%M%S%p")),
                    frame_a)

            # If debug modus
            if self.debug:
                # Display debug frames
                cv2.imshow("Frame: B", frame_b)
                cv2.imshow("Diff", diff)
                cv2.imshow("Thresh", thresh)

            self.saved_output.delete("1.0", tk.END)
            self.saved_output.insert(tk.INSERT, "Motion detected")
        else:
            # Text part
            self.saved_output.delete("1.0", tk.END)
            self.saved_output.insert(tk.INSERT, "No Motion")
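
One portability note: OpenCV 4 reverted findContours to returning two values, so the is_cv2() switch above breaks there. imutils ships a version-safe helper; a hedged replacement for that contour lookup:

            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)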
Example #29
ratio = 1
sigma = 12
list_psnr = []
list_ssim = []
test_dir = 'data/Test/Set68'
for im in os.listdir(test_dir):
    if im.endswith(".jpg") or im.endswith(".bmp") or im.endswith(".png"):
        x = cv2.imread(os.path.join(test_dir, im), 0)
        x = x.astype('float32')
        y = np.random.poisson(ratio * x) / (ratio)
        y = y.astype('float32')
        out = np.array(pybm3d.bm3d.bm3d(y, sigma))
        list_psnr.append(compare_psnr(x / 255.0, out / 255.0))
        list_ssim.append(compare_ssim(x / 255.0, out / 255.0))
        plt.imshow(out, cmap='gray')
        print(
            '%s psnr: %s, ssim: %s' % (im, compare_psnr(
                x / 255.0, out / 255.0), compare_ssim(x / 255.0, out / 255.0)))
mean_psnr = np.mean(list_psnr)
mean_ssim = np.mean(list_ssim)
print('psnr:{0}, ssim:{1}'.format(mean_psnr, mean_ssim))

#gnoise = np.random.normal(0, 25/255.0, x.shape)
#z = x+gnoise

#flag = 'p'
#if flag == 'p':
#    plt.imshow(y,cmap='gray')
#else:
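
A caveat on the float comparisons above: older scikit-image releases infer data_range from the dtype for float inputs (treating the range as 2.0), which skews SSIM for [0, 1] images. Passing it explicitly, as a hedged variant of the two append lines:

        list_psnr.append(compare_psnr(x / 255.0, out / 255.0, data_range=1.0))
        list_ssim.append(compare_ssim(x / 255.0, out / 255.0, data_range=1.0))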
Example #30
def test_HD720p(model=model,
                use_cuda=args.use_cuda,
                save_which=args.save_which,
                dtype=args.dtype):
    files = sorted(os.listdir(HD720p_Other_DATA))
    unique_id = str(random.randint(0, 100000))
    gen_dir = os.path.join(HD720p_Other_RESULT, unique_id)
    os.mkdir(gen_dir)

    for file_i in files:
        print("\n\n\n**************")
        print(file_i)
        gen_file = os.path.join(HD720p_Other_RESULT, unique_id, file_i)
        input_file = os.path.join(HD720p_Other_DATA, file_i)

        interp_error = AverageMeter()
        psnr_error = AverageMeter()
        ssim_error = AverageMeter()

        print(input_file)
        print(gen_file)
        Reader = YUV_Read(input_file, 720, 1280, toRGB=True)
        Writer = YUV_Write(gen_file, fromRGB=True)

        for index in range(0, 100, 2):  # len(files) - 2, 2):

            IMAGE1, success1 = Reader.read(index)
            IMAGE2, success2 = Reader.read(index + 2)
            if not success1 or not success2:
                break

            X0 = torch.from_numpy(
                np.transpose(IMAGE1,
                             (2, 0, 1)).astype("float32") / 255.0).type(dtype)
            X1 = torch.from_numpy(
                np.transpose(IMAGE2,
                             (2, 0, 1)).astype("float32") / 255.0).type(dtype)

            y_ = torch.FloatTensor()

            assert (X0.size(1) == X1.size(1))
            assert (X0.size(2) == X1.size(2))

            intWidth = X0.size(2)
            intHeight = X0.size(1)
            channel = X0.size(0)
            if not channel == 3:
                continue

            if intWidth != ((intWidth >> 7) << 7):
                intWidth_pad = (
                    ((intWidth >> 7) + 1) << 7)  # more than necessary
                intPaddingLeft = int((intWidth_pad - intWidth) / 2)
                intPaddingRight = intWidth_pad - intWidth - intPaddingLeft
            else:
                intWidth_pad = intWidth
                intPaddingLeft = 32
                intPaddingRight = 32

            if intHeight != ((intHeight >> 7) << 7):
                intHeight_pad = (
                    ((intHeight >> 7) + 1) << 7)  # more than necessary
                intPaddingTop = int((intHeight_pad - intHeight) / 2)
                intPaddingBottom = intHeight_pad - intHeight - intPaddingTop
            else:
                intHeight_pad = intHeight
                intPaddingTop = 32
                intPaddingBottom = 32

            pader = torch.nn.ReplicationPad2d([
                intPaddingLeft, intPaddingRight, intPaddingTop,
                intPaddingBottom
            ])

            X0 = Variable(torch.unsqueeze(X0, 0), volatile=True)
            X1 = Variable(torch.unsqueeze(X1, 0), volatile=True)
            X0 = pader(X0)
            X1 = pader(X1)

            if use_cuda:
                X0 = X0.cuda()
                X1 = X1.cuda()
            y_s, offset, filter, occlusion = model(torch.stack((X0, X1),
                                                               dim=0))
            y_ = y_s[save_which]

            if use_cuda:
                X0 = X0.data.cpu().numpy()
                y_ = y_.data.cpu().numpy()
                offset = [offset_i.data.cpu().numpy() for offset_i in offset]
                filter = [filter_i.data.cpu().numpy() for filter_i in filter
                          ] if filter[0] is not None else None
                occlusion = [
                    occlusion_i.data.cpu().numpy() for occlusion_i in occlusion
                ] if occlusion[0] is not None else None
                X1 = X1.data.cpu().numpy()
            else:
                X0 = X0.data.numpy()
                y_ = y_.data.numpy()
                offset = [offset_i.data.numpy() for offset_i in offset]
                filter = [filter_i.data.numpy() for filter_i in filter]
                occlusion = [
                    occlusion_i.data.numpy() for occlusion_i in occlusion
                ]
                X1 = X1.data.numpy()

            X0 = np.transpose(
                255.0 *
                X0.clip(0, 1.0)[0, :, intPaddingTop:intPaddingTop + intHeight,
                                intPaddingLeft:intPaddingLeft + intWidth],
                (1, 2, 0))
            y_ = np.transpose(
                255.0 *
                y_.clip(0, 1.0)[0, :, intPaddingTop:intPaddingTop + intHeight,
                                intPaddingLeft:intPaddingLeft + intWidth],
                (1, 2, 0))
            offset = [
                np.transpose(
                    offset_i[0, :, intPaddingTop:intPaddingTop + intHeight,
                             intPaddingLeft:intPaddingLeft + intWidth],
                    (1, 2, 0)) for offset_i in offset
            ]
            filter = [
                np.transpose(
                    filter_i[0, :, intPaddingTop:intPaddingTop + intHeight,
                             intPaddingLeft:intPaddingLeft + intWidth],
                    (1, 2, 0)) for filter_i in filter
            ] if filter is not None else None
            occlusion = [
                np.transpose(
                    occlusion_i[0, :, intPaddingTop:intPaddingTop + intHeight,
                                intPaddingLeft:intPaddingLeft + intWidth],
                    (1, 2, 0)) for occlusion_i in occlusion
            ] if occlusion is not None else None
            X1 = np.transpose(
                255.0 *
                X1.clip(0, 1.0)[0, :, intPaddingTop:intPaddingTop + intHeight,
                                intPaddingLeft:intPaddingLeft + intWidth],
                (1, 2, 0))

            Writer.write(IMAGE1)
            rec_rgb = np.round(y_).astype(np.uint8)
            Writer.write(rec_rgb)
            gt_rgb, success = Reader.read(index + 1)
            gt_yuv = rgb2yuv(gt_rgb / 255.0)
            rec_yuv = rgb2yuv(rec_rgb / 255.0)

            # keep only the Y (luma) channel for the error metrics
            gt_rgb = gt_yuv[:, :, 0] * 255.0
            rec_rgb = rec_yuv[:, :, 0] * 255.0

            gt_rgb = gt_rgb.astype('uint8')
            rec_rgb = rec_rgb.astype('uint8')

            diff_rgb = 128.0 + rec_rgb - gt_rgb
            avg_interp_error_abs = np.mean(np.abs(diff_rgb - 128.0))

            interp_error.update(avg_interp_error_abs, 1)

            mse = np.mean((diff_rgb - 128.0) ** 2)
            PIXEL_MAX = 255.0
            if mse == 0:
                # identical frames: cap the PSNR instead of returning early
                psnr = 100.0
            else:
                psnr = 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
            psnr_error.update(psnr, 1)

            psnr_ = compare_psnr(rec_rgb, gt_rgb)
            print(str(psnr) + '\t' + str(psnr_))

            ssim = compare_ssim(rec_rgb, gt_rgb, multichannel=False)
            ssim_error.update(ssim, 1)

            diff_rgb = diff_rgb.astype("uint8")

            print("interpolation error / PSNR : " +
                  str(round(avg_interp_error_abs, 4)) + " ,\t  psnr " +
                  str(round(psnr, 4)) + ",\t ssim " + str(round(ssim, 5)))
            with open(os.path.join(HD720p_Other_RESULT, unique_id,
                                   file_i + "_psnr_Y.txt"), "a+") as fh:
                fh.write(str(psnr))
                fh.write("\n")
            with open(os.path.join(HD720p_Other_RESULT, unique_id,
                                   file_i + "_ssim_Y.txt"), "a+") as fh:
                fh.write(str(ssim))
                fh.write("\n")
            metrics = "The average interpolation error / PSNR for all images are : " + str(
                round(interp_error.avg, 4)) + ",\t  psnr " + str(
                    round(psnr_error.avg, 4)) + ",\t  ssim " + str(
                        round(ssim_error.avg, 4))
            print(metrics)

        metrics = "The average interpolation error / PSNR for all images are : " + str(
            round(interp_error.avg, 4)) + ",\t  psnr " + str(
                round(psnr_error.avg, 4)) + ",\t  ssim " + str(
                    round(ssim_error.avg, 4))
        print(metrics)
        with open(os.path.join(HD720p_Other_RESULT, unique_id,
                               file_i + "_psnr_Y.txt"), "a+") as fh:
            fh.write("\n")
            fh.write(str(psnr_error.avg))
            fh.write("\n")
        with open(os.path.join(HD720p_Other_RESULT, unique_id,
                               file_i + "_ssim_Y.txt"), "a+") as fh:
            fh.write("\n")
            fh.write(str(ssim_error.avg))
            fh.write("\n")
Example #31
    def update_stats_db(self, redis_time, spark_time, tot_time):
        """stats database: for frontend to read

    id => [tags as words, 
           total num, 
           num filtered, 
           redis tag retr time, 
           spark filter time, 
           tot time, 
           structural similarity, 
           url original, 
           url new]
        """

        # connect to S3 bucket
        c = boto.connect_s3()
        src = c.get_bucket(self.incoming_bucket, validate=False)
        dst = c.get_bucket(self.main_bucket, validate=False)
        k_src = Key(src)
        k_dst = Key(dst)

        # database updates differ depending on the result
        if not self.result:
            print("No match found; adding it to the database...")

            k_src.key = "{}.jpg".format(self.incoming_img_id)
            k_dst.key = "valid/img{}.jpg".format(self.incoming_img_id)

            # copy image from source bucket to the main bucket
            dst.copy_key(k_dst.key, src.name, k_src.key)

            print("Updating tags database..")
            update_db(self.incoming_img_tags,
                      "img{}".format(self.incoming_img_id), self.r_tags)

            sample_diff_img = load_from_S3(ak=self.awsak,
                                           sk=self.awssk,
                                           image_id=self.img_list[0],
                                           bucket_name=self.main_bucket)

            self.save_runtimes(redis_time, spark_time, tot_time)

            self.stats_list.append("Not found")

            ssim = compare_ssim(
                self.incoming_im_resized[0],
                sample_diff_img[0],
                multichannel=(not self.incoming_img_multichannel))
            self.stats_list.append(str(round(ssim * 100, 2)) + "%")

            # get URLs for images to display on the UI
            k_src.key = "{}{}.jpg".format(self.incoming_im_resized[3],
                                          self.incoming_img_id)
            k_dst.key = "{}{}.jpg".format(sample_diff_img[3],
                                          sample_diff_img[2])
            url_orig = k_dst.generate_url(expires_in=0, query_auth=False)
            url_incoming = k_src.generate_url(expires_in=0, query_auth=False)

            self.stats_list.append(url_orig)
            self.stats_list.append(url_incoming)

        else:
            print("\n\n\n\n\nDuplicate found...\n\n")

            self.save_runtimes(redis_time, spark_time, tot_time)

            self.stats_list.append("Found")

            ssim = compare_ssim(
                self.incoming_im_resized[0],
                self.result[0][0],
                multichannel=(not self.incoming_img_multichannel))
            self.stats_list.append(str(round(ssim * 100, 2)) + "%")

            k_src.key = "{}{}.jpg".format(self.incoming_im_resized[3],
                                          self.incoming_img_id)
            k_dst.key = "{}{}.jpg".format(self.result[0][3], self.result[0][2])
            url_orig = k_dst.generate_url(expires_in=0, query_auth=False)
            url_incoming = k_src.generate_url(expires_in=0, query_auth=False)
            self.stats_list.append(url_orig)
            self.stats_list.append(url_incoming)

        # NOTE: sample_diff_img is only bound in the no-match branch above,
        # so this lookup raises a NameError when a duplicate was found
        for stat in self.stats_list:
            self.r_stats.rpush(sample_diff_img[2], stat)
Example #32

import os

import numpy as np
from skimage import io, color, measure

# imgCrop, shave, HRpath, SRpath, Logpath, upscale and mode are
# defined earlier in the original script

HRnames = sorted(os.listdir(HRpath))
SRnames = sorted(os.listdir(SRpath))
psnr_list = []
ssim_list = []

for i in range(0, len(HRnames)):
    HRimage = imgCrop(os.path.join(HRpath, HRnames[i]), upscale)
    SRimage = io.imread(os.path.join(SRpath, SRnames[i]))
    if mode == 'YCbCr':
        SRimage = color.rgb2ycbcr(SRimage)
        HR = shave(HRimage, upscale)
        SR = shave(SRimage[:, :, 0], upscale)
        psnr = measure.compare_psnr(HR, SR, data_range=255)
        # HR and SR are single-channel (Y) here, so no multichannel flag
        ssim = measure.compare_ssim(HR, SR, data_range=255)
    else:
        HR = shave(HRimage, upscale)
        SR = shave(SRimage, upscale)
        psnr = measure.compare_psnr(HR, SR, data_range=255)
        ssim = measure.compare_ssim(HR, SR, data_range=255, multichannel=True)
    print('PSNR:%0.04f   SSIM:%0.04f' % (psnr, ssim))
    psnr_list.append(psnr)
    ssim_list.append(ssim)
    with open(Logpath, 'a') as f:
        f.write('%s    %0.04f    %0.04f \n' % (SRnames[i], psnr, ssim))
average_psnr = np.mean(np.asarray(psnr_list))
average_ssim = np.mean(np.asarray(ssim_list))
print('Mean PSNR: %0.02f  Mean SSIM: %0.04f' % (average_psnr, average_ssim))
with open(Logpath, 'a') as f:
    f.write('Average    %0.04f    %0.04f \n' % (average_psnr, average_ssim))
Example #33
#Plot target image after cut
#io.imshow(img_target_cut)
#plt.show()


SSIM_max = 0

for i in range(1, 6):

    filenum = str(i)
    filename = "f" + filenum + ".png"
    img_compare = imread(filename)
    img_compare_cut = img_compare[img_bottom:img_top, img_left:img_right]

    SSIM = compare_ssim(img_target_cut, img_compare_cut, multichannel=True)

    print(i)
    print(SSIM)


#To plot images in loop - joegi
    #plt.figure(0)
    #io.imshow(img_target_cut)
    #plt.figure(1)
    #io.imshow(img_compare_cut)
    #plt.show()


    if SSIM > SSIM_max:
        SSIM_max = SSIM
Example #34
def fix_offset(annotation):
    con = connect(database=DB_NAME,
                  host=DB_HOST,
                  user=DB_USER,
                  password=DB_PASSWORD)
    cursor = con.cursor()

    # get video name
    cursor.execute("SELECT filename FROM videos WHERE id=%s",
                   (str(annotation.videoid), ))
    video_name = cursor.fetchone()[0]

    # grab video stream
    url = s3.generate_presigned_url('get_object',
                                    Params={
                                        'Bucket': S3_BUCKET,
                                        'Key':
                                        S3_VIDEO_FOLDER + str(video_name)
                                    },
                                    ExpiresIn=100)
    cap = cv2.VideoCapture(url)
    fps = cap.get(cv2.CAP_PROP_FPS)

    # search within +/- `frames` frames of the original annotation,
    # converted to seconds via the video fps
    frames = 20
    search_range = 1 / fps * frames

    # start the video `search_range` seconds before the annotation
    cap.set(cv2.CAP_PROP_POS_MSEC,
            (annotation.timeinvideo - search_range) * 1000)

    imgs = []
    times = []
    for i in range(math.ceil(fps * search_range * 2)):
        check, vid = cap.read()
        if not check:
            print("end of video reached")
            break
        img = imutils.resize(vid, width=VIDEO_WIDTH, height=VIDEO_HEIGHT)
        time = cap.get(cv2.CAP_PROP_POS_MSEC)
        times.append(time)
        imgs.append(img)
    cap.release()

    # get s3 image
    try:
        obj = s3.get_object(Bucket=S3_BUCKET,
                            Key=S3_ANNOTATION_FOLDER + annotation.image)
    except Exception:
        # annotation is missing its image
        con.close()
        return
    img = Image.open(obj['Body'])
    img = np.asarray(img)
    img = img[:, :, :3]    # drop any alpha channel
    img = img[:, :, ::-1]  # RGB -> BGR to match the OpenCV frames
    img = cv2.resize(img, (VIDEO_WIDTH, VIDEO_HEIGHT))

    best_score = 0
    best = None
    for i in range(math.ceil(fps * search_range)):
        # +1 or -1
        for s in range(-1, 2, 2):
            index = math.ceil(fps * search_range) + i * s
            if index >= len(imgs):
                continue
            (score, diff) = compare_ssim(img,
                                         imgs[index],
                                         full=True,
                                         multichannel=True)
            if best_score < score:
                best = index
                best_score = score

            if best_score > .95:
                cursor.execute(
                    "UPDATE annotations SET timeinvideo=%s, originalid=NULL WHERE id=%s;",
                    (
                        times[best] / 1000,
                        annotation.id,
                    ))
                con.commit()
                con.close()
                return
    cursor.execute("UPDATE annotations SET unsure=TRUE WHERE id=%d;",
                   (annotation.id, ))
    con.commit()
    con.close()
Example #35
import argparse

import cv2
import imutils
from skimage.measure import compare_ssim

ap = argparse.ArgumentParser()
ap.add_argument("--original",
                required=True,
                help="the original image file")
ap.add_argument("--cloaked",
                required=True,
                help="the cloaked image file")
args = vars(ap.parse_args())

# load the two input images
imageA = cv2.imread(args["original"])
imageB = cv2.imread(args["cloaked"])

# convert the images to grayscale
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))

# threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and then draw the
    # bounding box on both input images to represent where the two
    # images differ
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)
Example #36
import cv2
import imutils

from skimage.measure import compare_ssim  # SSIM: structural similarity index

image_good = cv2.imread('D:/image/good_image.png')
image_bad = cv2.imread('D:/image/bad_image.png')

# convert the images to grayscale
gray_good = cv2.cvtColor(image_good, cv2.COLOR_BGR2GRAY)
gray_bad = cv2.cvtColor(image_bad, cv2.COLOR_BGR2GRAY)

# compute the SSIM score and difference image between the two images
(score, diff) = compare_ssim(gray_good, gray_bad, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))

#obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# loop over the contours
for c in cnts:
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(image_good, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.rectangle(image_bad, (x, y), (x + w, y + h), (0, 0, 255), 2)
Example #37
def dssim(p0, p1, data_range=255.):
    # structural dissimilarity: 0 for identical images, 1 when SSIM == -1
    return (1 - compare_ssim(p0, p1, data_range=data_range, multichannel=True)) / 2.
Example #38
def img_analytics(z1, z2):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return {'ssim': compare_ssim(z1, z2, multichannel=False),
                'psnr': __colorPSNR(z1, z2)}
Example #39
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    template = os.path.join(canonical_dir, 'patty.png')

    img1 = imread(template)
    # img1_padded = numpy.zeros( (256, 256,3), dtype=numpy.uint8)
    img1_padded = numpy.resize( [255,255,255], (256, 256, 3))
    s = img1.shape
    img1_padded[:s[0], :s[1]] = img1
    img1_gray = rgb2gray(img1)

    descriptor_extractor = ORB()

    descriptor_extractor.detect_and_extract(img1_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    # g = glob.glob(os.path.join(image_base_dir, 'patty*.nobox.png'))
    # for moving in g:
    while True:
        rot, tx, ty, scale = get_random_orientation()
        # img2 = imread(moving)
        img2 = draw_example('patty', 256, 256, rot, tx, ty, scale)
        img2_gray = rgb2gray(img2)

        try:
            descriptor_extractor.detect_and_extract(img2_gray)
        except RuntimeError:
            continue
        
        keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        src = keypoints2[matches12[:, 1]][:, ::-1]
        dst = keypoints1[matches12[:, 0]][:, ::-1]

        model_robust, inliers = \
            ransac((src, dst), SimilarityTransform,
                   min_samples=4, residual_threshold=2)
        if model_robust is None:
            print("bad")
            continue
        img2_transformed = transform.warp(img2, model_robust.inverse, mode='constant', cval=1)
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        sub = img2_transformed - img1_padded_float
        print(compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True))
        fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
        ax = axes.ravel()

        ax[0].imshow(img1_padded_float)
        ax[1].imshow(img2)
        ax[1].set_title("Template image")
        ax[2].imshow(img2_transformed)
        ax[2].set_title("Matched image")
        ax[3].imshow(sub)
        ax[3].set_title("Subtracted image")
        # plt.gray()

        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)


        plt.show()
Example #40
def test(model, test_input_handle, configs, save_name):
  """Evaluates a model."""
  logger.info('start testing')
  test_input_handle.begin(do_shuffle=False)
  res_path = os.path.join(configs.gen_frm_dir, str(save_name))
  # os.mkdir(res_path)
  os.makedirs(res_path, exist_ok=True)
  avg_mse = 0
  batch_id = 0
  img_mse, ssim, psnr = [], [], []
  output_length = configs.total_length - configs.input_length

  for i in range(output_length):
    img_mse.append(0)
    ssim.append(0)
    psnr.append(0)

  real_input_flag_zero = np.zeros((configs.batch_size, output_length - 1,
                                   configs.img_width // configs.patch_size,
                                   configs.img_width // configs.patch_size,
                                   configs.patch_size**2 * configs.img_channel)).astype(np.float32)

  while not test_input_handle.no_batch_left():
    batch_id = batch_id + 1
    test_ims = test_input_handle.get_batch()
    test_dat = preprocess.reshape_patch(test_ims, configs.patch_size)
    # test_dat = np.split(test_dat, configs.n_gpu)
    img_gen = model.test(test_dat, real_input_flag_zero)

    # Concat outputs of different gpus along batch
    img_gen = np.concatenate(img_gen)
    img_gen = preprocess.reshape_patch_back(img_gen, configs.patch_size)
    img_out = img_gen[:, -output_length:]
    target_out = test_ims[:, -output_length:]
    # MSE per frame
    for i in range(output_length):
      x = target_out[:, i]
      gx = img_out[:, i]
      gx = np.maximum(gx, 0)
      gx = np.minimum(gx, 1)
      mse = np.square(x - gx).sum()
      img_mse[i] += mse
      avg_mse += mse
      for b in range(configs.batch_size):
          ssim[i] += compare_ssim(x[b], gx[b], multichannel=True)
      x = np.uint8(x * 255)
      gx = np.uint8(gx * 255)
      psnr[i] += batch_psnr(gx, x)

    # save prediction examples
    if batch_id <= configs.num_save_samples:
      path = os.path.join(res_path, str(batch_id))
      os.makedirs(path, exist_ok=True)
      # os.mkdir(path)
      for i in range(configs.total_length):
        if (i + 1) < 10:
          name = 'gt0' + str(i + 1) + '.png'
        else:
          name = 'gt' + str(i + 1) + '.png'
        file_name = os.path.join(path, name)
        img_gt = np.uint8(test_ims[0, i] * 255)
        cv2.imwrite(file_name, img_gt)
      for i in range(output_length):
        if (i + configs.input_length + 1) < 10:
          name = 'pd0' + str(i + configs.input_length + 1) + '.png'
        else:
          name = 'pd' + str(i + configs.input_length + 1) + '.png'
        file_name = os.path.join(path, name)
        img_pd = img_gen[0, i]
        img_pd = np.maximum(img_pd, 0)
        img_pd = np.minimum(img_pd, 1)
        img_pd = np.uint8(img_pd * 255)
        cv2.imwrite(file_name, img_pd)
    # else:

    # if batch_id > configs.num_save_samples*10:
    #   break
    test_input_handle.next()

  if batch_id == 0:
    return

  avg_mse = avg_mse / (batch_id * configs.batch_size * configs.n_gpu)
  logger.info('mse per seq: ' + str(avg_mse))
  for i in range(output_length):
    print(img_mse[i] / (batch_id * configs.batch_size * configs.n_gpu))

  psnr = np.asarray(psnr, dtype=np.float32) / batch_id
  logger.info('psnr per frame: ' + str(np.mean(psnr)))
  for i in range(output_length):
    print(psnr[i])

  ssim = np.asarray(ssim, dtype=np.float32) / (configs.batch_size * batch_id)
  print('ssim per frame: ' + str(np.mean(ssim)))
  for i in range(output_length):
      print(ssim[i])
Example #41
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    fig, axes = plt.subplots(7, 7, figsize=(7, 6), sharex=True, sharey=True)

    fig.delaxes(axes[0][0])

    ssims = numpy.zeros( (len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
    mses = numpy.zeros( (len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
                         
    for i, layer in enumerate(BurgerElement.__members__):
        template = os.path.join(canonical_dir, '%s.png' % layer)

        img1 = imread(template)
        # img1_padded = numpy.zeros( (WIDTH, HEIGHT,3), dtype=numpy.uint8)
        img1_padded = numpy.resize( [255,255,255], (WIDTH, HEIGHT, 3))
        s = img1.shape
        w = s[0]
        h = s[1]
        nb = img1_padded.shape[0]
        na = img1.shape[0]
        lower1 = (nb) // 2 - (na // 2)
        upper1 = (nb // 2) + (na // 2)
        nb = img1_padded.shape[1]
        na = img1.shape[1]
        lower2 = (nb) // 2 - (na // 2)
        upper2 = (nb // 2) + (na // 2)
        img1_padded[lower1:upper1, lower2:upper2] = img1
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        print(img1_padded_float.shape)
        img1_gray = rgb2gray(img1_padded_float)

        descriptor_extractor = ORB()

        try:
            descriptor_extractor.detect_and_extract(img1_gray)
        except RuntimeError:
            continue
        
        keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors

        axes[i][0].imshow(img1_padded_float)
        axes[i][0].set_title("Template image")

        for j, layer2 in enumerate(BurgerElement.__members__):

            rot, tx, ty, scale = get_random_orientation()
            img2 = draw_example(layer2, WIDTH, HEIGHT, rot, tx, ty, scale)

            # match = os.path.join(canonical_dir, '%s.png' % layer2)
            # img2 = imread(match)

            img2_padded = numpy.resize( [255,255,255], (WIDTH, HEIGHT, 3))
            s = img2.shape
            img2_padded[:s[0], :s[1]] = img2
            img2_padded_float = img2_padded.astype(numpy.float64)/255.
            img2_gray = rgb2gray(img2_padded_float)

            try:
                descriptor_extractor.detect_and_extract(img2_gray)
            except RuntimeError:
                continue

            keypoints2 = descriptor_extractor.keypoints
            descriptors2 = descriptor_extractor.descriptors

            matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

            src = keypoints2[matches12[:, 1]][:, ::-1]
            dst = keypoints1[matches12[:, 0]][:, ::-1]

            model_robust, inliers = \
                ransac((src, dst), SimilarityTransform,
                       min_samples=4, residual_threshold=2)
            if model_robust is None:
                print("bad")
                continue
            img2_transformed = transform.warp(img2_padded_float, model_robust.inverse, mode='constant', cval=1)
            sub = img2_transformed - img1_padded_float
            ssim = compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True)
            mse = compare_mse(img2_transformed, img1_padded_float)
            ssims[i,j] = ssim
            mses[i,j] = mse

            axes[0][j].imshow(img2_padded_float)
            axes[0][j].set_title("Match image")

            axes[i][j].imshow(img2_transformed)
            axes[i][j].set_title("Transformed image")
            axes[i][j].set_xlabel("SSIM: %9.4f MSE: %9.4f" % (ssim, mse))

        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)

    print(ssims)
    print(numpy.argmax(ssims, axis=1))
    print(numpy.argmin(mses, axis=1))
                       
    plt.show()
Example #42
                img_array = np.zeros(shape=[height, width])

                # clamp reconstructed values into the valid luma range
                for i in range(height):
                    for j in range(width):
                        if final_matrix[i, j] < 0:
                            img_array[i, j] = 16.0
                        elif final_matrix[i, j] > 255:
                            img_array[i, j] = 239
                        else:
                            img_array[i, j] = final_matrix[i, j]

                img_array = img_array.astype(np.uint8, copy=False)

                # cv2.imwrite("noncausal1/%d.jpg" % index, img_array)
                original_matrix = cv2.imread("non_test_300/%d.png" % index, 0)

                mse_all += mean_squared_error(original_matrix, img_array)
                psnr += cv2.PSNR(original_matrix, img_array)
                (score, diff) = compare_ssim(original_matrix, img_array, full=True)

                ssim_all += score
                msssim_all += msssim(original_matrix, img_array)
                decoded_blocks = []
                index += 1

print("mse result:", mse_all / 300)
print("psnr result:", psnr / 300)
print("ssim result:", ssim_all / 300)
print("msssim result:", msssim_all / 300)
Example #43
def cal_ssim(ax, cx):
    return measure.compare_ssim(ax,
                                cx,
                                multichannel=True,
                                data_range=255,
                                win_size=11)
Example #44
def compare(img1, img2, threshold=0.5):
    # images match when their SSIM exceeds the threshold
    return compare_ssim(img1, img2) > threshold
Example #45
import argparse

import cv2
import imutils
from skimage.measure import compare_ssim

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="first input image")
ap.add_argument("-s", "--second", required=True,
                help="second input image")
args = vars(ap.parse_args())

# load the two input images
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])

# convert the images to grayscale
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))

# threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and then draw the
    # bounding box on both input images to represent where the two
    # images differ
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)
Example #46
def save_images_test(webpage,
                     visuals,
                     image_path,
                     aspect_ratio=1.0,
                     width=256):
    """Save images to the disk.

    Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
        visuals (OrderedDict)    -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
        image_path (str)         -- the string is used to create image paths
        aspect_ratio (float)     -- the aspect ratio of saved images
        width (int)              -- the images will be resized to width x width

    This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
    """
    image_dir = webpage.get_image_dir()
    file_dir = webpage.get_file_dir()
    diff_dir = webpage.get_diff_dir()
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]

    webpage.add_header(name)
    ims, txts, links = [], [], []

    for label, im_data in visuals.items():

        label2 = 'difference'
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        img_diff_name = '%s_%s.png' % (name, label2)
        file_name = '%s_%s.txt' % (name, label)
        diff_name = '%s.txt' % (name)
        save_path = os.path.join(image_dir, image_name)
        save_path2 = os.path.join(file_dir, file_name)
        save_path3 = os.path.join(diff_dir, diff_name)
        save_path4 = os.path.join(image_dir, img_diff_name)

        h, w, _ = im.shape
        if aspect_ratio > 1.0:
            im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
        if aspect_ratio < 1.0:
            im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')

        util.save_image2(im_data.view(-1, 240).cpu().numpy(), save_path)

        np.savetxt(save_path2,
                   im_data.view(-1, 240).cpu().numpy(),
                   delimiter=",")

        ims.append(image_name)
        txts.append(label)
        links.append(image_name)

        if label == 'fake_B':
            with open(save_path2) as f:
                fake_img = np.genfromtxt(f, delimiter=',',
                                         dtype='float64').astype('float32')
        if label == 'real_B':
            with open(save_path2) as f2:
                real_img = np.genfromtxt(f2, delimiter=',',
                                         dtype='float64').astype('float32')

                # calculate the difference between real and fake imgs
                difference = fake_img - real_img
                # save difference results in txt file
                np.savetxt(save_path3, difference, delimiter=",")
                util.save_image2(difference, save_path4)

                ims.append(img_diff_name)
                txts.append(label2)
                links.append(img_diff_name)

                # calculate the mean squared error
                error = mean_squared_error(real_img, fake_img)

                # calculate the pearson correlation coefficient
                coe = np.corrcoef(real_img.flat, fake_img.flat)[0, 1]

                # calculate the structural similarity index
                ssi = measure.compare_ssim(real_img, fake_img)

    webpage.add_images(ims, txts, links, width=width)
    return error, coe, ssi
Example #47
def extract_text(img):
    # Function to extract text boxes, line colors and decipher
    # text using ocr engine
    global G

    # Grab digit images and load them into array
    # Compute and store histograms for each image
    digits = pickle.load(open("digits.p", 'rb'))
    digits2 = pickle.load(open("digits2.p", 'rb'))

    # Shape detector to tell the shape of contour
    sd = ShapeDetector()

    # Filter out ecu boxes
    ecu_img = line_filter(img, color="ECU", rtype='rgb')
    ecu_img = cv2.medianBlur(ecu_img, 5)

    # Enlarge the boxes so we can overwrite the dashed or
    # solid lines
    kernel = np.ones((9, 9), np.uint8)
    _ecu_img = cv2.dilate(ecu_img, kernel, iterations=3)

    # Grab the contours and then fill them with white to
    # erase them from the image
    ecu_contours, centers = get_contours(_ecu_img)
    no_boxes = clear_contours(img, ecu_contours)
    ecu_rects = get_bound_rects(ecu_contours)

    # Convert to gray and filter out the colored wires
    # so it is just text
    just_text = bw_filter(no_boxes, rtype='rgb')

    # Add circle boxes for later relationship building
    kernel2 = np.ones((5, 5), np.uint8)
    erode = cv2.erode(just_text, kernel2, iterations=1)
    median = cv2.medianBlur(erode, 5)
    temp_contours, circle_centers = get_contours(median)

    # Loop through and grab only circle contours
    circle_centers = []
    for c in temp_contours:
        if sd.detect(c) == "circle":
            M = cv2.moments(c)
            if M["m00"] == 0:
                continue
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            circle_centers.append((cX, cY))

    # Dilate text to get a rectangular contour
    # (Thanksgiving Approach)
    enlarged = cv2.dilate(just_text, kernel, iterations=5)

    # Grab text contours and convert to rectangular
    # boundries
    text_contours, centers = get_contours(enlarged)
    params = get_bound_rects(text_contours)

    # Decipher/Filter text and store mappings to
    # center and bounding rectangle
    ecu_map = {}
    decoded_text = []
    brgb = Image.fromarray(img, 'RGB')
    for i, _roi in enumerate(params):

        roi = list(_roi)
        roi[2] += roi[0]
        roi[3] += roi[1]
        roi = tuple(roi)
        ar = float(_roi[2]) / _roi[3]

        text_roi = brgb.crop(roi)
        text = pytesseract.image_to_string(text_roi)

        if lc_check(text):
            pass

        elif len(text) == 0 and (.65 < ar < 1.25):
            text_roi = text_roi.resize((100, 80)).convert('L')

            best_score = 0
            best_index = 0
            for i2, d in enumerate(digits):
                cmp_score = compare_ssim(d,
                                         np.array(text_roi),
                                         data_range=d.max() - d.min())
                cmp_score2 = compare_ssim(digits2[i2],
                                          np.array(text_roi),
                                          data_range=d.max() - d.min())
                score = max(cmp_score, cmp_score2)
                if score > best_score:
                    best_score = score
                    best_index = i2 + 1

            if _roi[0] < 200:
                ecu_map[i] = (_roi, "IN-" + str(best_index))
            elif _roi[0] > img.shape[1] - 200:
                ecu_map[i] = (_roi, "OUT-" + str(best_index))

        else:
            # Check if text passes filter test
            if filter_text(text) != "":
                if text in decoded_text:
                    continue
                else:
                    decoded_text.append(text)

                (closest_ecu, text_near) = text2_ecu(ecu_rects, _roi, text)
                if not any(closest_ecu):
                    pass
                else:
                    ecu_map[i] = (closest_ecu, text_near)

    # Create nodes for each ecu and outward/inward
    for k, v in ecu_map.items():
        val = v[1].replace("\n\n", "\n")
        G.add_node(val)

    return ecu_map
Example #48
            if "_x" + str(scale) in image_name:
                count += 1
                print("Processing ", image_name)
                im_gt_y = sio.loadmat(image_name)['im_gt_y']
                im_b_y = sio.loadmat(image_name)['im_b_y']

                im_gt_y = crop_by_pixel(im_gt_y, 3)
                im_b_y = crop_by_pixel(im_b_y, 3)
                           
                im_gt_y = im_gt_y.astype(float)
                im_b_y = im_b_y.astype(float)

                psnr_bicubic = PSNR(im_gt_y, im_b_y, shave_border=scale)
                avg_psnr_bicubic += psnr_bicubic

                (score, diff) = compare_ssim(im_gt_y, im_b_y, full=True)
                avg_ssim_bicubic += score

                im_input = im_b_y/255.

                im_input = Variable(torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1])

                if cuda:
                    model = model.cuda()
                    im_input = im_input.cuda()
                else:
                    model = model.cpu()

                start_time = time.time()
                HR = model(im_input)
                elapsed_time = time.time() - start_time
Example #49
def get_ssim(img1, img2):
    ssim = compare_ssim(img1, img2, multichannel=True)
    return ssim
Example #50
import numpy as np
from skimage import data
from skimage.measure import compare_ssim

from utils import expand_folder
# load_templates comes from elsewhere in the original project

im_path = "/home/esepulveda/Documents/projects/roots/python/processing/1.25.AVI/windows/frame-51"

images = []
expand_folder(im_path,images)
images.sort()


#templates = load_templates("/home/esepulveda/Documents/projects/roots/python/models/templates")
templates = load_templates("/Users/exequiel/projects/roots/python/models/templates")

n = len(images)

sim = np.zeros((n,n))

print(images)
images_data = [data.imread(x) for x in images]

for i in range(n):
    image_i = images_data[i]
    for j in range(i, n):
        image_j = images_data[j]
        sim[i, j] = compare_ssim(image_i, image_j, multichannel=True)  # gaussian_weights=True is another option
        sim[j, i] = sim[i, j]

print(np.sum(sim, axis=0))
print(np.min(sim, axis=0))