def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)

    ## For polyaxon
    #if config.run_polyaxon:
    localdir = Path().absolute().parent
    dataPath = Path.joinpath(localdir, 'data', 'ImagesForVisualization')

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    # Flatten all images used in the ridge plot so their pixel values go into one column.
    curdatLayer = importData(config)
    #pathToRidgeImages = Path.joinpath(dataPath, 'Ridgeplot')
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\test\Turkey\T37SCC_20200729T081609\bandTCIRGB\Test\NIRImages\original_0NIR"
    images_test = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\test\Turkey\T37SCC_20200729T081609\bandTCIRGB\Train\NIRImages\original_0NIR"
    images_train = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)
    images = images_test + images_train

    import seaborn as sns
    sns.set(font_scale=2.5)
    plt.rcParams.update({'font.size': 26})

    # One DataFrame per image, concatenated so every NIR pixel ends up in a single column.
    df = []
    for i in images:
        df.append(pd.DataFrame(i.flatten(), columns=['NIR']))
    df_merged = pd.concat(df)

    axes = df_merged.plot(kind='hist', subplots=True, layout=(1, 1), bins=200,
                          color=['r'], yticks=[], sharey=True, sharex=True)
    axes[0, 0].yaxis.set_visible(False)
    fig = axes[0, 0].figure
    fig.text(0.5, 0.04, "Pixel Value", ha="center", va="center")
    fig.text(0.05, 0.5, "Pixel frequency", ha="center", va="center", rotation=90)
    #plt.xlim(0, 4000)
    plt.show()
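# Minimal, self-contained sketch of what main() above does: flatten a set of single-band
# images into one column and plot a shared histogram. The random arrays stand in for the
# Sentinel-2 NIR tiles that main() loads through importData (illustrative only).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def plot_pixel_histogram(images, band_name="NIR", bins=200):
    pixels = np.concatenate([img.ravel() for img in images])  # every pixel in one column
    ax = pd.DataFrame({band_name: pixels})[band_name].plot(kind="hist", bins=bins, color="r")
    ax.set_xlabel("Pixel Value")
    ax.set_ylabel("Pixel frequency")
    plt.show()


#plot_pixel_histogram([np.random.randint(0, 4096, size=(64, 64)) for _ in range(3)])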
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)
    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    curdatLayer = importData(config)
    train, names = curdatLayer.get_images_for_baseLine()
    curBaseLineModel = baselineModel(train, names, config)
    curBaseLineModel.baselineExperiment()
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)

    ## For polyaxon
    #if config.run_polyaxon:
    localdir = Path().absolute().parent
    dataPath = Path.joinpath(localdir, 'data', 'ImagesForVisualization')

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    channelName = ["R", "G", "B"]
    # Flatten all images used in the ridge plot so their pixel values go into one column.
    curdatLayer = importData(config)
    #pathToRidgeImages = Path.joinpath(dataPath, 'Ridgeplot')
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\processed\Belarus\T35UNB_20200617T092029\bandTCIRGB\Test\RGBImages\original_0RGB"
    images = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)

    # Tuple to select the color of each channel line.
    colors = ("r", "g", "b")
    channel_ids = (0, 1, 2)
    labels = []
    ridgeImages = []
    # Per-band counters: index 0 holds the possible pixel values, index 1 the counts.
    Rband = [np.arange(40000).flatten(), np.zeros((40000))]
    Gband = [np.arange(40000).flatten(), np.zeros((40000))]
    Bband = [np.arange(40000).flatten(), np.zeros((40000))]

    # Take every image and put its R channel in one band, G channel in one, B channel in one,
    # with labels such as Danmark_Rød, Danmark_grøn...
    #for i in range(len(images)):
    #    # For each image, put the channels with the correct name into the ridge image and labels.
    #    RGB = np.split(images[i], 3, axis=2)
    #    for j in range(3):
    #        ridgeImages.extend(RGB[j].ravel())
    #        labels.extend(np.tile(channelName[j], len(RGB[j].ravel())))

    for i in range(len(images)):
        RGB = np.split(images[i], 3, axis=2)
        uniqueR = np.unique(RGB[0], return_counts=True)
        uniqueG = np.unique(RGB[1], return_counts=True)
        uniqueB = np.unique(RGB[2], return_counts=True)
        # Accumulate counts across images (plain assignment would overwrite earlier images).
        Rband[1][uniqueR[0]] += uniqueR[1]
        Gband[1][uniqueG[0]] += uniqueG[1]
        Bband[1][uniqueB[0]] += uniqueB[1]

    #df = pd.DataFrame({'Rband': Rband[1], 'Gband': Gband[1], 'Bband': Bband[1]})
    dfs = []
    dfs.append(pd.DataFrame({'band': Rband[1], 'index': Rband[0], 'ChannelName': 'redBand'}))
    dfs.append(pd.DataFrame({'band': Gband[1], 'index': Gband[0], 'ChannelName': 'greenBand'}))
    dfs.append(pd.DataFrame({'band': Bband[1], 'index': Bband[0], 'ChannelName': 'blueBand'}))
    df2 = pd.concat(dfs, axis=0)

    bandsNew = []
    bandsNew.extend(Rband[1])
    bandsNew.extend(Gband[1])
    bandsNew.extend(Bband[1])
    #df = pd.DataFrame(dict(Pixel_Values=images, g=labels))
    plotting = RidgePlot().__call__(DataFrame=df2, Bands=bandsNew, Names=channelName)
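# Sketch of the per-channel counting used above: np.unique(..., return_counts=True) returns
# the distinct pixel values and their frequencies, which can be scattered into a fixed-size
# count array indexed by pixel value. Assumes integer pixel values below max_value
# (illustrative helper, not part of the repository).
import numpy as np


def count_band(channel_images, max_value=40000):
    counts = np.zeros(max_value)
    for img in channel_images:
        values, freqs = np.unique(img, return_counts=True)
        counts[values] += freqs  # accumulate across images instead of overwriting
    return counts


#print(count_band([np.random.randint(0, 10, size=(4, 4))], max_value=10))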
def run_eval(self, output_path, store_path, model_path=None, test_dataloader=None):
    curdatLayer = importData(self.config)
    if test_dataloader is None:
        train, test_dataloader = curdatLayer.getRGBDataLoader()
        del train

    if Path.exists(Path.joinpath(output_path, self.config.model_name + '_' + str(self.config.epochs) + '.pt')) \
            and self.config.run_polyaxon == False:
        ## If the model was trained with the WGAN generator (i.e. layer norm), use Wgangenerator instead of generator().
        if self.config.new_generator:
            gen = Wgangenerator().to(self.config.device)
        else:
            gen = generator().to(self.config.device)
        gen.load_state_dict(torch.load(
            Path.joinpath(output_path, self.config.model_name + '_' + str(self.config.epochs) + '.pt')))  ## Use epochs to identify model number
    elif Path.exists(Path(str(model_path))):
        if self.config.nir_data:
            gen = generatorNIR().to(self.config.device)
        elif self.config.new_generator:
            gen = Wgangenerator().to(self.config.device)
        else:
            gen = generator().to(self.config.device)
        print("Just loaded model from path " + str(model_path))
        gen.load_state_dict(torch.load(model_path))  ## Use epochs to identify model number
    else:
        print("Unable to find path to model")
    gen.eval()

    loadAndAgumentMasks = makeMasks.MaskClass(self.config, rand_seed=None, evaluation=True)

    names = []
    # Find names of test images, in order to save the generated files with the same name, for further reference.
    localImg = test_dataloader.dataset.image_list
    # Slice the string to only include the name of the file, i.e. everything after the last path separator.
    localNames = []
    # if self.config.run_polyaxon:
    #     split_path = localImg[0].split('/')  ##Linux
    # else:
    #     split_path = localImg[0].split("\\")
    # local_index = split_path.index('processed')
    # local_country = split_path[local_index + 1]
    for i in localImg:
        if self.config.run_polyaxon:
            selected_image = i.split('/')[-1]  ##Linux
        else:
            selected_image = i.split("\\")[-1]
        localNames.append(selected_image)
    names = names + localNames
    print("Found this many names " + str(len(names)))

    current_number = 0

    if not os.path.exists(Path.joinpath(output_path, self.config.model_name)):
        os.makedirs(Path.joinpath(output_path, self.config.model_name))

    now = datetime.now()
    dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
    local_test_path = output_path / self.config.model_name / dt_string / 'Data'
    local_test_nir_path = output_path / self.config.model_name / dt_string / 'DataNir'
    local_store_path = store_path / self.config.model_name / dt_string / 'stored_Data'
    os.makedirs(local_test_path)
    os.makedirs(local_store_path)
    os.makedirs(local_test_nir_path)

    start_time = datetime.now()
    for real, sar in tqdm(test_dataloader, disable=self.config.run_polyaxon):
        masks = loadAndAgumentMasks.returnTensorMasks(self.config.batch_size)
        masks = torch.from_numpy(masks)
        masks = masks.type(torch.cuda.FloatTensor)
        masks = 1 - masks
        masks.to(self.config.device)

        real = real.to(self.config.device)
        fake_masked_images = torch.mul(real, masks)
        generated_images = gen(fake_masked_images, masks)
        image_names = names[current_number:current_number + self.config.batch_size]
        current_number = current_number + self.config.batch_size  ## Change naming to include all names

        # modelHelper.save_tensor_batch(generated_images, fake_masked_images, config.batch_size, path)
        for index, image in enumerate(generated_images):
            namePath = Path.joinpath(local_test_path, image_names[index])
            if self.config.nir_data:
                modelHelper.save_tensor_single_NIR(
                    image,
                    Path.joinpath(local_test_path, image_names[index]),
                    Path.joinpath(local_test_nir_path, image_names[index]),
                    raw=True)
            else:
                modelHelper.save_tensor_single(
                    image,
                    Path.joinpath(local_test_path, image_names[index]),
                    raw=True)
    end_time = datetime.now()
    time_ran = str(end_time - start_time)

    # Create a dataloader with the generated images.
    generated_images_dataloader = curdatLayer.getGeneratedImagesDataloader(local_test_path)
    #print("generated image 429 " + str(generated_images_dataloader.dataset.image_list[429]))
    #print("test image 429 " + str(test_dataloader.dataset.image_list[429]))

    # Calculate FID.
    if self.config.nir_data:
        # Reload test_dataloader for NIR, since it has to have 3 channels and not 4.
        train, test_dataloader = curdatLayer.getRGBDataLoader()
        del train
    FID_Value = FIDCalculator(test_dataloader,
                              generated_images_dataloader,
                              len(test_dataloader) * self.config.batch_size,
                              self.config.batch_size,
                              self.config).get_FID_scores()

    # Calculate PSNR and SSIM.
    dataloader_iterator = iter(generated_images_dataloader)
    #dataloader_iterator = iter(test_dataloader)
    maeValues = []
    sddValues = []
    ssimscikitValues = []
    SSIMValues = []
    psnrValues = []
    CCValues = []
    rmseValues = []

    # Loop to calculate PSNR and SSIM for all test and generated images.
    count = 0
    for i, images_real in enumerate(test_dataloader):
        try:
            images_generated = next(dataloader_iterator)
        except StopIteration:
            dataloader_iterator = iter(generated_images_dataloader)
            images_generated = next(dataloader_iterator)
        for index2 in range(self.config.batch_size):
            psnrValues.append(PSNR().__call__(images_real[index2], images_generated[index2]))
            if psnrValues[-1] < 3:
                print(str(psnrValues[-1]))
                modelHelper.save_tensor_single(
                    normalize_batch_tensor(images_real[index2]),
                    Path.joinpath(local_store_path, str(i) + '_' + str(count) + '_real.tiff'))
                modelHelper.save_tensor_single(
                    normalize_batch_tensor(images_generated[index2]),
                    Path.joinpath(local_store_path, str(i) + '_' + str(count) + 'gen.tiff'))
            CCValues.append(CC().__call__(images_real[index2], images_generated[index2]))
            maeValues.append(MSE().__call__(images_real[index2], images_generated[index2]))
            sddValues.append(SDD.__call__(images_real[index2], images_generated[index2]))
            ssimscikitValues.append(SSIM_SKI.__call__(images_real[index2], images_generated[index2]))
            image1 = images_real[index2].unsqueeze(0)
            image2 = images_generated[index2].unsqueeze(0)
            SSIMValues.append(ssim(image1, image2))
            rmseValues.append(RMSE.__call__(images_real[index2], images_generated[index2]))
            count = count + 1

    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    meanSSIM = sum(SSIMValues) / len(SSIMValues)
    minSSIM = min(SSIMValues)
    maxSSIM = max(SSIMValues)

    meanSCISSIM = sum(ssimscikitValues) / len(ssimscikitValues)
    minSCISSIM = min(ssimscikitValues)
    maxSCISSIM = max(ssimscikitValues)

    meanCC = sum(CCValues) / len(CCValues)
    minCC = min(CCValues)
    maxCC = max(CCValues)

    meanRMSE = sum(rmseValues) / len(rmseValues)
    minRMSE = min(rmseValues)
    maxRMSE = max(rmseValues)

    # Save final results of evaluation metrics.
    saveEvalToTxt(self.config.model_name,
                  meanMAE, minMAE, maxMAE,
                  meanSDD, minSDD, maxSDD,
                  meanSSIM.item(), minSSIM.item(), maxSSIM.item(),
                  meanSCISSIM, minSCISSIM, maxSCISSIM,
                  meanPSNR, minPSNR, maxPSNR,
                  meanCC, minCC, maxCC,
                  meanRMSE, minRMSE, maxRMSE,
                  FID_Value, time_ran, local_store_path)
    # Clean up the generated images.
    modelHelper.clearFolder(local_test_path.parent)
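# Reference formula for the PSNR values collected in run_eval above, independent of the
# project's PSNR class (which is assumed to compute something equivalent):
# PSNR = 10 * log10(data_range^2 / MSE), in dB.
import numpy as np


def psnr(reference, test, data_range=1.0):
    mse = np.mean((np.asarray(reference, dtype=np.float64) - np.asarray(test, dtype=np.float64)) ** 2)
    if mse == 0:
        return float("inf")  # identical images
    return 10.0 * np.log10((data_range ** 2) / mse)


#print(psnr(np.ones((8, 8)), np.full((8, 8), 0.99)))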
def main(args):
    config = TrainingConfig()
    config = update_config(args, config)
    logger = logging.getLogger(__name__)
    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        imageOutputPath = config.data_path / 'data' / 'generated'
        model_path = inpainting_data_path / 'models'
        modelOutputPath = Path.joinpath(model_path, 'OutputModels')
        stores_output_path = config.output_path / 'data' / 'storedData'
    else:
        imageOutputPath = Path().absolute().parent / 'data' / 'generated'
        localdir = Path().absolute().parent
        modelOutputPath = Path.joinpath(localdir, 'OutputModels')
        stores_output_path = localdir / 'data' / 'storedData'

    # Import test data
    test = eval_model(config)
    test.run_eval(modelOutputPath, stores_output_path)

    curdatLayer = importData(config)
    train, test_dataloader = curdatLayer.getRGBDataLoader()
    del train

    test = Path.joinpath(modelOutputPath, config.model_name + '_' + str(config.epochs) + '.pt')
    print(Path.joinpath(modelOutputPath, config.model_name + '_' + str(config.epochs) + '.pt'))
    if Path.exists(Path.joinpath(modelOutputPath, config.model_name + '_' + str(config.epochs) + '.pt')):
        ## If the model was trained with the WGAN generator (i.e. layer norm), use Wgangenerator instead of generator().
        gen = generator().to(config.device)
        gen.load_state_dict(torch.load(
            Path.joinpath(modelOutputPath, config.model_name + '_' + str(config.epochs) + '.pt')))  ## Use epochs to identify model number
    else:
        print("Unable to find path to model")
    gen.eval()

    loadAndAgumentMasks = makeMasks.MaskClass(config, rand_seed=None, evaluation=True)

    names = []
    for i in range(len(test_dataloader.dataset.datasets)):
        # Find names of test images, in order to save the generated files with the same name, for further reference.
        localImg = test_dataloader.dataset.datasets[i].image_list
        # Slice the string to only include the name of the file, i.e. everything after the last path separator.
        localNames = []
        for i in localImg:
            if config.run_polyaxon:
                selected_image = i.split('/')[-1]  ##Linux
            else:
                selected_image = i.split("\\")[-1]
            localNames.append(selected_image)
        names = names + localNames
    print("Found this many names " + str(len(names)))

    current_number = 0

    if not os.path.exists(Path.joinpath(imageOutputPath, config.model_name)):
        os.makedirs(Path.joinpath(imageOutputPath, config.model_name))

    now = datetime.now()
    dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
    local_test_path = imageOutputPath / config.model_name / dt_string / 'Data'
    local_store_path = stores_output_path / config.model_name / dt_string / 'stored_Data'
    os.makedirs(local_test_path)
    os.makedirs(local_store_path)

    start_time = datetime.now()
    testCount = 3  # Debug limit: only a few batches are generated.
    for real in tqdm(test_dataloader):
        masks = loadAndAgumentMasks.returnTensorMasks(config.batch_size)
        masks = torch.from_numpy(masks)
        masks = masks.type(torch.cuda.FloatTensor)
        masks = 1 - masks
        masks.to(config.device)

        real = real.to(config.device)
        fake_masked_images = torch.mul(real, masks)
        generated_images = gen(fake_masked_images, masks)
        image_names = names[current_number:current_number + config.batch_size]
        current_number = current_number + config.batch_size  ## Change naming to include all names

        #modelHelper.save_tensor_batch(generated_images, fake_masked_images, config.batch_size, path)
        for index, image in enumerate(generated_images):
            namePath = Path.joinpath(local_test_path, image_names[index])
            modelHelper.save_tensor_single(image, Path.joinpath(local_test_path, image_names[index]), raw=True)
        if testCount < 0:
            break
        testCount = testCount - 1
        print("Saved image to " + str(local_test_path))

    end_time = datetime.now()
    time_ran = str(end_time - start_time)

    # Create a dataloader with the generated images.
    generated_images_dataloader = curdatLayer.getGeneratedImagesDataloader(local_test_path)

    # Calculate FID.
    FID_Value = FIDCalculator(test_dataloader, generated_images_dataloader,
                              len(test_dataloader) * config.batch_size,
                              config.batch_size, config).get_FID_scores()

    # Calculate PSNR and SSIM.
    dataloader_iterator = iter(generated_images_dataloader)
    psnrValues = []
    maeValues = []
    sddValues = []
    ##ssimValues = []
    SSIMValues = []
    CCValues = []

    # Loop to calculate PSNR and SSIM for all test and generated images.
    for images_real in test_dataloader:
        try:
            images_generated = next(dataloader_iterator)
        except StopIteration:
            dataloader_iterator = iter(generated_images_dataloader)
            images_generated = next(dataloader_iterator)
        for index2 in range(config.batch_size):
            psnrValues.append(PSNR().__call__(images_real[index2], images_generated[index2]))
            CCValues.append(CC().__call__(images_real[index2], images_generated[index2]))
            maeValues.append(MSE().__call__(images_real[index2], images_generated[index2]))
            sddValues.append(SDD.__call__(images_real[index2], images_generated[index2]))
            ##ssimValues.append(SSIM.__call__(images_real[index2], images_generated[index2]))
            image1 = images_real[index2].unsqueeze(0)
            image2 = images_generated[index2].unsqueeze(0)
            SSIMValues.append(ssim(image1, image2))
        break  # Debug limit: only the first batch is evaluated.

    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    meanSSIM = sum(SSIMValues) / len(SSIMValues)
    minSSIM = min(SSIMValues)
    maxSSIM = max(SSIMValues)

    meanCC = sum(CCValues) / len(CCValues)
    minCC = min(CCValues)
    maxCC = max(CCValues)

    # Save final results of evaluation metrics.
    saveEvalToTxt(config.model_name,
                  meanMAE, minMAE, maxMAE,
                  meanSDD, minSDD, maxSDD,
                  meanSSIM, minSSIM, maxSSIM,
                  meanPSNR, minPSNR, maxPSNR,
                  meanCC, minCC, maxCC,
                  FID_Value, time_ran, local_store_path)
    # Clean up the generated images.
    modelHelper.clearFolder(local_test_path.parent)
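# Sketch: the loops above pair the real and generated dataloaders by re-creating the
# generated iterator on StopIteration. If both loaders are known to have the same length,
# zip() expresses the same pairing more directly. Illustrative only; `metric` stands in for
# PSNR, SSIM, or any of the other per-image measures.
def paired_metric_values(real_loader, generated_loader, batch_size, metric):
    values = []
    for images_real, images_generated in zip(real_loader, generated_loader):
        for index in range(batch_size):
            values.append(metric(images_real[index], images_generated[index]))
    return values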
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)

    ## For polyaxon
    #if config.run_polyaxon:
    localdir = Path().absolute().parent
    dataPath = Path.joinpath(localdir, 'data', 'ImagesForVisualization')

    def mse(x, y):
        return np.linalg.norm(x - y)

    def MaxMin(a, b):
        minvalue = min(a.min(), b.min())
        maxvalue = max(a.max(), b.max())
        return maxvalue - minvalue

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    plt.rcParams.update({'font.size': 18})
    path_generated = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\generated\Base_Line\18_12_2020_09_58_50\Data"
    curdatLayer = importData(config)
    generated_images = curdatLayer.open_Imagefiles_as_array(path_generated)
    generated_img = generated_images[0]

    path_real = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\processed\Belarus\T35UNB_20200617T092029\bandTCIRGB\Test\RGBImages\original_0RGB"
    original_images = curdatLayer.open_Imagefiles_as_array(path_real)
    org_img = original_images[0]
    another_img = original_images[1]

    win_size = 11
    generated_img = convertToFloat32(generated_img)
    org_img = convertToFloat32(org_img)
    another_img = convertToFloat32(another_img)

    generated_img_copy = generated_img.copy()
    # generated_img = remove_outliers(generated_img)
    org_img_copy = org_img.copy()
    # org_img = remove_outliers(org_img)
    another_img_copy = another_img.copy()
    # another_img = remove_outliers(another_img)

    mse_org = mse(org_img, org_img)  # used below for the first subplot label
    ssim_org, ssim_org_full = ssim(org_img, org_img, data_range=MaxMin(org_img, org_img),
                                   multichannel=True, win_size=win_size, full=True)
    mse_org_vs_inpaint = mse(org_img, generated_img)
    ssim_org_vs_inpaint, ssim_org_vs_inpaint_full = ssim(org_img, generated_img,
                                                         data_range=MaxMin(org_img, generated_img),
                                                         multichannel=True, win_size=win_size, full=True)
    mse_org_vs_another = mse(org_img, another_img)
    ssim_org_vs_another, ssim_org_vs_another_full = ssim(org_img, another_img,
                                                         data_range=MaxMin(org_img, another_img),
                                                         multichannel=True, win_size=win_size, full=True)

    fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True)
    ax = axes.ravel()
    label = 'MSE: {:.2f}, SSIM: {:.2f}'

    ax[0].imshow(_normalize(org_img_copy))
    ax[0].set_xlabel(label.format(mse_org, ssim_org))
    ax[0].set_title('Original image')
    ax[0].set_yticklabels([])
    ax[0].set_xticklabels([])

    ax[1].imshow(_normalize(generated_img_copy))
    ax[1].set_xlabel(label.format(mse_org_vs_inpaint, ssim_org_vs_inpaint))
    ax[1].set_title('Original vs inpainted')
    ax[1].set_yticklabels([])
    ax[1].set_xticklabels([])

    ax[2].imshow(_normalize(another_img_copy))
    ax[2].set_xlabel(label.format(mse_org_vs_another, ssim_org_vs_another))
    ax[2].set_title('Original vs Another Image')
    ax[2].set_yticklabels([])
    ax[2].set_xticklabels([])

    ax[3].imshow(ssim_org_full)
    ax[3].set_title('Original image')
    ax[3].set_yticklabels([])
    ax[3].set_xticklabels([])

    ax[4].imshow(ssim_org_vs_inpaint_full)
    ax[4].set_title('Original vs inpainted')
    ax[4].set_yticklabels([])
    ax[4].set_xticklabels([])

    ax[5].imshow(ssim_org_vs_another_full)
    ax[5].set_title('Original vs Another Image')
    ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])

    plt.tight_layout()
    plt.show()

    #
    # noise = np.ones_like(org_img) * 0.2 * (org_img.max() - org_img.min())
    #
noise[np.random.random(size=noise.shape) > 0.5] *= -1 # # # # img_noise = org_img + noise # img_const = org_img + abs(noise) # # win_size = 11 # # #image_result = convertToFloat32(image_result) # #org_img_float = convertToFloat32(org_img_float) # #image_defect = convertToFloat32(image_defect) # # # mse_none = mse(org_img, org_img) # ssim_none,ssim_none_full = ssim(org_img, org_img,multichannel=True , data_range=MaxMin(org_img,org_img) # ,gaussian_weights=True,win_size=win_size,full=True) # # ssim_none = compare_ssim( # # generated_img, # # generated_img, # # win_size=11, # # gaussian_weights=True, # # multichannel=True, # # data_range=1.0, # # K1=0.01, # # K2=0.03, # # sigma=1.5) # mse_noise = mse(org_img, img_noise) # ssim_noise,ssim_noise_full = ssim(org_img, img_noise,multichannel=True , data_range=MaxMin(org_img,img_noise) # , gaussian_weights=True,win_size=win_size,full=True) # # ssim_noise = compare_ssim( # # generated_img, # # img_noise, # # win_size=11, # # gaussian_weights=True, # # multichannel=True, # # data_range=1.0, # # K1=0.01, # # K2=0.03, # # sigma=1.5) # mse_const = mse(org_img, img_const) # ssim_const,ssim_const_full = ssim(org_img, img_const,multichannel=True, data_range=MaxMin(org_img,img_const) # ,gaussian_weights=True,win_size=win_size,full=True) # # fig, axes = plt.subplots(nrows=2, ncols=3, # sharex=True, sharey=True) # ax = axes.ravel() # label = 'MSE: {:.2f}, SSIM: {:.2f}' # # ax[0].imshow(_normalize(org_img)) # ax[0].set_xlabel(label.format(mse_none, ssim_none)) # ax[0].set_title('Original image') # ax[0].set_yticklabels([]) # ax[0].set_xticklabels([]) # # ax[1].imshow(_normalize(img_noise)) # ax[1].set_xlabel(label.format(mse_noise, ssim_noise)) # ax[1].set_title('Original vs noise') # ax[1].set_yticklabels([]) # ax[1].set_xticklabels([]) # # ax[2].imshow(_normalize(img_const)) # ax[2].set_xlabel(label.format(mse_const, ssim_const)) # ax[2].set_title('Original vs constant') # ax[2].set_yticklabels([]) # ax[2].set_xticklabels([]) # # ax[3].imshow(ssim_none_full) # ax[3].set_title('Original image') # ax[3].set_yticklabels([]) # ax[3].set_xticklabels([]) # # ax[4].imshow(ssim_noise_full) # ax[4].set_title('Original vs inpainted') # ax[4].set_yticklabels([]) # ax[4].set_xticklabels([]) # # ax[5].imshow(ssim_const_full) # ax[5].set_title('Original vs noneinpaited') # ax[5].set_yticklabels([]) # ax[5].set_xticklabels([]) # # plt.tight_layout() # plt.show() # # # # # # # # norm = np.linalg.norm(org_img) # org_img_copy = org_img/norm # generated_img_copy = generated_img/norm # mse_generated_vs_original = mse(generated_img_copy,org_img_copy) # ssim_generated_vs_original, ssim_image = ssim(generated_img_copy, org_img_copy, # data_range=MaxMin(generated_img_copy,org_img_copy),multichannel=True,gaussian_weights=True,win_size=win_size,full=True) # # ssim_generated_vs_original = compare_ssim( # # generated_img, # # org_img, # # win_size=11, # # gaussian_weights=True, # # multichannel=True, # # data_range=1.0, # # K1=0.01, # # K2=0.03, # # sigma=1.5) # mse_anotherimg_vs_generated= mse(generated_img,another_img) # ssim_anotherimg_vs_generated=ssim(generated_img,another_img,data_range=MaxMin(generated_img,another_img),multichannel=True,gaussian_weights=True,win_size=win_size) # # ssim_anotherimg_vs_generated= compare_ssim( # # generated_img, # # another_img, # # win_size=11, # # gaussian_weights=True, # # multichannel=True, # # data_range=1.0, # # K1=0.01, # # K2=0.03, # # sigma=1.5) # # mse_new_mask = mse(org_img, image_result) # ssim_new_mask= ssim(org_img, image_result, 
data_range=MaxMin(org_img_float,image_result), # multichannel=True,win_size=win_size) # # ssim_new_mask = compare_ssim( # # org_img_float, # # image_result, # # win_size=11, # # gaussian_weights=True, # # multichannel=True, # # data_range=1.0, # # K1=0.01, # # K2=0.03, # # sigma=1.5) # mse_with_mask = mse(org_img,image_defect) # ssim_with_mask= ssim(org_img, image_defect, data_range=MaxMin(org_img_float,image_defect), # multichannel=True, win_size=win_size) # # ssim_with_mask = compare_ssim( # # org_img_float, # # image_defect, # # win_size=11, # # gaussian_weights=True, # # multichannel=True, # # data_range=1.0, # # K1=0.01, # # K2=0.03, # # sigma=1.5) # # fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(10, 5), # sharex=True, sharey=True) # ax = axes.ravel() # label = 'MSE: {:.2f}, SSIM: {:.2f}' # # ax[0].imshow(_normalize(generated_img)) # ax[0].set_xlabel(label.format(mse_none, ssim_none)) # ax[0].set_title('Generated image') # # ax[1].imshow(_normalize(img_noise)) # ax[1].set_xlabel(label.format(mse_noise, ssim_noise)) # ax[1].set_title('Image with noise') # # ax[2].imshow(_normalize(img_const)) # ax[2].set_xlabel(label.format(mse_const, ssim_const)) # ax[2].set_title('Image plus constant') # # ax[3].imshow(_normalize(org_img)) # ax[3].set_xlabel(label.format(mse_generated_vs_original, ssim_generated_vs_original)) # ax[3].set_title('Original Image') # # ax[4].imshow(_normalize(another_img)) # ax[4].set_xlabel(label.format(mse_anotherimg_vs_generated, ssim_anotherimg_vs_generated)) # ax[4].set_title('A different Image') # # ax[5].imshow(_normalize(image_result)) # ax[5].set_xlabel(label.format(mse_new_mask, ssim_new_mask)) # ax[5].set_title('Original vs Generated with different Mask') # # ax[6].imshow(_normalize(image_defect)) # ax[6].set_xlabel(label.format(mse_with_mask, ssim_with_mask)) # ax[6].set_title('Defected image with no inpainting on') # # ax[7].imshow(ssim_image) # ax[7].set_title('SSIM image') # # plt.tight_layout() # plt.show() win_size = 11 def mse(x, y): return np.linalg.norm(x - y) def MaxMin(a, b): minvalue = min(a.min(),b.min()) maxvalue = max(a.max(),b.max()) return maxvalue-minvalue image_orig = data.astronaut()[0:200, 0:200] # Create mask with three defect regions: left, middle, right respectively mask = np.zeros(image_orig.shape[:-1]) mask[20:60, 0:20] = 1 mask[160:180, 70:155] = 1 mask[30:60, 170:195] = 1 # Defect image over the same region in each color channel image_defect = image_orig.copy() for layer in range(image_defect.shape[-1]): image_defect[np.where(mask)] = 0 image_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True) image_orig = img_as_float(image_orig) image_defect= img_as_float(image_defect) mse_org = mse(image_orig, image_orig) ssim_org,ssim_org_full= ssim(image_orig, image_orig, data_range=MaxMin(image_orig,image_orig), multichannel=True, win_size=win_size, full=True) mse_org_vs_inpaint = mse(image_orig, image_result) ssim_org_vs_inpaint,ssim_org_vs_inpaint_full = ssim(image_orig, image_result, data_range=MaxMin(image_orig,image_result), multichannel=True, win_size=win_size,full=True) mse_org_vs_masked= mse(image_orig, image_defect) ssim_org_vs_masked,ssim_org_vs_masked_full = ssim(image_orig, image_defect, data_range=MaxMin(image_orig,image_defect), multichannel=True, win_size=win_size,full=True) fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True) ax = axes.ravel() label = 'MSE: {:.2f}, SSIM: {:.2f}' ax[0].imshow(image_orig) ax[0].set_xlabel(label.format(mse_org, ssim_org)) ax[0].set_title('Original 
image')
    ax[0].set_yticklabels([])
    ax[0].set_xticklabels([])

    ax[1].imshow(image_result)
    ax[1].set_xlabel(label.format(mse_org_vs_inpaint, ssim_org_vs_inpaint))
    ax[1].set_title('Original vs inpainted')
    ax[1].set_yticklabels([])
    ax[1].set_xticklabels([])

    ax[2].imshow(image_defect)
    ax[2].set_xlabel(label.format(mse_org_vs_masked, ssim_org_vs_masked))
    ax[2].set_title('Original vs non-inpainted')
    ax[2].set_yticklabels([])
    ax[2].set_xticklabels([])

    ax[3].imshow(ssim_org_full)
    ax[3].set_title('Original image')
    ax[3].set_yticklabels([])
    ax[3].set_xticklabels([])

    ax[4].imshow(ssim_org_vs_inpaint_full)
    ax[4].set_title('Original vs inpainted')
    ax[4].set_yticklabels([])
    ax[4].set_xticklabels([])

    ax[5].imshow(ssim_org_vs_masked_full)
    ax[5].set_title('Original vs non-inpainted')
    ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])

    plt.tight_layout()
    plt.show()

    #pathToRidgeImages = Path.joinpath(dataPath, 'Ridgeplot')
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\generated\PartialConvolutions\12_12_2020_18_55_21\Data"
    curdatLayer = importData(config)
    images = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)

    # Probe how sensitive SSIM is to the dtype and scaling of the inputs.
    test = SSIM_SKI.__call__(images[0], images[1])
    test2 = SSIM_SKI.__call__(convertToFloat32(images[0]), convertToFloat32(images[1]))
    test3 = SSIM_SKI.__call__((images[0] * 0.0255), (images[1] * 0.0255))
    test4 = SSIM_SKI.__call__(images[0] / 4095, images[1] / 4095)
    show_images(images[0] / 10000, (images[0] * 0.0255), (images[0] * 0.0255))
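# The test/test2/test3/test4 probes above ask how SSIM reacts to dtype and scaling. With
# scikit-image, SSIM is stable under a consistent rescaling of both images *and* data_range,
# but changes if data_range no longer matches the data. Small demonstration on synthetic
# arrays (illustrative only, not part of the repository).
import numpy as np
from skimage.metrics import structural_similarity as compare_ssim


def ssim_scaling_demo():
    rng = np.random.default_rng(0)
    a = rng.random((64, 64))
    b = np.clip(a + rng.normal(scale=0.05, size=a.shape), 0, 1)
    matched = compare_ssim(a, b, data_range=1.0)
    rescaled = compare_ssim(a * 4095, b * 4095, data_range=4095)   # ~ same as `matched`
    mismatched = compare_ssim(a * 4095, b * 4095, data_range=1.0)  # distorted by the wrong range
    return matched, rescaled, mismatched


#print(ssim_scaling_demo())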
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)

    ## For polyaxon
    #if config.run_polyaxon:
    localdir = Path().absolute().parent
    dataPath = Path.joinpath(localdir, 'data', 'ImagesForVisualization')

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    curdatLayer = importData(config)

    ## Original
    pathToNIR = r"E:\Speciale\NDVIExperiment\Croatia\Original_Data\NIR"
    nir_images = curdatLayer.open_Imagefiles_as_array(pathToNIR)
    nir_image = nir_images[0]
    pathtoRGB = r"E:\Speciale\NDVIExperiment\Croatia\Original_Data\RGB"
    rgb_images = curdatLayer.open_Imagefiles_as_array(pathtoRGB)
    rgb_image = rgb_images[0]
    r, g, b = cv2.split(rgb_image)
    org_ndvi = (nir_image - r) / (nir_image + r)

    titles = ["Sentinel 2 - Normalized Difference Vegetation Index (NDVI) over Original"]
    # https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_calculate_classify_ndvi.html
    # Turn off bytescale scaling due to float values for NDVI.
    ep.plot_bands(org_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    ## Inpainted
    pathToNIR = r"E:\Speciale\NDVIExperiment\Croatia\PartialConvolutions\big_mask\DataNir"
    nir_images = curdatLayer.open_Imagefiles_as_array(pathToNIR)
    nir_image = nir_images[0]
    pathtoRGB = r"E:\Speciale\NDVIExperiment\Croatia\PartialConvolutions\big_mask\Data"
    rgb_images = curdatLayer.open_Imagefiles_as_array(pathtoRGB)
    rgb_image = rgb_images[0]
    r, g, b = cv2.split(rgb_image)
    gen_ndvi = (nir_image - r) / (nir_image + r)

    titles = ["Sentinel 2 - Normalized Difference Vegetation Index (NDVI) over generated"]
    ep.plot_bands(gen_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    diff_ndvi = org_ndvi - gen_ndvi
    old = sum(gen_ndvi.flatten())
    new = sum(org_ndvi.flatten())
    diffSumsWithMaria = ((new - old) / old)
    diff_percent_sum = sum((gen_ndvi.flatten() - org_ndvi.flatten()) / org_ndvi.flatten() * 100)
    print("The NDVI has changed " + str(diffSumsWithMaria) + " %")

    titles = ["Sentinel 2 - Normalized Difference Vegetation Index (NDVI) difference"]
    ep.plot_bands(diff_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    loadAndAgumentMasks = makeMasks.MaskClass(config, rand_seed=None, evaluation=True)
    mask = loadAndAgumentMasks.returnMask(787)
    mask = mask[0, :, :]

    # Get real and set to GPU
    # Invert so that 1 marks the pixels to remove.
    mask = 1 - mask

    # Augment with masks
    # Check if this applies to all three color channels?
    gen_ndvi_masked = gen_ndvi.copy()
    org_ndvi_masked = org_ndvi.copy()
    for layer in range(gen_ndvi_masked.shape[-1]):
        gen_ndvi_masked[np.where(mask)] = 0
    for layer in range(org_ndvi_masked.shape[-1]):
        org_ndvi_masked[np.where(mask)] = 0

    ep.plot_bands(gen_ndvi_masked, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    maeValues = []
    sddValues = []
    ssimscikitValues = []
    psnrValues = []
    CCValues = []
    rmseValues = []

    # Evaluate only the unmasked pixels (the zeroed pixels are dropped).
    org_ndvi_masked = org_ndvi_masked[org_ndvi_masked != 0]
    gen_ndvi_masked = gen_ndvi_masked[gen_ndvi_masked != 0]

    psnrValues.append(PSNR().__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    CCValues.append(CC().__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    maeValues.append(MSE().__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    sddValues.append(SDD.__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    #ssimscikitValues.append(SSIM_SKI.__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    rmseValues.append(RMSE.__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))

    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    meanSSIM = 0
    minSSIM = 0
    maxSSIM = 0
    meanSCISSIM = 0
    minSCISSIM = 0
    maxSCISSIM = 0
    # meanSCISSIM = sum(ssimscikitValues) / len(ssimscikitValues)
    # minSCISSIM = min(ssimscikitValues)
    # maxSCISSIM = max(ssimscikitValues)

    meanCC = sum(CCValues) / len(CCValues)
    minCC = min(CCValues)
    maxCC = max(CCValues)

    meanRMSE = sum(rmseValues) / len(rmseValues)
    minRMSE = min(rmseValues)
    maxRMSE = max(rmseValues)

    FID_Value = 0.0
    time_ran = 0.0
    local_store_path = Path(r"E:\Speciale\NDVIExperiment\Croatia")
    saveEvalToTxt(config.model_name,
                  meanMAE, minMAE, maxMAE,
                  meanSDD, minSDD, maxSDD,
                  meanSSIM, minSSIM, maxSSIM,
                  meanSCISSIM, minSCISSIM, maxSCISSIM,
                  meanPSNR, minPSNR, maxPSNR,
                  meanCC, minCC, maxCC,
                  meanRMSE, minRMSE, maxRMSE,
                  FID_Value, time_ran, local_store_path)

    # v_nir_image = np.concatenate((nir_images[0], nir_images[1]), axis=1)
    # v_rgb_image = np.concatenate((rgb_images[0], rgb_images[1]), axis=1)
    # v_r, v_g, v_b = cv2.split(v_rgb_image)
    #
    # v_ndvi = (v_nir_image - v_r) / (v_nir_image + v_r)
    # titles = ["Sentinel 2 - Normalized Difference Vegetation Index (NDVI) over two samples"]
    # # https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_calculate_classify_ndvi.html
    # # Turn off bytescale scaling due to float values for NDVI.
    # ep.plot_bands(v_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    # nir_images = nir_images / 10000
    # r = r / 10000
    # ndvi = (nir_images - r) / (nir_images + r)
    #
    # fig = plt.figure(figsize=(10, 10))
    # fig.set_facecolor('white')
    # plt.imshow(ndvi, cmap='RdYlGn')  # Typically the color map for NDVI maps is Red to Yellow to Green.
    # plt.title('NDVI')
    # plt.show()
    # lol = ""
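# NDVI as computed above is (NIR - Red) / (NIR + Red); where NIR + Red is zero the division
# is undefined (the code above relies on the bands never summing to zero). A small guard
# keeps the result finite (illustrative helper, not part of the repository).
import numpy as np


def ndvi(nir, red):
    nir = np.asarray(nir, dtype=np.float64)
    red = np.asarray(red, dtype=np.float64)
    denom = nir + red
    with np.errstate(divide="ignore", invalid="ignore"):
        return np.where(denom == 0, 0.0, (nir - red) / denom)


#print(ndvi([[0.8, 0.0]], [[0.1, 0.0]]))  # the second pixel would otherwise be 0/0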
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)

    ## For polyaxon
    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    curdatLayer = importData(config)
    if config.nir_data:
        train, test_dataloader = curdatLayer.getNIRDataLoader()
    else:
        train, test_dataloader = curdatLayer.getRGBDataLoader()

    local_model_path = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\OutputModels\PartialConvolutionsWgan_301.pt"
    local_output_path = Path(r"E:\Speciale\final_model")

    #gen = Wgangenerator().to(config.device)
    if config.nir_data:
        gen = generatorNIR().to(config.device)
    else:
        gen = generator().to(config.device)
    gen.load_state_dict(torch.load(local_model_path))  ## Use epochs to identify model number
    gen.eval()

    loadAndAgumentMasks = makeMasks.MaskClass(config, rand_seed=None, evaluation=True, noFlip=True)

    names = []
    # Find names of test images, in order to save the generated files with the same name, for further reference.
    localImg = test_dataloader.dataset.image_list
    # Slice the string to only include the name of the file, i.e. everything after the last path separator.
    localNames = []
    # if self.config.run_polyaxon:
    #     split_path = localImg[0].split('/')  ##Linux
    # else:
    #     split_path = localImg[0].split("\\")
    # local_index = split_path.index('processed')
    # local_country = split_path[local_index + 1]
    for i in localImg:
        if config.run_polyaxon:
            selected_image = i.split('/')[-1]  ##Linux
        else:
            selected_image = i.split("\\")[-1]
        localNames.append(selected_image)
    names = names + localNames
    print("Found this many names " + str(len(names)))

    current_number = 0

    if not os.path.exists(Path.joinpath(local_output_path, config.model_name)):
        os.makedirs(Path.joinpath(local_output_path, config.model_name))

    now = datetime.now()
    dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
    local_test_path = local_output_path / config.model_name / dt_string / 'Data'
    local_test_nir_path = local_output_path / config.model_name / dt_string / 'DataNir'
    local_store_path = local_output_path / config.model_name / dt_string / 'stored_Data'
    os.makedirs(local_test_path)
    if config.nir_data:
        os.makedirs(local_test_nir_path)

    start_time = datetime.now()
    for real in tqdm(test_dataloader, disable=config.run_polyaxon):
        masks = loadAndAgumentMasks.returnTensorMasks(config.batch_size)
        masks = torch.from_numpy(masks)
        masks = masks.type(torch.cuda.FloatTensor)
        masks = 1 - masks
        masks.to(config.device)

        real = real.to(config.device)
        fake_masked_images = torch.mul(real, masks)
        generated_images = gen(fake_masked_images, masks)
        image_names = names[current_number:current_number + config.batch_size]
        current_number = current_number + config.batch_size  ## Change naming to include all names

        # modelHelper.save_tensor_batch(generated_images, fake_masked_images, config.batch_size, path)
        if config.nir_data:
            for index, image in enumerate(generated_images):
                namePath = Path.joinpath(local_test_path, image_names[index])
                modelHelper.save_tensor_single_NIR(
                    image,
                    Path.joinpath(local_test_path, image_names[index]),
                    Path.joinpath(local_test_nir_path, image_names[index]),
                    raw=True)
        else:
            modelHelper.save_tensor_batch(
                real, fake_masked_images, generated_images, config.batch_size,
                Path.joinpath(local_test_path, "_final_model_" + str(current_number)))
            current_number = current_number + 1
    end_time = datetime.now()
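# The generation loop above removes pixels by multiplying the batch with an inverted mask
# (after `masks = 1 - masks`, 1 means keep and 0 means hole). Minimal sketch of that step
# on a dummy batch; the shapes are illustrative, not the project's actual image size.
import torch


def mask_batch(real, holes):
    keep = 1 - holes    # invert: 1 = keep, 0 = hole
    return real * keep  # same as torch.mul(real, keep), broadcasting over the channel axis


#masked = mask_batch(torch.rand(2, 3, 8, 8), torch.zeros(2, 1, 8, 8))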
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
    cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)
    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    ## For polyaxon
    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()

    curdatLayer = importData(config)
    train_array, names = curdatLayer.get_images_for_baseLine()
    print("Total test in baseline " + str(len(train_array)))
    print("Total test names in baseline " + str(len(names)))
    train_dataloader, test_dataloader = curdatLayer.getRGBDataLoader()

    local_train_array = []
    for i in train_array:
        local_train_array.append(convertToFloat32(i))
    train_array = local_train_array

    curBaseLineModel = baselineModel(train_array, names, config)
    pathToGenerated, time_ran = curBaseLineModel.baselineExperiment()
    #pathToGenerated = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\generated\test_baseLine\22_11_2020_13_01_28"

    if config.run_polyaxon:
        pathToEval = config.output_path / 'evalMetrics'
    else:
        pathToEval = Path().absolute().parent / 'models'

    # Create a dataloader with the generated images.
    generated_images_dataloader = curdatLayer.getGeneratedImagesDataloader(pathToGenerated)
    print(str(pathToGenerated) + " is the generated image path")

    # Calculate FID.
    # missing gen
    print("Done with generating images")
    FID_Value = FIDCalculator(test_dataloader, generated_images_dataloader,
                              len(test_dataloader) * config.batch_size,
                              config.batch_size, config).get_FID_scores()

    # Calculate PSNR and SSIM.
    dataloader_iterator = iter(generated_images_dataloader)
    maeValues = []
    sddValues = []
    ssimscikitValues = []
    SSIMValues = []
    psnrValues = []
    CCValues = []
    rmseValues = []

    # Loop to calculate PSNR and SSIM for all test and generated images.
    for images_real in test_dataloader:
        try:
            images_generated = next(dataloader_iterator)
        except StopIteration:
            dataloader_iterator = iter(generated_images_dataloader)
            images_generated = next(dataloader_iterator)
        for index2 in range(config.batch_size):
            psnrValues.append(PSNR().__call__(images_real[index2], images_generated[index2]))
            CCValues.append(CC().__call__(images_real[index2], images_generated[index2]))
            maeValues.append(MSE().__call__(images_real[index2], images_generated[index2]))
            sddValues.append(SDD.__call__(images_real[index2], images_generated[index2]))
            ssimscikitValues.append(SSIM_SKI.__call__(images_real[index2], images_generated[index2]))
            image1 = images_real[index2].unsqueeze(0)
            image2 = images_generated[index2].unsqueeze(0)
            SSIMValues.append(ssim(image1, image2))
            rmseValues.append(RMSE.__call__(images_real[index2], images_generated[index2]))

    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    meanSSIM = sum(SSIMValues) / len(SSIMValues)
    minSSIM = min(SSIMValues)
    maxSSIM = max(SSIMValues)

    meanSCISSIM = sum(ssimscikitValues) / len(ssimscikitValues)
    minSCISSIM = min(ssimscikitValues)
    maxSCISSIM = max(ssimscikitValues)

    meanCC = sum(CCValues) / len(CCValues)
    minCC = min(CCValues)
    maxCC = max(CCValues)

    meanRMSE = sum(rmseValues) / len(rmseValues)
    minRMSE = min(rmseValues)
    maxRMSE = max(rmseValues)

    # Save final results of evaluation metrics.
    FID = FID_Value
    if not pathToEval.parent.exists():
        pathToEval.parent.mkdir()
    #saveEvalToTxt(config.model_name, meanPSNR.item(), minPSNR, maxPSNR, meanSSIM.item(), minSSIM, maxSSIM, FID, time, pathToEval)
    saveEvalToTxt(config.model_name,
                  meanMAE, minMAE, maxMAE,
                  meanSDD, minSDD, maxSDD,
                  meanSSIM.item(), minSSIM.item(), maxSSIM.item(),
                  meanSCISSIM, minSCISSIM, maxSCISSIM,
                  meanPSNR, minPSNR, maxPSNR,
                  meanCC, minCC, maxCC,
                  meanRMSE, minRMSE, maxRMSE,
                  FID_Value, time_ran, pathToEval)
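# The same mean/min/max aggregation is repeated above for every metric list; a small helper
# removes the repetition (hypothetical refactoring, not code from the repository).
def summarize(values):
    return sum(values) / len(values), min(values), max(values)


#meanPSNR, minPSNR, maxPSNR = summarize(psnrValues)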