def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Loads a pretrained UnetGenerator checkpoint and runs it over the
        train dataloader, saving each (real, SAR, generated) batch to disk.
    """
    ## Talk to Rune about how dataLayer is handle.
    config = TrainingConfig()
    config = update_config(args, config)
    ## For polyaxon

    if config.run_polyaxon:
        # Running inside a Polyaxon experiment: resolve data/output roots
        # from the cluster environment.
        input_root_path = Path(get_data_paths()['data'])  #'data'
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()
        # NOTE(review): joining a pathlib Path with an *absolute* right-hand
        # path ('/workspace/...') discards input_root_path entirely —
        # confirm the leading '/' is intended.
        pathToData = str(input_root_path /
                         '/workspace/data_landset8/testImages')
    else:
        # Local (Windows) development data path.
        pathToData = Path(r"C:\Users\Morten From\PycharmProjects\testDAta")

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')
    logger.info(pathToData)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    B_size = 1  # batch size for both the dataloader and batch saving
    # One entry per immediate sub-directory of the data root.
    beta_test_path_list = glob(str(pathToData) + "/*/")
    ImageDict = get_dataset(beta_test_path_list, batch_size=B_size)
    train = ImageDict['train_dataloader']
    test = ImageDict['test_dataloader']

    # Hard-coded checkpoint and image-output locations (local Windows paths).
    genPath = r'C:\Users\Morten From\PycharmProjects\Speciale\Master_Satelite_Image_Inpainting\models\New_400.pth'
    outputPathImages = Path(
        r'C:\Users\Morten From\PycharmProjects\Speciale\Master_Satelite_Image_Inpainting\images'
    )
    testGen = UnetGenerator(3, 3, 8)
    testGen.load_state_dict(torch.load(genPath))
    testGen = testGen.to(device)

    testGen.eval()
    iterater = 0
    # Generate an output for every (real, SAR) pair and save the triple
    # (real, SAR, generated) under images/iter<i>.
    for real, SAR in tqdm(train, position=0, leave=True, disable=True):
        batchOfImages = real.to(device)
        batchOfImagesSAR = SAR.to(device)
        outputs = testGen(batchOfImagesSAR)
        modelHelper.save_tensor_batchSAR(
            batchOfImages, batchOfImagesSAR, outputs, B_size,
            Path.joinpath(outputPathImages, 'iter' + str(iterater)))
        iterater = iterater + 1
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Plots one combined pixel-value histogram over the NIR band of the
        train and test images of a single tile.
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around duplicate OpenMP runtime crash
    ## Talk to Rune about how dataLayer is handle.
    config = TrainingConfig()
    config = update_config(args, config)
    ## For polyaxon
    #if config.run_polyaxon:
    localdir = Path().absolute().parent
    # NOTE(review): dataPath is unused below (paths are hard-coded); '\I'
    # happens not to be an escape sequence, but a raw string would be safer.
    dataPath = Path.joinpath(localdir, 'data\ImagesForVisualization')

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')
    #flatten all images used in ridgeplot so their pixel values go in one column
    curdatLayer = importData(config)

    #pathToRidgeImages = Path.joinpath(dataPath, 'Ridgeplot')
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\test\Turkey\T37SCC_20200729T081609\bandTCIRGB\Test\NIRImages\original_0NIR"
    images_test = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\test\Turkey\T37SCC_20200729T081609\bandTCIRGB\Train\NIRImages\original_0NIR"
    images_train = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)
    # NOTE(review): '+' concatenates if these are lists but adds
    # element-wise if they are numpy arrays — confirm the return type of
    # open_Imagefiles_as_array.
    images = images_test + images_train
    import seaborn as sns
    sns.set(font_scale=2.5)
    plt.rcParams.update({'font.size': 26})
    import cv2  # NOTE(review): imported but unused in this function

    df = []

    # One single-column DataFrame of flattened pixel values per image.
    for i in images:
        i = i.flatten()
        df.append(pd.DataFrame(i, columns=['NIR']))
    d = {'color': ['r']}  # NOTE(review): unused
    df_merged = pd.concat(df)
    axes = df_merged.plot(kind='hist',
                          subplots=True,
                          layout=(1, 1),
                          bins=200,
                          color=['r'],
                          yticks=[],
                          sharey=True,
                          sharex=True)
    axes[0, 0].yaxis.set_visible(False)
    fig = axes[0, 0].figure
    # Shared axis labels placed on the figure rather than the subplot.
    fig.text(0.5, 0.04, "Pixel Value", ha="center", va="center")
    fig.text(0.05,
             0.5,
             "Pixel frequency",
             ha="center",
             va="center",
             rotation=90)
    #plt.xlim(0, 4000)
    plt.show()

    STOP = True  # debugger breakpoint anchor
def main(args):
    """Resolve model and output locations, then run model evaluation."""
    config = update_config(args, TrainingConfig())
    logger = logging.getLogger(__name__)

    if config.run_polyaxon:
        # On the cluster the data/output roots come from the Polyaxon
        # environment.
        data_root = Path(get_data_paths()['data'])
        out_root = Path(get_outputs_path())
        inpainting_root = data_root / 'inpainting'
        os.environ['TORCH_HOME'] = str(data_root / 'pytorch_cache')
        config.data_path = inpainting_root
        config.output_path = out_root
        model_output_dir = inpainting_root / 'models' / 'OutputModels'
        stored_data_dir = config.output_path / 'data' / 'storedData'
    else:
        # Local run: everything lives next to the project directory.
        project_root = Path().absolute().parent
        model_output_dir = project_root / 'OutputModels'
        stored_data_dir = project_root / 'data' / 'storedData'

    # Import test data and evaluate.
    evaluator = eval_model(config)
    evaluator.run_eval(model_output_dir, stored_data_dir)
Example #4
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Builds the training configuration from command-line overrides and
        runs the full raw RGB-data preparation pipeline.
    """
    ## Talk to Rune about how dataLayer is handle. If it should be part of the "big" project.
    ## set number_tiles:1764
    config = update_config(args, TrainingConfig())

    log = logging.getLogger(__name__)
    log.info('making final dataLayer set from raw dataLayer')

    prepRGBdata(config).prepAllInRaw()
Example #5
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Loads the baseline image set and runs the baseline inpainting
        experiment on it.
    """
    ## Talk to Rune about how dataLayer is handle.
    config = update_config(args, TrainingConfig())

    log = logging.getLogger(__name__)
    log.info('making final dataLayer set from raw dataLayer')

    data_layer = importData(config)
    images, image_names = data_layer.get_images_for_baseLine()
    baselineModel(images, image_names, config).baselineExperiment()
Example #6
0
def update_config(args, config):
    """Return a new TrainingConfig with values overridden from *args*.

    Parameters
    ----------
    args : iterable of str
        Override strings of the form ``"key:value"``. Keys not present on
        the config are silently ignored. Each value is coerced to the type
        of the existing config attribute (int, float, bool; anything else
        is kept as the raw string).
    config : TrainingConfig
        Supplies both the defaults and the target types. The object itself
        is left unmodified (previously its ``__dict__`` was mutated in
        place as a side effect).

    Returns
    -------
    TrainingConfig
        A freshly constructed config with the overrides applied.
    """
    # Split only on the first ':' so values may themselves contain colons
    # (e.g. Windows paths or URLs); a plain split(':') raised ValueError.
    overrides = dict(arg.split(':', 1) for arg in args)
    # Copy the attribute dict: writing through config.__dict__ directly
    # would silently change the caller's config object.
    values = dict(config.__dict__)
    for key, raw in overrides.items():
        if key not in values:
            continue
        target_type = type(values[key])
        if target_type is int:
            values[key] = int(raw)
        elif target_type is float:
            values[key] = float(raw)
        elif target_type is bool:
            # Preserved semantics: only the literal string 'False' maps to
            # False; every other value (including 'false', '0') is True.
            values[key] = raw != 'False'
        else:
            values[key] = raw
    return TrainingConfig(**values)
Example #7
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Builds per-channel (R, G, B) pixel-value histograms over one RGB
        test tile and renders them via RidgePlot.
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around duplicate OpenMP runtime crash
    ## Talk to Rune about how dataLayer is handle.
    config = TrainingConfig()
    config = update_config(args, config)
    ## For polyaxon
    #if config.run_polyaxon:
    localdir = Path().absolute().parent
    # NOTE(review): dataPath is unused below (the image path is
    # hard-coded); '\I' happens not to be an escape sequence, but a raw
    # string would be safer.
    dataPath = Path.joinpath(localdir, 'data\ImagesForVisualization')

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    channelName = ["R", "G", "B"]
    #flatten all images used in ridgeplot so their pixel values go in one column
    curdatLayer = importData(config)

    #pathToRidgeImages = Path.joinpath(dataPath, 'Ridgeplot')
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\processed\Belarus\T35UNB_20200617T092029\bandTCIRGB\Test\RGBImages\original_0RGB"
    images = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)

    # tuple to select colors of each channel line
    colors = ("r", "g", "b")
    channel_ids = (0, 1, 2)

    labels = []
    ridgeImages = []
    # Histogram buffers: [value index 0..39999, count per value].
    # NOTE(review): assumes integer pixel values strictly below 40000 —
    # confirm the image dtype/range.
    Rband = [np.arange(40000).flatten(), np.zeros((40000))]
    Gband = [np.arange(40000).flatten(), np.zeros((40000))]
    Bband = [np.arange(40000).flatten(), np.zeros((40000))]
    # Take all images; put the R channel in one bucket, G in one, B in one,
    # with labels like Denmark_red, Denmark_green...
    #for i in range(len(images)):
    #    #for each image, put the channels with correct name into the ridgeimage and labels

    #    RGB = np.split(images[i], 3, axis=2)
    #    for j in range(3):
    #        ridgeImages.extend(RGB[j].ravel())
    #        labels.extend(np.tile(channelName[j], len(RGB[j].ravel())))
    for i in range(len(images)):
        RGB = np.split(images[i], 3, axis=2)
        uniqueR = np.unique(RGB[0], return_counts=True)
        uniqueG = np.unique(RGB[1], return_counts=True)
        uniqueB = np.unique(RGB[2], return_counts=True)
        # NOTE(review): assignment overwrites rather than accumulates, so
        # with several images only the last image's counts survive at
        # shared pixel values — confirm this is intended.
        Rband[1][uniqueR[0]] = uniqueR[1]
        Gband[1][uniqueG[0]] = uniqueG[1]
        Bband[1][uniqueB[0]] = uniqueB[1]

    #df = pd.DataFrame({'Rband': Rband[1],'Gband': Gband[1],'Bband': Bband[1]})
    # One long-format DataFrame per channel: count, pixel value, name.
    dfs = []
    dfs.append(
        pd.DataFrame({
            'band': Rband[1],
            'index': Rband[0],
            'ChannelName': 'redBand'
        }))

    dfs.append(
        pd.DataFrame({
            'band': Gband[1],
            'index': Gband[0],
            'ChannelName': 'greenBand'
        }))

    dfs.append(
        pd.DataFrame({
            'band': Bband[1],
            'index': Bband[0],
            'ChannelName': 'blueBand'
        }))
    df2 = pd.concat(dfs, axis=0)
    bandsNew = []
    bandsNew.extend(Rband[1])
    bandsNew.extend(Gband[1])
    bandsNew.extend(Bband[1])
    #df = pd.DataFrame(dict(Pixel_Values=images, g=labels))
    plotting = RidgePlot().__call__(DataFrame=df2,
                                    Bands=bandsNew,
                                    Names=channelName)
Example #8
0
def main():
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Trains the WGAN inpainting model on the beta test set with
        hard-coded hyper-parameters, then evaluates the resulting model.
    """
    ## Talk to Rune about how dataLayer is handle.
    config = TrainingConfig()
    # config = update_config(args,config)
    ## For polyaxon

    # Hard-coded experiment hyper-parameters.
    config.epochs = 501
    config.run_polyaxon = True
    config.batch_size = 8
    config.lr = 0.0002
    config.save_model_step = 100
    config.n_critic = 2
    config.model_name = 'PartialConvolutionsWgan'

    # Test parameters we run with, which are normally set in experiments
    if config.run_polyaxon:
        # The POLYAXON_NO_OP env variable had to be set before any Polyaxon imports were allowed to happen
        from polyaxon import tracking
        tracking.init()
        input_root_path = Path(
            r'/data/inpainting/data_landset8/Test_dataset/Betaset')
        cache_path = Path('/cache')
        output_root_path = Path(tracking.get_outputs_path())
        pathToData = input_root_path  ## Delete later HACK
        inpainting_data_path = input_root_path / 'inpainting'
        # Set PyTorch to use the data directory for caching pre-trained models. If this is not done, each experiment
        # will download the pre-trained model and store it in each individual experiment container, thereby wasting
        # large amounts of disk space.
        # Code is from here: https://stackoverflow.com/a/52784628
        os.environ['TORCH_HOME'] = str(
            cache_path / 'pytorch_cache')  # setting the environment variable

        config.output_path = Path(os.getcwd()).joinpath('outputs')
        config.data_path = Path(r'/data/inpainting/')
        config.polyaxon_tracking = tracking
    if not config.run_polyaxon:
        # NOTE(review): pathToData is only assigned in the polyaxon branch
        # above; with run_polyaxon False the glob below raises NameError.
        os.environ['POLYAXON_NO_OP'] = 'true'
    # Setup Polyaxon (import must be done here as the POLYAXON_NO_OP variable was set inside Python)

    beta_test_path_list = glob(str(pathToData) + "/*/")

    # Example input file names:
    # S1A_20201005_034656_DSC_109_RGBsar_cog.tif
    # S2B_MSIL2A_20201002T090719_N0214_R050_T35TMH_20201002T113443_B02_cog
    # S2B_MSIL2A_20201002T090719_N0214_R050_T35TMH_20201002T113443_B03_cog.tif
    # S2B_MSIL2A_20201002T090719_N0214_R050_T35TMH_20201002T113443_B04_cog.tif

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    logger.info(pathToData)

    ImageDict = get_dataset(beta_test_path_list, batch_size=config.batch_size)
    train = ImageDict['train_dataloader']
    test = ImageDict['test_dataloader']

    # Runs on WGAN GP
    # NOTE(review): both branches are identical (criticWgan in each);
    # presumably the first was meant to use a plain discriminator.
    if config.model_name == 'PartialConvolutions':
        curtraingModel = trainInpaintingWgan(train, test, generator,
                                             criticWgan, config)
        local_model_path = curtraingModel.trainGAN()
    elif config.model_name == 'PartialConvolutionsWgan':
        curtraingModel = trainInpaintingWgan(train, test, generator,
                                             criticWgan, config)
        local_model_path = curtraingModel.trainGAN()

    # local_model_path = Path(r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\OutputModels\PartialConvolutionsWgan_200.pt")
    if config.run_polyaxon:
        model_path = inpainting_data_path / 'models'
        modelOutputPath = Path.joinpath(model_path, 'OutputModels')
        stores_output_path = config.output_path / 'data' / 'storedData'
    else:
        localdir = Path().absolute().parent
        modelOutputPath = Path.joinpath(localdir, 'OutputModels')
        stores_output_path = localdir / 'data' / 'storedData'

    curevalModel = eval_model(config)
    curevalModel.run_eval(modelOutputPath,
                          stores_output_path,
                          model_path=local_model_path,
                          test_dataloader=test)
Example #9
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Trains an inpainting GAN (plain discriminator or WGAN critic,
        selected by config.model_name) and evaluates the trained model.
    """
    ## Talk to Rune about how dataLayer is handle.
    config = TrainingConfig()
    config = update_config(args, config)
    ## For polyaxon

    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])  #'data'
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()

        # NOTE(review): joining a pathlib Path with an *absolute* path
        # ('/workspace/...') discards input_root_path entirely — confirm
        # the leading '/' is intended.
        pathToData = str(input_root_path /
                         '/workspace/data_landset8/testImages')
    else:
        pathToData = Path(r"C:\Users\Morten From\PycharmProjects\testDAta")

    # NOTE(review): testPathData is never used below.
    testPathData = Path(
        r'/workspace/data_landset8/unzipped/GrassCrops/BC/LC81820302014180LGN00'
    )

    # Example input file names:
    #S1A_20201005_034656_DSC_109_RGBsar_cog.tif
    #S2B_MSIL2A_20201002T090719_N0214_R050_T35TMH_20201002T113443_B02_cog
    #S2B_MSIL2A_20201002T090719_N0214_R050_T35TMH_20201002T113443_B03_cog.tif
    #S2B_MSIL2A_20201002T090719_N0214_R050_T35TMH_20201002T113443_B04_cog.tif

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    logger.info(pathToData)

    ImageDict = get_dataset(pathToData, batch_size=config.batch_size)
    train = ImageDict['train_dataloader']
    test = ImageDict['test_dataloader']

    # Both currently run on the Wgan training loop; the first branch pairs
    # the generator with a plain discriminator, the second with the critic.
    # NOTE(review): local_model_path is unbound if model_name matches
    # neither branch.
    if config.model_name == 'PartialConvolutions':
        curtraingModel = trainInpaintingWgan(train, test, generator,
                                             discriminator, config)
        local_model_path = curtraingModel.trainGAN()
    elif config.model_name == 'PartialConvolutionsWgan':
        curtraingModel = trainInpaintingWgan(train, test, generator,
                                             criticWgan, config)
        local_model_path = curtraingModel.trainGAN()
    #local_model_path = Path(r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\OutputModels\PartialConvolutionsWgan_200.pt")
    if config.run_polyaxon:
        model_path = inpainting_data_path / 'models'
        modelOutputPath = Path.joinpath(model_path, 'OutputModels')
        stores_output_path = config.output_path / 'data' / 'storedData'
    else:
        localdir = Path().absolute().parent
        modelOutputPath = Path.joinpath(localdir, 'OutputModels')
        stores_output_path = localdir / 'data' / 'storedData'
    curevalModel = eval_model(config)
    curevalModel.run_eval(modelOutputPath,
                          stores_output_path,
                          model_path=local_model_path,
                          test_dataloader=test)
            # NOTE(review): fragment — the enclosing function, the loop
            # header, and the `pts`, `img`, `model_path` names are defined
            # above this view. Persist the selected ROI and stop polling.
            saved_data = {"ROI": pts}
            joblib.dump(value=saved_data, filename="config.pkl")
            break
    cv2.destroyAllWindows()
    # Build a polygon mask from the ROI points; after the two relabelling
    # assignments below, mask2 is 1 outside the ROI and 0 inside it.
    mask = np.zeros(img.shape, np.uint8)
    points = np.array(pts, np.int32)
    points = points.reshape((-1, 1, 2))
    mask = cv2.polylines(mask, [points], True, (255, 255, 255), 2)
    mask2 = cv2.fillPoly(mask.copy(), [points], (255, 255, 255))
    mask2[mask2 == 0] = 1
    mask2[mask2 == 255] = 0

    # Load the target image unchanged (-1 keeps original depth/channels).
    img = cv2.imread(
        r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\processed\Remove_cloud\T35UNB_20200617T092029\bandTCIRGB\Test\RGBImages\original_0RGB\T35UNB_20200617T092029_test_1450.tiff",
        -1)
    config = TrainingConfig()
    # Convert HWC numpy image to a CHW float tensor on the configured device.
    image = convertToFloat32(img)
    image = remove_outliers(image)
    image = torch.from_numpy(np.array(image).astype(np.float32)).transpose(
        0, 1).transpose(0, 2).contiguous()
    image = image.to(config.device)
    masks = torch.from_numpy(mask2).transpose(0, 1).transpose(0,
                                                              2).contiguous()
    masks = masks.type(torch.cuda.FloatTensor)
    #masks = 1 - masks
    # NOTE(review): Tensor.to is not in-place — this return value is
    # discarded (masks is already on the GPU via torch.cuda.FloatTensor).
    masks.to(config.device)

    # Load the model
    gen = generator().to(config.device)
    gen.load_state_dict(
        torch.load(model_path))  ## Use epochs to identify model number
def main(args):
    """Evaluate a trained inpainting generator on the RGB test set.

    Loads the generator checkpoint identified by ``config.model_name`` and
    ``config.epochs``, generates inpainted versions of a few test batches,
    then computes FID, PSNR, MAE, SDD and SSIM between real and generated
    images and stores the results via ``saveEvalToTxt``.

    Parameters
    ----------
    args : iterable of str
        ``"key:value"`` overrides forwarded to ``update_config``.
    """
    config = TrainingConfig()
    config = update_config(args, config)
    logger = logging.getLogger(__name__)

    if config.run_polyaxon:
        # Resolve cluster paths from the Polyaxon environment.
        input_root_path = Path(get_data_paths()['data'])
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        imageOutputPath = config.data_path / 'data' / 'generated'
        model_path = inpainting_data_path / 'models'
        modelOutputPath = Path.joinpath(model_path, 'OutputModels')
        stores_output_path = config.output_path / 'data' / 'storedData'
    else:
        imageOutputPath = Path().absolute().parent / 'data' / 'generated'
        localdir = Path().absolute().parent
        modelOutputPath = Path.joinpath(localdir, 'OutputModels')
        stores_output_path = localdir / 'data' / 'storedData'

    # Run the standard evaluation first.
    evaluator = eval_model(config)
    evaluator.run_eval(modelOutputPath, stores_output_path)
    curdatLayer = importData(config)

    train, test_dataloader = curdatLayer.getRGBDataLoader()
    del train  # only the test split is needed; free the training loader

    model_file = Path.joinpath(
        modelOutputPath, config.model_name + '_' + str(config.epochs) + '.pt')
    print(model_file)
    if Path.exists(model_file):
        # If the model was trained with the WGAN generator (layer norm),
        # substitute Wgangenerator for generator() here.
        gen = generator().to(config.device)
        gen.load_state_dict(torch.load(model_file))  ## Use epochs to identify model number
    else:
        # BUG fix: previously execution printed a message and fell through
        # to gen.eval(), raising NameError. Fail fast with a clear error.
        raise FileNotFoundError("Unable to find path to model: " +
                                str(model_file))
    gen.eval()

    loadAndAgumentMasks = makeMasks.MaskClass(config,
                                              rand_seed=None,
                                              evaluation=True)

    # Collect file names of all test images so generated files are saved
    # under the same names. (Fixes the original's shadowed loop variable
    # `i` used for both the dataset index and the path iterator.)
    names = []
    for dataset in test_dataloader.dataset.datasets:
        for image_path in dataset.image_list:
            # Slice the path to only the file name, i.e. after the last separator.
            if config.run_polyaxon:
                names.append(image_path.split('/')[-1])  ## Linux
            else:
                names.append(image_path.split("\\")[-1])
    print("Found this many names " + str(len(names)))

    current_number = 0

    if not os.path.exists(Path.joinpath(imageOutputPath, config.model_name)):
        os.makedirs(Path.joinpath(imageOutputPath, config.model_name))

    now = datetime.now()
    dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")

    local_test_path = imageOutputPath / config.model_name / dt_string / 'Data'
    local_store_path = stores_output_path / config.model_name / dt_string / 'stored_Data'
    os.makedirs(local_test_path)
    os.makedirs(local_store_path)
    start_time = datetime.now()
    testCount = 3  # only a handful of batches are generated (smoke-test limit)
    for real in tqdm(test_dataloader):
        masks = loadAndAgumentMasks.returnTensorMasks(config.batch_size)
        masks = torch.from_numpy(masks)
        masks = masks.type(torch.cuda.FloatTensor)
        masks = 1 - masks
        masks.to(config.device)

        real = real.to(config.device)
        fake_masked_images = torch.mul(real, masks)
        generated_images = gen(fake_masked_images, masks)
        image_names = names[current_number:current_number + config.batch_size]
        current_number = current_number + config.batch_size  ## Change naming to include all names
        for index, image in enumerate(generated_images):
            modelHelper.save_tensor_single(image,
                                           Path.joinpath(
                                               local_test_path,
                                               image_names[index]),
                                           raw=True)
        if testCount < 0:
            break
        testCount = testCount - 1
        print("Saved image to " + str(local_test_path))
    end_time = datetime.now()
    time_ran = str(end_time - start_time)

    # Dataloader over the just-generated images.
    generated_images_dataloader = curdatLayer.getGeneratedImagesDataloader(
        local_test_path)

    # FID over test vs generated sets.
    FID_Value = FIDCalculator(test_dataloader, generated_images_dataloader,
                              len(test_dataloader) * config.batch_size,
                              config.batch_size, config).get_FID_scores()

    # PSNR / MAE / SDD / SSIM, computed over the first batch only (the
    # trailing `break` matches the original behaviour).
    dataloader_iterator = iter(generated_images_dataloader)
    psnrValues = []
    maeValues = []
    sddValues = []
    SSIMValues = []
    for images_real in test_dataloader:
        try:
            images_generated = next(dataloader_iterator)
        except StopIteration:
            dataloader_iterator = iter(generated_images_dataloader)
            images_generated = next(dataloader_iterator)

        for index2 in range(config.batch_size):
            psnrValues.append(PSNR()(images_real[index2],
                                     images_generated[index2]))
            maeValues.append(MSE()(images_real[index2],
                                   images_generated[index2]))
            # BUG fix: SDD.__call__ was invoked on the *class* (the first
            # image became `self`); instantiate it like MSE/PSNR above.
            sddValues.append(SDD()(images_real[index2],
                                   images_generated[index2]))
            image1 = images_real[index2].unsqueeze(0)
            image2 = images_generated[index2].unsqueeze(0)
            SSIMValues.append(ssim(image1, image2))
        break
    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    meanSSIM = sum(SSIMValues) / len(SSIMValues)
    minSSIM = min(SSIMValues)
    maxSSIM = max(SSIMValues)

    # CC is currently disabled (its per-image append was commented out);
    # BUG fix: sum([]) / len([]) previously raised ZeroDivisionError.
    meanCC = minCC = maxCC = float('nan')

    # Save final results of evaluation metrics.
    # BUG fixes vs. the original call: the 5th argument was a duplicated
    # meanSSIM where the adjacent (minSDD, maxSDD) pair expects meanSDD,
    # and PSNR.item() called .item() on the PSNR *class* while the
    # computed meanPSNR went unused.
    saveEvalToTxt(config.model_name, meanMAE, minMAE, maxMAE, meanSDD,
                  minSDD, maxSDD, meanSSIM, minSSIM, maxSSIM,
                  meanPSNR.item(), minPSNR.item(), maxPSNR.item(), meanCC,
                  minCC, maxCC, FID_Value, time_ran, local_store_path)
    # Clean up the locally generated images.
    modelHelper.clearFolder(local_test_path.parent)
Example #12
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

        Downloads Sentinel-2 products for a ground footprint, searches
        Sentinel-1 GRD products over the same footprint, prunes products
        that do not contribute coverage, and writes the still-uncovered
        area out as a GeoJSON file.
    """
    ## Talk to Rune about how dataLayer is handle. If it should be part of the "big" project.
    ## set number_tiles:1764
    config = TrainingConfig()
    config = update_config(args, config)
    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')
    # SECURITY NOTE(review): plaintext SciHub credentials committed in
    # source — move these to environment variables or a secrets store.
    userTuple = [['pandagud', 'damp4ever'], ['pandagud2', 'damp4ever'],
                 ['pandagud3', 'damp4ever'], ['au524478', 'Palantir1234']]
    current_user = random.choice(userTuple)

    api = SentinelAPI(current_user[0], current_user[1],
                      'https://scihub.copernicus.eu/dhus')

    # search by polygon, time, and SciHub query keywords
    path = r"C:\Users\panda\Downloads\LC80290292014132LGN00.geojson"
    footprint = geojson_to_wkt(read_geojson(path))
    products = api.query(area=footprint,
                         date=('20210101', '20210105'),
                         platformname='Sentinel-2',
                         order_by='+ingestiondate',
                         limit=1)
    areas = api.to_geodataframe(products)
    # NOTE(review): this local `geojson` shadows the geojson *module*,
    # which appears to be referenced again further down (geojson.Feature)
    # — confirm that call still resolves as intended.
    geojson = api.to_geojson(products)
    api.download_all(products, into=r'C:\Users\panda\Sat_paper\Alfa')

    # Sentinel-1 GRD products over the same footprint.
    products = api.query(area=footprint,
                         date=('20210401', '20210430'),
                         producttype='GRD',
                         platformname='Sentinel-1',
                         sensoroperationalmode='IW',
                         polarisationmode='VV VH',
                         order_by='ingestiondate')
    firstproduct = next(iter(products))  # NOTE(review): unused
    online_product = ''
    # Keep only the first product that is online (not archived).
    for i in products:
        is_online = api.is_online(products.get(i).get('uuid'))
        if is_online:
            online_product = i
            break
    delete_list = []
    for i in products:
        if i != online_product:
            delete_list.append(i)
    for i in delete_list:
        del products[i]

    ground_geojsons = read_geojson(path)
    products_geojsons = api.to_geojson(products)

    ground_polygon = ground_geojsons.get('features')[0].get('geometry').get(
        'coordinates')
    ground_polygon = geometry.Polygon(ground_polygon[0][0])
    import numpy as np
    # Group product footprints by a tile/date key sliced out of the title.
    titles = []
    ids = []
    for item in products_geojsons.get('features'):
        id = item.get('properties').get('id')
        item = item.get('properties').get('title')
        item = (item[17:25] + item[48:55])
        titles.append(item)
        ids.append([item, id])
    unique = list(set(titles))
    union_list = []
    # Union the (slightly scaled) footprints sharing the same title key.
    for i, element in enumerate(unique):
        local_polygon = Polygon()
        for j in range(len(titles)):
            if titles[j] == element:
                item = products_geojsons.get('features')[j]
                item = item.get('geometry').get('coordinates')
                item = geometry.Polygon(item[0][0])
                # Scale slightly so adjacent footprints overlap.
                item = affinity.scale(item, xfact=1.01, yfact=1.01)
                polygons = [item, local_polygon]
                local_polygons = unary_union(polygons)
                local_polygon = item
        union_list.append([local_polygons, element])
    # Pick the first union that fully contains the ground footprint.
    # NOTE(review): found_id stays unbound if no union contains it, making
    # the following loop raise NameError.
    for index, element in enumerate(union_list):
        wkt = element[0].wkt
        if ground_polygon.within(element[0]):
            found_id = element[1]
            break
    for i in ids:
        if found_id != i[0]:
            del products[i[1]]
    # Rank remaining footprints by intersection area with the ground
    # polygon, largest first.
    area_list = []
    for index, item in enumerate(products_geojsons.get('features')):
        item = item.get('geometry').get('coordinates')
        item = geometry.Polygon(item[0][0])
        local_intersection = item.intersection(ground_polygon)
        local_intersection = [local_intersection.area, index]
        area_list.append(local_intersection)
    area_list.sort(reverse=True)
    # Greedily subtract each intersecting footprint from the remaining
    # uncovered ground area; drop products that no longer intersect.
    for index in range(len(area_list)):
        item = products_geojsons.get('features')[area_list[index][1]]
        id = item.get('properties').get('id')
        item = item.get('geometry').get('coordinates')
        item = geometry.Polygon(item[0][0])
        if item.intersects(ground_polygon):
            local_intersection = ground_polygon.intersection(item)
            print(str(ground_polygon.area))
            print(str(local_intersection.area))
            # ground_polygon = ground_polygon.difference(local_intersection)
            ground_polygon = (ground_polygon.symmetric_difference(
                local_intersection)).difference(local_intersection)
        else:
            del products[id]
    import datetime
    from datetime import timedelta
    S2_geojson = read_geojson(path)

    # Derive a +/- 7 day Sentinel-1 search window around the Sentinel-2
    # ingestion date.
    start_S1_date = S2_geojson.get('features')[0].get('properties').get(
        'ingestiondate')
    start_S1_date = start_S1_date.split('T')[0]
    start_S1_date = datetime.datetime.strptime(start_S1_date,
                                               '%Y-%m-%d').date()
    ## New end date for S1
    end_S1_date = start_S1_date + timedelta(days=7)
    start_S1_date = start_S1_date - timedelta(days=7)
    start_S1_date_str = str(start_S1_date).replace('-', '')
    end_S1_date_str = str(end_S1_date).replace('-', '')

    ## COMBINE FOOTPRINT
    # Write the remaining (uncovered) ground area out as GeoJSON.
    geom_in_geojson = []
    geom_in_geojson.append(
        geojson.Feature(geometry=ground_polygon,
                        properties={"MissingData": "Test"}))
    feature_collection = FeatureCollection(geom_in_geojson)
    pathToFile = r'C:\Users\panda\Sat_paper\missing.geojson'
    with open(pathToFile, 'w') as f:
        dump(feature_collection, f)

    print("Done")
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

    MSE/SSIM visualisation experiment, in three parts:
      1. original satellite image vs. an inpainted one vs. an unrelated image,
      2. skimage's astronaut sample vs. a biharmonic-inpainted defected copy,
      3. ad-hoc SSIM probes on differently rescaled image pairs.
    All inputs come from hard-coded local paths; results are only displayed
    with matplotlib, nothing is written to disk.
    """
    # Work around duplicate OpenMP runtime loads (common with MKL + PyTorch on Windows).
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    config = TrainingConfig()
    config = update_config(args, config)
    localdir = Path().absolute().parent
    # Raw string so '\I' is not parsed as a (deprecated) string escape sequence.
    dataPath = Path.joinpath(localdir, r'data\ImagesForVisualization')

    def mse(x, y):
        # Frobenius norm of the difference -- an error magnitude,
        # not a true per-pixel mean squared error.
        return np.linalg.norm(x - y)

    def MaxMin(a, b):
        # Joint dynamic range over both images; used as ssim()'s data_range.
        minvalue = min(a.min(), b.min())
        maxvalue = max(a.max(), b.max())
        return maxvalue - minvalue

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')
    plt.rcParams.update({'font.size': 18})

    # --- Part 1: original vs. inpainted vs. a different real image ---------
    path_generated = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\generated\Base_Line\18_12_2020_09_58_50\Data"
    curdatLayer = importData(config)
    generated_images = curdatLayer.open_Imagefiles_as_array(path_generated)
    generated_img = generated_images[0]
    path_real = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\processed\Belarus\T35UNB_20200617T092029\bandTCIRGB\Test\RGBImages\original_0RGB"
    original_images = curdatLayer.open_Imagefiles_as_array(path_real)
    org_img = original_images[0]
    another_img = original_images[1]
    win_size = 11

    generated_img = convertToFloat32(generated_img)
    org_img = convertToFloat32(org_img)
    another_img = convertToFloat32(another_img)
    # Copies are kept for display; the metrics run on the float arrays above.
    generated_img_copy = generated_img.copy()
    org_img_copy = org_img.copy()
    another_img_copy = another_img.copy()

    # Self-comparison baseline (MSE 0 / SSIM 1) plus the two real comparisons.
    mse_org = mse(org_img, org_img)
    ssim_org, ssim_org_full = ssim(org_img, org_img, data_range=MaxMin(org_img, org_img),
                                   multichannel=True, win_size=win_size, full=True)

    mse_org_vs_inpaint = mse(org_img, generated_img)
    ssim_org_vs_inpaint, ssim_org_vs_inpaint_full = ssim(
        org_img, generated_img, data_range=MaxMin(org_img, generated_img),
        multichannel=True, win_size=win_size, full=True)

    mse_org_vs_another = mse(org_img, another_img)
    ssim_org_vs_another, ssim_org_vs_another_full = ssim(
        org_img, another_img, data_range=MaxMin(org_img, another_img),
        multichannel=True, win_size=win_size, full=True)

    # Top row: images with their metrics; bottom row: full SSIM maps.
    fig, axes = plt.subplots(nrows=2, ncols=3,
                             sharex=True, sharey=True)
    ax = axes.ravel()
    label = 'MSE: {:.2f}, SSIM: {:.2f}'

    ax[0].imshow(_normalize(org_img_copy))
    ax[0].set_xlabel(label.format(mse_org, ssim_org))
    ax[0].set_title('Original image')
    ax[0].set_yticklabels([])
    ax[0].set_xticklabels([])

    ax[1].imshow(_normalize(generated_img_copy))
    ax[1].set_xlabel(label.format(mse_org_vs_inpaint, ssim_org_vs_inpaint))
    ax[1].set_title('Original vs inpainted')
    ax[1].set_yticklabels([])
    ax[1].set_xticklabels([])

    ax[2].imshow(_normalize(another_img_copy))
    ax[2].set_xlabel(label.format(mse_org_vs_another, ssim_org_vs_another))
    ax[2].set_title('Original vs Another Image')
    ax[2].set_yticklabels([])
    ax[2].set_xticklabels([])

    ax[3].imshow(ssim_org_full)
    ax[3].set_title('Original image')
    ax[3].set_yticklabels([])
    ax[3].set_xticklabels([])

    ax[4].imshow(ssim_org_vs_inpaint_full)
    ax[4].set_title('Original vs inpainted')
    ax[4].set_yticklabels([])
    ax[4].set_xticklabels([])

    ax[5].imshow(ssim_org_vs_another_full)
    ax[5].set_title('Original vs Another Image')
    ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])

    plt.tight_layout()
    plt.show()

    # --- Part 2: skimage astronaut sample + biharmonic inpainting ----------
    image_orig = data.astronaut()[0:200, 0:200]

    # Create mask with three defect regions: left, middle, right respectively
    mask = np.zeros(image_orig.shape[:-1])
    mask[20:60, 0:20] = 1
    mask[160:180, 70:155] = 1
    mask[30:60, 170:195] = 1

    # Zero the defect pixels. Indexing a 3-D image with the 2-D mask hits every
    # colour channel at once, so one assignment replaces the old per-channel
    # loop (whose iterations each rewrote the same pixels anyway).
    image_defect = image_orig.copy()
    image_defect[np.where(mask)] = 0

    image_result = inpaint.inpaint_biharmonic(image_defect, mask,
                                              multichannel=True)
    image_orig = img_as_float(image_orig)
    image_defect = img_as_float(image_defect)
    mse_org = mse(image_orig, image_orig)
    ssim_org, ssim_org_full = ssim(image_orig, image_orig, data_range=MaxMin(image_orig, image_orig),
                                   multichannel=True, win_size=win_size, full=True)

    mse_org_vs_inpaint = mse(image_orig, image_result)
    ssim_org_vs_inpaint, ssim_org_vs_inpaint_full = ssim(
        image_orig, image_result, data_range=MaxMin(image_orig, image_result),
        multichannel=True, win_size=win_size, full=True)

    mse_org_vs_masked = mse(image_orig, image_defect)
    ssim_org_vs_masked, ssim_org_vs_masked_full = ssim(
        image_orig, image_defect, data_range=MaxMin(image_orig, image_defect),
        multichannel=True, win_size=win_size, full=True)

    fig, axes = plt.subplots(nrows=2, ncols=3,
                             sharex=True, sharey=True)
    ax = axes.ravel()
    label = 'MSE: {:.2f}, SSIM: {:.2f}'

    ax[0].imshow(image_orig)
    ax[0].set_xlabel(label.format(mse_org, ssim_org))
    ax[0].set_title('Original image')
    ax[0].set_yticklabels([])
    ax[0].set_xticklabels([])

    ax[1].imshow(image_result)
    ax[1].set_xlabel(label.format(mse_org_vs_inpaint, ssim_org_vs_inpaint))
    ax[1].set_title('Original vs inpainted')
    ax[1].set_yticklabels([])
    ax[1].set_xticklabels([])

    ax[2].imshow(image_defect)
    ax[2].set_xlabel(label.format(mse_org_vs_masked, ssim_org_vs_masked))
    ax[2].set_title('Original vs noneinpaited')
    ax[2].set_yticklabels([])
    ax[2].set_xticklabels([])

    ax[3].imshow(ssim_org_full)
    ax[3].set_title('Original image')
    ax[3].set_yticklabels([])
    ax[3].set_xticklabels([])

    ax[4].imshow(ssim_org_vs_inpaint_full)
    ax[4].set_title('Original vs inpainted')
    ax[4].set_yticklabels([])
    ax[4].set_xticklabels([])

    ax[5].imshow(ssim_org_vs_masked_full)
    ax[5].set_title('Original vs noneinpaited')
    ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])

    plt.tight_layout()
    plt.show()

    # --- Part 3: ad-hoc SSIM probes on rescaled image pairs ----------------
    #pathToRidgeImages = Path.joinpath(dataPath, 'Ridgeplot')
    pathToRidgeImages = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\data\generated\PartialConvolutions\12_12_2020_18_55_21\Data"
    curdatLayer = importData(config)
    images = curdatLayer.open_Imagefiles_as_array(pathToRidgeImages)
    # NOTE(review): __call__ is invoked on SSIM_SKI itself. If SSIM_SKI is a
    # class (not an instance), the first image fills the 'self' slot --
    # confirm SSIM_SKI's definition before relying on these values.
    test = SSIM_SKI.__call__(images[0], images[1])
    test2 = SSIM_SKI.__call__(convertToFloat32(images[0]), convertToFloat32(images[1]))
    test3 = SSIM_SKI.__call__((images[0] * 0.0255), (images[1] * 0.0255))
    test4 = SSIM_SKI.__call__(images[0] / 4095, images[1] / 4095)
    show_images(images[0] / 10000, (images[0] * 0.0255), (images[0] * 0.0255))
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

    NDVI experiment: computes NDVI = (NIR - red) / (NIR + red) for an original
    Sentinel-2 tile and for its inpainted counterpart, plots both maps plus
    their difference, then restricts both to a generated mask region and
    writes PSNR/CC/MAE/SDD/RMSE summary statistics to a results text file.
    """
    # Work around duplicate OpenMP runtime loads (common with MKL + PyTorch on Windows).
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    config = TrainingConfig()
    config = update_config(args, config)
    localdir = Path().absolute().parent
    # Raw string so '\I' is not parsed as a (deprecated) string escape sequence.
    dataPath = Path.joinpath(localdir, r'data\ImagesForVisualization')

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    curdatLayer = importData(config)

    ## Original tile: first NIR band and the red channel of the first RGB image.
    pathToNIR = r"E:\Speciale\NDVIExperiment\Croatia\Original_Data\NIR"
    nir_images = curdatLayer.open_Imagefiles_as_array(pathToNIR)
    nir_image = nir_images[0]
    pathtoRGB = r"E:\Speciale\NDVIExperiment\Croatia\Original_Data\RGB"
    rgb_images = curdatLayer.open_Imagefiles_as_array(pathtoRGB)
    rgb_image = rgb_images[0]
    r, g, b = cv2.split(rgb_image)

    # NOTE(review): pixels where nir + red == 0 divide by zero (NaN/inf) --
    # confirm the inputs can never sum to zero, or guard the denominator.
    org_ndvi = (nir_image - r) / (nir_image + r)
    titles = ["Sentinel 2 - Normalized Difference Vegetation Index (NDVI) over Original"]
    # https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_calculate_classify_ndvi.html
    # Turn off bytescale scaling due to float values for NDVI
    ep.plot_bands(org_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    ## Inpainted tile: same computation on the generated data.
    pathToNIR = r"E:\Speciale\NDVIExperiment\Croatia\PartialConvolutions\big_mask\DataNir"
    nir_images = curdatLayer.open_Imagefiles_as_array(pathToNIR)
    nir_image = nir_images[0]
    pathtoRGB = r"E:\Speciale\NDVIExperiment\Croatia\PartialConvolutions\big_mask\Data"
    rgb_images = curdatLayer.open_Imagefiles_as_array(pathtoRGB)
    rgb_image = rgb_images[0]
    r, g, b = cv2.split(rgb_image)

    gen_ndvi = (nir_image - r) / (nir_image + r)
    titles = ["Sentinel 2- Normalized Difference Vegetation Index (NDVI) over generated"]
    # https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_calculate_classify_ndvi.html
    # Turn off bytescale scaling due to float values for NDVI
    ep.plot_bands(gen_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    diff_ndvi = org_ndvi - gen_ndvi
    old = sum(gen_ndvi.flatten())
    new = sum(org_ndvi.flatten())
    # Relative change of the summed NDVI. NOTE(review): this is a fraction,
    # not a percentage, despite the '%' in the print below -- multiply by 100
    # if a percentage is actually wanted.
    diffSumsWithMaria = ((new - old) / old)

    print("The NDVI have changed " + str(diffSumsWithMaria) + " %")

    titles = ["Sentinel 2- Normalized Difference Vegetation Index (NDVI) difference"]
    # https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_calculate_classify_ndvi.html
    # Turn off bytescale scaling due to float values for NDVI
    ep.plot_bands(diff_ndvi, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)

    # Build the evaluation mask (fixed seed index 787) and flip its 0/1 sense.
    loadAndAgumentMasks = makeMasks.MaskClass(config, rand_seed=None, evaluation=True)
    mask = loadAndAgumentMasks.returnMask(787)
    mask = mask[0, :, :]
    mask = 1 - mask
    # Zero the masked pixels. A single boolean-index assignment covers the
    # whole array, so the former per-layer loops (which re-applied the same
    # assignment once per channel) were redundant.
    gen_ndvi_masked = gen_ndvi.copy()
    org_ndvi_masked = org_ndvi.copy()
    gen_ndvi_masked[np.where(mask)] = 0
    org_ndvi_masked[np.where(mask)] = 0

    ep.plot_bands(gen_ndvi_masked, cmap="RdYlGn", cols=1, title=titles, vmin=-1, vmax=1)
    maeValues = []
    sddValues = []
    ssimscikitValues = []
    psnrValues = []
    CCValues = []
    rmseValues = []
    # Keep only non-zero pixels (flattens the arrays).
    # NOTE(review): each array is filtered independently -- if either NDVI map
    # contains natural zeros the two vectors desynchronise; verify upstream.
    org_ndvi_masked = org_ndvi_masked[org_ndvi_masked != 0]
    gen_ndvi_masked = gen_ndvi_masked[gen_ndvi_masked != 0]
    psnrValues.append(PSNR().__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    CCValues.append(CC().__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    maeValues.append(MSE().__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    # NOTE(review): SDD/RMSE are called on the class (no instance), so the
    # first array fills the 'self' slot -- confirm their __call__ signatures.
    sddValues.append(SDD.__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    rmseValues.append(RMSE.__call__(org_ndvi_masked, gen_ndvi_masked, tensor=False))
    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    # SSIM variants are not computed in this experiment; report zeros so the
    # saveEvalToTxt signature stays satisfied.
    meanSSIM = 0
    minSSIM = 0
    maxSSIM = 0

    meanSCISSIM = 0
    minSCISSIM = 0
    maxSCISSIM = 0

    meanCC = sum(CCValues) / len(CCValues)
    minCC = min(CCValues)
    maxCC = max(CCValues)

    meanRMSE = sum(rmseValues) / len(rmseValues)
    minRMSE = min(rmseValues)
    maxRMSE = max(rmseValues)
    # FID and runtime are not measured here; recorded as zero placeholders.
    FID_Value = 0.0
    time_ran = 0.0
    local_store_path = Path(r"E:\Speciale\NDVIExperiment\Croatia")
    saveEvalToTxt(config.model_name, meanMAE, minMAE, maxMAE, meanSDD, minSDD, maxSDD, meanSSIM,
                  minSSIM,
                  maxSSIM, meanSCISSIM, minSCISSIM, maxSCISSIM, meanPSNR, minPSNR, maxPSNR, meanCC, minCC, maxCC,
                  meanRMSE, minRMSE, maxRMSE, FID_Value, time_ran, local_store_path)
Example #15
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

    Loads a trained generator checkpoint (NIR or RGB variant, per config),
    runs it over the test set with freshly generated masks, and stores the
    inpainted outputs under a timestamped output directory.
    """
    config = TrainingConfig()
    config = update_config(args, config)
    ## For polyaxon: remap data/output paths and the torch cache to the cluster.
    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    curdatLayer = importData(config)
    if config.nir_data:
        train, test_dataloader = curdatLayer.getNIRDataLoader()
    else:
        train, test_dataloader = curdatLayer.getRGBDataLoader()
    local_model_path = r"C:\Users\panda\PycharmProjects\Image_Inpainting_Sat\Master_Satelite_Image_Inpainting\OutputModels\PartialConvolutionsWgan_301.pt"
    local_output_path = Path(r"E:\Speciale\final_model")
    #gen = Wgangenerator().to(config.device)
    if config.nir_data:
        gen = generatorNIR().to(config.device)
    else:
        gen = generator().to(config.device)
    gen.load_state_dict(
        torch.load(local_model_path))  ## Use epochs to identify model number
    gen.eval()

    loadAndAgumentMasks = makeMasks.MaskClass(config,
                                              rand_seed=None,
                                              evaluation=True,
                                              noFlip=True)
    names = []
    # Find names of test images, in order to save the generated files with
    # same name, for further reference. Only the part after the last path
    # separator is kept.
    localImg = test_dataloader.dataset.image_list
    localNames = []
    for i in localImg:
        if config.run_polyaxon:
            selected_image = i.split('/')[-1]  ##Linux
        else:
            selected_image = i.split("\\")[-1]
        localNames.append(selected_image)
    names = names + localNames

    print("Found this many names " + str(len(names)))

    current_number = 0

    if not os.path.exists(Path.joinpath(local_output_path, config.model_name)):
        os.makedirs(Path.joinpath(local_output_path, config.model_name))

    # Timestamped run directory so repeated runs never overwrite each other.
    now = datetime.now()
    dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")

    local_test_path = local_output_path / config.model_name / dt_string / 'Data'
    local_test_nir_path = local_output_path / config.model_name / dt_string / 'DataNir'
    local_store_path = local_output_path / config.model_name / dt_string / 'stored_Data'
    os.makedirs(local_test_path)
    if config.nir_data:
        os.makedirs(local_test_nir_path)
    start_time = datetime.now()
    for real in tqdm(test_dataloader, disable=config.run_polyaxon):
        masks = loadAndAgumentMasks.returnTensorMasks(config.batch_size)
        masks = torch.from_numpy(masks)
        masks = masks.type(torch.cuda.FloatTensor)
        masks = 1 - masks
        # Tensor.to() is NOT in-place: keep the returned tensor. The original
        # discarded it, silently relying on the cuda cast two lines above.
        masks = masks.to(config.device)

        real = real.to(config.device)
        fake_masked_images = torch.mul(real, masks)
        generated_images = gen(fake_masked_images, masks)
        image_names = names[current_number:current_number + config.batch_size]
        # NOTE(review): current_number advances by batch_size here AND by 1 at
        # the end of the loop, so the name slices skip one extra entry per
        # batch -- confirm this is intended.
        current_number = current_number + config.batch_size  ## Change naming to include all names
        if config.nir_data:
            # The branch condition already guarantees nir_data, so the former
            # nested re-check (and an unused namePath local) were removed.
            for index, image in enumerate(generated_images):
                modelHelper.save_tensor_single_NIR(
                    image,
                    Path.joinpath(local_test_path, image_names[index]),
                    Path.joinpath(local_test_nir_path, image_names[index]),
                    raw=True)
        else:
            modelHelper.save_tensor_batch(
                real, fake_masked_images, generated_images, config.batch_size,
                Path.joinpath(local_test_path,
                              "_final_model_" + str(current_number)))

        current_number = current_number + 1
    end_time = datetime.now()
Example #16
0
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).

    Baseline evaluation: generates baseline-inpainted images, computes FID
    against the test set, then accumulates per-image PSNR, CC, MAE, SDD,
    scikit-SSIM, torch-SSIM and RMSE, and writes mean/min/max of each metric
    to a results text file.
    """
    config = TrainingConfig()
    config = update_config(args, config)
    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')

    ## For polyaxon: remap data/output paths and the torch cache to the cluster.
    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()

    curdatLayer = importData(config)
    train_array, names = curdatLayer.get_images_for_baseLine()
    print("Total test in baseline " + str(len(train_array)))
    print("Total test names in baseline" + str(len(names)))
    train_dataloader, test_dataloader = curdatLayer.getRGBDataLoader()
    # Convert all baseline inputs to float32 before running the experiment.
    local_train_array = []
    for i in train_array:
        local_train_array.append(convertToFloat32(i))
    train_array = local_train_array
    curBaseLineModel = baselineModel(train_array, names, config)
    pathToGenerated, time_ran = curBaseLineModel.baselineExperiment()
    if config.run_polyaxon:
        pathToEval = config.output_path / 'evalMetrics'
    else:
        pathToEval = Path().absolute().parent / 'models'
    # create dataloader with generated images
    generated_images_dataloader = curdatLayer.getGeneratedImagesDataloader(pathToGenerated)
    print(str(pathToGenerated) + "is the generated image path")
    # calculate FID between the test set and the generated images
    print("Done with generating images")
    FID_Value = FIDCalculator(test_dataloader, generated_images_dataloader,
                              len(test_dataloader) * config.batch_size, config.batch_size, config).get_FID_scores()

    # Calculate PSNR and SSIM
    dataloader_iterator = iter(generated_images_dataloader)
    maeValues = []
    sddValues = []
    ssimscikitValues = []
    SSIMValues = []
    psnrValues = []
    CCValues = []
    rmseValues = []
    # loop to calculate PSNR and SSIM for all test and generated images.
    # The generated-images iterator is restarted when exhausted so every real
    # batch is always paired with a generated batch.
    for images_real in test_dataloader:
        try:
            images_generated = next(dataloader_iterator)
        except StopIteration:
            dataloader_iterator = iter(generated_images_dataloader)
            images_generated = next(dataloader_iterator)
        for index2 in range(config.batch_size):
            psnrValues.append(PSNR().__call__(images_real[index2], images_generated[index2]))
            CCValues.append(CC().__call__(images_real[index2], images_generated[index2]))
            maeValues.append(MSE().__call__(images_real[index2], images_generated[index2]))
            # NOTE(review): SDD/SSIM_SKI/RMSE are called on the class itself --
            # if they are classes (not instances) the first image fills the
            # 'self' slot; confirm their __call__ signatures.
            sddValues.append(SDD.__call__(images_real[index2], images_generated[index2]))
            ssimscikitValues.append(SSIM_SKI.__call__(images_real[index2], images_generated[index2]))
            # torch-based SSIM expects a batch dimension.
            image1 = images_real[index2].unsqueeze(0)
            image2 = images_generated[index2].unsqueeze(0)
            SSIMValues.append(ssim(image1, image2))
            rmseValues.append(RMSE.__call__(images_real[index2], images_generated[index2]))

    meanMAE = sum(maeValues) / len(maeValues)
    minMAE = min(maeValues)
    maxMAE = max(maeValues)

    meanSDD = sum(sddValues) / len(sddValues)
    minSDD = min(sddValues)
    maxSDD = max(sddValues)

    meanPSNR = sum(psnrValues) / len(psnrValues)
    minPSNR = min(psnrValues)
    maxPSNR = max(psnrValues)

    meanSSIM = sum(SSIMValues) / len(SSIMValues)
    minSSIM = min(SSIMValues)
    maxSSIM = max(SSIMValues)

    meanSCISSIM = sum(ssimscikitValues) / len(ssimscikitValues)
    minSCISSIM = min(ssimscikitValues)
    maxSCISSIM = max(ssimscikitValues)

    meanCC = sum(CCValues) / len(CCValues)
    minCC = min(CCValues)
    maxCC = max(CCValues)

    meanRMSE = sum(rmseValues) / len(rmseValues)
    minRMSE = min(rmseValues)
    maxRMSE = max(rmseValues)
    # Save final results of evaluation metrics (dead 'FID = FID_Value' alias removed).
    if not pathToEval.parent.exists():
        pathToEval.parent.mkdir()
    saveEvalToTxt(config.model_name, meanMAE, minMAE, maxMAE, meanSDD, minSDD, maxSDD, meanSSIM.item(),
                  minSSIM.item(),
                  maxSSIM.item(), meanSCISSIM, minSCISSIM, maxSCISSIM, meanPSNR, minPSNR, maxPSNR, meanCC, minCC, maxCC, meanRMSE, minRMSE, maxRMSE,
                  FID_Value, time_ran, pathToEval)