Example #1
    def findRegionMasks(self):

        grayImage = rgb2gray(self.boardImage)
        image = skimage.img_as_float64(self.boardImage)

        ### detect red regions in the board
        redRegions = image[:,:,0]-grayImage
        self.myMask.red = redRegions > threshold_otsu(redRegions) + 0.05

        ### detect green regions in the board
        greenRegions = image[:,:,1]-grayImage
        self.myMask.green = greenRegions > (-threshold_otsu(greenRegions)) -0.05

        ### detect multiplier regions
        self.myMask.multipliers=self.myMask.red + self.myMask.green
        self.myMask.multipliers = skimage.img_as_float64(self.myMask.multipliers)
        SE = disk(np.round(np.size(image[:,1,1])/100))

        ### detect multiplier ring (multRings) region
        self.myMask.multRings = binary_closing(self.myMask.multipliers,SE)
        self.myMask.multRings = skimage.img_as_float64(self.myMask.multRings)

        ### fill holes of multRings (imfill equivalent)
        seed = np.copy(self.myMask.multRings)
        seed[1:-1, 1:-1] = self.myMask.multRings.max()
        mask = self.myMask.multRings
        self.myMask.board = reconstruction(seed, mask, method='erosion')
        change_to_bool = np.array(self.myMask.board , dtype=bool)
        self.myMask.miss= ~change_to_bool
        self.myMask.single = self.myMask.board-self.myMask.multRings

        #### fill holes of self.myMask.single (imfill equivalent)
        seed = np.copy(self.myMask.single)
        seed[1:-1, 1:-1] = self.myMask.single.max()
        mask = self.myMask.single
        Tempp= reconstruction(seed, mask, method='erosion')
        self.myMask.double = self.myMask.board-Tempp

        ### inner Ring 
        inner_Ring  = self.myMask.board-self.myMask.double-self.myMask.single
        seed = np.copy(inner_Ring)
        seed[1:-1, 1:-1] = inner_Ring.max()
        mask = inner_Ring
        Tempp= reconstruction(seed, mask, method='erosion')
        inner_Ring = Tempp-inner_Ring

        ### triple region 
        seed = np.copy(inner_Ring)
        seed[1:-1, 1:-1] = inner_Ring.max()
        mask = inner_Ring
        Tempp= reconstruction(seed, mask, method='erosion')
        self.myMask.triple = self.myMask.board-self.myMask.double-self.myMask.single - Tempp
        self.myMask.triple[self.myMask.triple < 0] = 0

        ### outer Bull 
        self.myMask.outer_bull = (self.myMask.multRings - self.myMask.double - self.myMask.triple) * self.myMask.green
        
        #### inner Bull 
        self.myMask.inner_bull = (self.myMask.multRings - self.myMask.double - self.myMask.triple) * self.myMask.red
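The hole-filling steps above repeat the standard scikit-image idiom of erosion-based morphological reconstruction: the seed copies the mask but has its interior forced to the maximum value, so reconstruction floods inward from the border and enclosed holes end up filled. A minimal standalone sketch of that idiom (the helper name fill_holes is illustrative, not part of the class above):

import numpy as np
from skimage.morphology import reconstruction

def fill_holes(binary_mask):
    # Border pixels keep their original values; interior pixels start at the maximum,
    # so erosion-based reconstruction fills any hole not connected to the image border.
    mask = binary_mask.astype(np.float64)
    seed = np.copy(mask)
    seed[1:-1, 1:-1] = mask.max()
    return reconstruction(seed, mask, method='erosion')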
Example #2
def baseline_predict_scene(LR, QM, before=True, interpolation=cv2.INTER_CUBIC):
    """
        baseline version 1 :
            average images with the maximum number of clearance pixel
            if before is true, average the image then apply the resize and return the resize image
            else resize the images and return the average
    """
    # load clearance map

    n = len(QM)
    clearance = np.zeros((n, ))

    #for cl in QM:
    for i in prange(n):
        cl = QM[i]
        raw_cl = cv2.imread(cl, -1)
        if raw_cl is None:
            raise IOError("error: could not read clearance map " + cl)
        img_cl = skimage.img_as_float64(raw_cl).astype(bool)
        if len(np.unique(img_cl)) > 2:
            print(np.unique(img_cl))
            raise ValueError("Error during loading clearance map !!!! ")
        #img_cl = img_cl/255 # normalize value 0-1
        clearance[i] = np.sum(img_cl)

    maxcl = clearance.max()
    maxclears = [i for i in prange(len(clearance)) if clearance[i] == maxcl
                 ]  # save index of image with max clearance

    if before:
        img_predict = np.zeros((128, 128), dtype=np.float64)
        #for ids in maxclears:
        for i in prange(len(maxclears)):
            ids = maxclears[i]
            im = skimage.img_as_float64(cv2.imread(LR[ids], -1))
            img_predict += im
        img_predict = img_predict / len(maxclears)

        im_rescale = cv2.resize(
            img_predict, (384, 384), interpolation=interpolation
        )  # rescale(im, scale=3, order=3, mode='edge', anti_aliasing=False, multichannel=False)#
        return im_rescale
    else:

        # upscale

        img_predict = np.zeros((384, 384), dtype=np.float64)

        #for ids in maxclears:
        for i in prange(len(maxclears)):
            ids = maxclears[i]
            im = skimage.img_as_float64(cv2.imread(LR[ids], -1))
            im_rescale = cv2.resize(
                im, (384, 384), interpolation=interpolation
            )  # rescale(im, scale=3, order=3, mode='edge', anti_aliasing=False, multichannel=False)#
            img_predict += im_rescale
        img_predict = img_predict / len(maxclears)

        return img_predict
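A hypothetical call, assuming LR and QM are parallel lists of file paths for one scene's low-resolution images and their clearance maps (the file names below are placeholders, not paths from the source project):

LR = ["scene/LR000.png", "scene/LR001.png", "scene/LR002.png"]
QM = ["scene/QM000.png", "scene/QM001.png", "scene/QM002.png"]
sr = baseline_predict_scene(LR, QM, before=True)   # 384x384 float64 prediction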
Example #3
def baseline_predict_scenev2(LR, QM, interpolation=cv2.INTER_CUBIC):
    """
        baseline version 2:
            average the images of one image set that have the maximum number of clear pixels
    """
    # load clearance map
    n = len(QM)
    clearance = np.zeros((n, ))

    #for cl in QM:
    for i in prange(n):
        cl = QM[i]
        raw_cl = cv2.imread(cl, -1)
        if raw_cl is None:
            raise IOError("error: could not read clearance map " + cl)
        img_cl = skimage.img_as_float64(raw_cl).astype(bool)
        if len(np.unique(img_cl)) > 2:
            print(np.unique(img_cl))
            raise ValueError("Error during loading clearance map !!!! ")
        #img_cl = img_cl/255 # normalize value 0-1
        clearance[i] = np.sum(img_cl)

    maxcl = clearance.max()
    maxclears = [i for i in prange(len(clearance)) if clearance[i] == maxcl
                 ]  # save index of image with max clearance

    dim = len(maxclears)
    clearance_map = np.zeros((dim, 128, 128), dtype=np.float64)
    im = np.zeros((dim, 128, 128), dtype=np.float64)
    for i in prange(dim):
        ids = maxclears[i]
        cl = QM[ids]
        clearance_map[i] = skimage.img_as_float64(cv2.imread(cl, -1))
        im[i] = skimage.img_as_float64(cv2.imread(LR[ids], -1))

    img = im * clearance_map  # pixel with no clearance equal 0

    clear = clearance_map.sum(axis=0)
    np.place(clear, clear == 0, np.nan)
    img_predict = np.sum(img, axis=0) / clear

    # average value of maxclearance and replace nan value by them
    img_average = img.mean(axis=0)
    img_predict[np.isnan(img_predict)] = img_average[np.isnan(img_predict)]

    # upscale img
    img_resize = cv2.resize(img_predict, (384, 384),
                            interpolation=interpolation)

    return img_resize
Example #4
def CP_to_RGB_single(im_cp):
    # change channels first to channels last format
    channel_first = False
    if im_cp.shape[0] < 10:
        channel_first = True
        im_cp = np.moveaxis(im_cp, 0, 2)
    col1 = np.array([0, 0, 255], dtype=np.uint8)
    col2 = np.array([0, 255, 0], dtype=np.uint8)
    col3 = np.array([255, 255, 0], dtype=np.uint8)
    col4 = np.array([255, 150, 0], dtype=np.uint8)
    col5 = np.array([255, 0, 0], dtype=np.uint8)
    channel_colors = [col1, col2, col3, col4, col5]
    comb_pars = [3, 2, 3, 2, 2]
    colorImagesList = []
    #     print(im_cp.shape[2])
    for i in range(im_cp.shape[2]):
        image_gray = im_cp[:, :, i]
        image_gray_normalized, _ = normalize(image_gray)
        image_color = colorize_image(image_gray_normalized, channel_colors[i])
        colorImagesList.append(image_color)
        colorImagesList2 = [
            a * b.astype(np.uint16) for a, b in zip(comb_pars, colorImagesList)
        ]
    colorImage0, _ = normalize(sum(colorImagesList2))
    colorImage0 = skimage.img_as_float64(colorImage0)
    #         print(image_gray.shape,image_gray_normalized.shape,image_color.shape,colorImage0.shape)
    if channel_first:
        colorImage = np.moveaxis(colorImage0, 2, 0)
    else:
        colorImage = colorImage0.copy()
    return colorImage
Example #5
def pyramid_blending(im1, im2, mask, max_levels, filter_size_im,
                     filter_size_mask):
    """
    Implementation of pyramid blending as described in the lecture.
    :param im1, im2: two input grayscale images to be blended. Both have the same dimension.
    :param mask: a boolean mask indicating which parts of im1 (where True) and im2 (where False)
                should appear in the resulting im_blend. Has the same
                dimension as im1 and im2.
    :param max_levels: the max_levels parameter you should use when generating the Gaussian and Laplacian
                pyramids.
    :param filter_size_im: the size of the Gaussian filter used in the construction of the
                Laplacian pyramids of im1 and im2.
    :param filter_size_mask: the size of the Gaussian filter used in the construction of the
                Gaussian pyramid of mask.
    :return: im_blend: valid grayscale image in the range [0, 1].
    """
    lapyr1 = build_laplacian_pyramid(im1, max_levels, filter_size_im)[0]
    lapyr2 = build_laplacian_pyramid(im2, max_levels, filter_size_im)[0]
    mask_pyr, filter_vec = build_gaussian_pyramid(img_as_float64(mask),
                                                  max_levels, filter_size_mask)
    mask_pyr = stretch_levels(mask_pyr)
    lap_out = []
    for i in range(len(mask_pyr)):
        level = np.multiply(mask_pyr[i], lapyr1[i]) + np.multiply(
            (1 - mask_pyr[i]), (lapyr2[i]))
        lap_out.append(level)
    return laplacian_to_image(lap_out, filter_vec,
                              [1 for _ in range(len(lap_out))])
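Each loop iteration above blends one pyramid level as mask_level * level_im1 + (1 - mask_level) * level_im2 before the pyramid is collapsed. A hypothetical call with synthetic inputs, assuming the pyramid helpers referenced above (build_laplacian_pyramid, build_gaussian_pyramid, stretch_levels, laplacian_to_image) come from the same module; the filter sizes are illustrative:

import numpy as np

im1 = np.zeros((64, 64), dtype=np.float64)
im2 = np.ones((64, 64), dtype=np.float64)
mask = np.zeros((64, 64), dtype=bool)
mask[:, :32] = True          # left half taken from im1, right half from im2
im_blend = pyramid_blending(im1, im2, mask, max_levels=4,
                            filter_size_im=5, filter_size_mask=5)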
Example #6
def reconstruct_from_cr_data(data_path, width, height, dtype="uint16", kvp=70, angular_steps=360, debug=False):
    """Reconstructs the focal spot and the sinogram
    from raw binary image specified by the data path.

    :param data_path: A path to the raw binary data
    :param width: The width of the binary image
    :param height: The height of the binary image
    :param dtype: The data type of the binary image
    :param kvp: The kVp used in the acquisition of the CR data
    :param angular_steps: The number of radial slices taken of the penumbra, defaults to 360
    :type angular_steps: int, optional
    :param debug: A boolean value representing if debug images are output
    :returns: A tuple containing the focal spot image
    and the sinogram image.
    """

    image = np.fromfile(data_path, dtype=dtype)
    image = image.reshape(width, height)
    image = map_cr_values(image, kvp=kvp)
    #image = equalize_adapthist(image)
    # Ensuring float and ubyte images are available
    float_image = img_as_float64(image)
    ubyte_image = img_as_ubyte(image)

    return reconstruct(float_image, ubyte_image, angular_steps=angular_steps, debug=debug)
Example #7
def cast_img_float64(tensor):
    """Cast the data in np.float64.

    If the input data is in (unsigned) integer, the values are scaled between
    0 and 1. When converting from a np.float dtype, values are not modified.

    Parameters
    ----------
    tensor : np.ndarray
        Tensor to cast.

    Returns
    -------
    tensor : np.ndarray, np.float64
        Tensor cast.

    """
    # check tensor dtype
    check_array(tensor,
                ndim=[2, 3, 4, 5],
                dtype=[
                    np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
                    np.int16, np.int32, np.int64, np.float32, np.float64
                ])

    # cast tensor
    tensor = img_as_float64(tensor)

    return tensor
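For example, an unsigned integer input is rescaled into [0, 1] while float inputs pass through unchanged; a small illustrative check (the values in the comment are approximate):

import numpy as np

arr = np.array([[0, 128, 255]], dtype=np.uint8)
out = cast_img_float64(arr)   # float64, roughly [0.0, 0.502, 1.0]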
Example #8
def MSRCR(img, sigmas: list, alpha, beta, G, b):
    """
    MSRCR (Multi-scale retinex with color restoration)

    Parameters :

    img : input image
    sigmas : list of all standard deviations in the X and Y directions, for Gaussian filter
    alpha : controls the strength of the nonlinearity
    beta : gain constant
    G : final gain
    b : offset
    """
    img = img_as_float64(img) + 1

    img_msr = multiScale(img, sigmas)
    img_color = crf(img, alpha, beta)
    img_msrcr = G * (img_msr * img_color + b)

    for i in range(img_msrcr.shape[2]):
        img_msrcr[:, :, i] = (img_msrcr[:, :, i] - np.min(img_msrcr[:, :, i])) / \
                                (np.max(img_msrcr[:, :, i]) - np.min(img_msrcr[:, :, i])) * \
                                255

    img_msrcr = np.uint8(np.minimum(np.maximum(img_msrcr, 0), 255))

    return img_msrcr
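A hypothetical call on a synthetic RGB image; the sigma list and constants below are commonly used retinex settings chosen purely for illustration, not values taken from this project:

import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # synthetic RGB input
out = MSRCR(img, sigmas=[15, 80, 250], alpha=125, beta=46, G=5, b=25)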
Example #9
def comparePictures():

    for i in range(imageCount):

        img = img_as_float64(
            imread(defOutputDir + "Pic_" + '{0:03d}'.format(i) + ".png",
                   as_gray=True))
        imgVecDef = img_as_float64(
            imread(rasterDefDir + "potrace_Pic_" + '{0:03d}'.format(i) +
                   ".png",
                   as_gray=True))
        imgVecAc = img_as_float64(
            imread(rasterAcDir + "potrace_Pic_" + '{0:03d}'.format(i) +
                   ".png",
                   as_gray=True))
        imgVecPCA = img_as_float64(
            imread(rasterPCADir + "potrace_Pic_" + '{0:03d}'.format(i) +
                   ".png",
                   as_gray=True))

        mse_none = mse(img, img)
        ssim_none = ssim(img, img, data_range=img.max() - img.min())

        mse_defaultVec = mse(img, imgVecDef)
        ssim_defaultVec = ssim(img,
                               imgVecDef,
                               data_range=imgVecDef.max() - imgVecDef.min())

        mse_acVec = mse(img, imgVecAc)
        ssim_acVec = ssim(img,
                          imgVecAc,
                          data_range=imgVecAc.max() - imgVecAc.min())

        mse_pcaVec = mse(img, imgVecPCA)
        ssim_pcaVec = ssim(img,
                           imgVecPCA,
                           data_range=imgVecPCA.max() - imgVecPCA.min())

        evalArray[i, 3] = mse_none
        evalArray[i, 4] = mse_defaultVec
        evalArray[i, 5] = mse_acVec
        evalArray[i, 6] = mse_pcaVec

        evalArray[i, 7] = ssim_none
        evalArray[i, 8] = ssim_defaultVec
        evalArray[i, 9] = ssim_acVec
        evalArray[i, 10] = ssim_pcaVec
Example #10
def load_and_normalize_hr(scene_path, apply_norm=False):
    hr, _ = highres_image(scene_path, img_as_float=False)
    #hr = skimage.img_as_float64(hr << 2)
    hr = skimage.img_as_float64(hr)
    if apply_norm:
        return normalize(hr)
    else:
        return hr
Example #11
def load_hr_sm(scene_path):
    """
    Loads a high resolution image and its corresponding status map, given the path to a scene directory
    """
    hr = skimage.img_as_float64(
        load_image(scene_path + '/HR.png', dtype=np.uint16))
    sm = load_image(scene_path + '/SM.png', dtype=bool)
    return (hr, sm)
Example #12
def difference_maker():
    average = io.imread('average.jpg')
    average = img_as_float64(average)
    n = 0
    for file in glob.glob('beePics/*.jpg'):
        image = io.imread(file)

        image = image[460:-460, 880:-680]
        image = resize(image, (299, 299), anti_aliasing=True)
        image = img_as_float64(image)
        image = abs(average - image)

        name = 'difference' + str(n) + '.png'  # creating new names
        image = img_as_ubyte(image)
        image = rgb2gray(image)  # convert to greyscale for ease of analysis
        io.imsave(name, image)  # save new image after every iteration
        n += 1
Example #13
def load_image2D(path, expand=False):
    img = skimage.img_as_float64(cv2.imread(path, -1))
    #height, width = img.shape
    #if scale > 1:
    #    img = cv2.resize(img,  (height*scale, width*scale), interpolation = cv2.INTER_CUBIC)
    if expand:
        img = np.expand_dims(img, axis=2)
    return img
Example #14
    def new_image(self, image):
        if self.resize:
            image = transform.resize(image, self.size)
        else:
            # Transform Uint8 into float64
            image = img_as_float64(image)
        image = self.transform(image)
        return image
Example #15
def load_and_normalize_lrs(scene_path):
    
    normalized_lrs = []
    for lr, _ in lowres_image_iterator(scene_path, img_as_float=False):
        lr = skimage.img_as_float64(lr << 2)
        normalized_lrs.append(normalize(lr))
    
    return normalized_lrs
Example #16
def baseline_predict(data,
                     istrain=True,
                     evaluate=True,
                     version=1,
                     interpolation=cv2.INTER_CUBIC):
    num = len(data)
    predicted = np.zeros(
        (num, 384, 384))  # number of images in the dataset to check
    zsub = np.zeros((num, ))

    if istrain:
        for i in prange(num):
            LR, QM, norm, SM, HR = data[i]
            if version == 1:
                img_predict = baseline_predict_scene(
                    LR, QM, interpolation=interpolation)
            elif version == 2:
                img_predict = baseline_predict_scenev2(
                    LR, QM, interpolation=interpolation)

            elif version == 3:
                img_predict = baseline_predict_scenev3(
                    LR, QM, interpolation=interpolation)
            else:
                raise ("methode not implemented ! ")

            # save img
            predicted[i] = img_predict
            # evaluate

            if evaluate:
                num_crop = 6
                clearHR = skimage.img_as_float64(cv2.imread(SM, -1))
                hr = skimage.img_as_float64(cv2.imread(HR, -1))
                zSR = score_scene(img_predict,
                                  hr,
                                  clearHR,
                                  norm,
                                  num_crop=num_crop)

                zsub[i] = zSR
        if evaluate:
            print("evaluation \n number of elements : {0} \n Z = {1}".format(
                len(zsub), zsub.mean()))
        return predicted
Example #17
def image_test():
    file = glob.glob('Y0030001.jpg')
    for thing in file:
        image = io.imread(thing)
    image = image[460:-460, 880:-680]
    image = resize(image, (299, 299), anti_aliasing=False)
    image = img_as_float64(image) / 2
    image = img_as_ubyte(image)
    io.imsave('test.jpg', image)
Example #18
def test_reconstruct(penumbra_circle, focal_spot_circle, sinogram_circle):
    float_image = img_as_float64(penumbra_circle)
    uint8_image = img_as_ubyte(penumbra_circle)
    focal_spot, sinogram = api.reconstruct(float_image, uint8_image)

    fs_check = utils.duplicate_grayimage_check(img_as_ubyte(focal_spot), focal_spot_circle)
    sino_check = utils.duplicate_grayimage_check(img_as_ubyte(sinogram), sinogram_circle)
    
    assert fs_check and sino_check
Example #19
def processInput(x):
    file_in = os.path.join(inputfolder, file_list[x])
    sys.stdout.write('Loading: ' + str(file_in) + '\n')
    img = skimage.io.imread(file_in)
    img_float64 = skimage.img_as_float64(img)
    image = skimage.exposure.rescale_intensity(img_float64, in_range=(np.percentile(img_float64, cutperc), np.percentile(img_float64, 100-cutperc)), out_range=(0, 1))
    img_uint8 = skimage.img_as_ubyte(image)
    file_out = os.path.join(outputfolder, file_list[x])
    sys.stdout.write('Saving: ' + str(file_out) + '\n')
    skimage.io.imsave(file_out, img_uint8)
Example #20
    def compare_images_metrics(self, img, gt):
        """
        Compare two images

        @param img: image to compare
        @param gt: ground truth (color image)

        @return: (structural similarity, mean squared error, peak noise to signal ratio)
        """
        img_float_64 = skimage.img_as_float64(img)
        gt_float_64 = skimage.img_as_float64(gt)

        ssim = skimage.metrics.structural_similarity(
            img_float_64, gt_float_64, multichannel=True
        )
        mse = skimage.metrics.mean_squared_error(img_float_64, gt_float_64)
        pnsr = skimage.metrics.peak_signal_noise_ratio(img_float_64, gt_float_64)

        return (ssim, mse, pnsr)
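The same three metrics can be computed directly on two synthetic images; a minimal self-contained sketch mirroring the calls above (the multichannel keyword follows the snippet; newer scikit-image releases use channel_axis instead):

import numpy as np
import skimage.metrics

img = np.random.rand(32, 32, 3)
gt = np.random.rand(32, 32, 3)
ssim_val = skimage.metrics.structural_similarity(img, gt, multichannel=True)
mse_val = skimage.metrics.mean_squared_error(img, gt)
psnr_val = skimage.metrics.peak_signal_noise_ratio(img, gt)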
Example #21
def processInput(x, **kwargs):
    """
    Processes images based on the given input 1) List of filenames 2) z-index of stack mem-map

    if x is [str] the input to the script is assumed to be a folder of multiple .png/.tif image files

    if x is [int] the input to the script is assumed to be single tif stack
    """
    if isinstance(x, str):
        file_in = os.path.join(inputfolder, x)
        sys.stdout.write('Loading: ' + str(file_in) + ' -> ')
        name, extension = os.path.splitext(x)
        img = cv2.imread(file_in, cv2.IMREAD_UNCHANGED)
        file_out = os.path.join(outputfolder, str(name + '.png'))
    elif isinstance(x, int):
        mem_map_path = kwargs['mmap']
        stack_arr = load(mem_map_path, mmap_mode='r')
        sys.stdout.write('Loading: image layer no. ' + str(x + 1) + ' -> ')
        img = stack_arr[x, :, :]
        name_with_path, extension = os.path.splitext(inputfolder)
        out_name = str(
            os.path.basename(name_with_path)) + "_" + str(x + 1) + ".png"
        file_out = os.path.join(outputfolder, out_name)
    else:
        raise ValueError("Input supplied is incompatible")

    sys.stdout.write('Type: ' + str(img.dtype) + '\n')
    # Check 3rd dimension here, if loaded as RGB, remove 3rd dimension here
    if len(img.shape) > 2:
        # print('Converting RGB  to grey level image')
        img = img[:, :, 0]
    try:
        img = skimage.util.img_as_float(img)
    except Exception:
        img = skimage.img_as_float64(img)
    # remove extreme outlier pixels before denoising
    img = skimage.exposure.rescale_intensity(img,
                                             in_range=(np.percentile(img, 1),
                                                       np.percentile(img, 99)),
                                             out_range=(0, 1))
    sigma_est1 = skimage.restoration.estimate_sigma(skimage.img_as_float(img))
    img = ptv.tv1_2d(img, sigma_est1 / 2, n_threads=num_threads)
    # img = skimage.restoration.denoise_tv_chambolle(img, weight=sigma_est1/2, multichannel=False)
    # img = skimage.restoration.denoise_tv_bregman(img, weight=sigma_est1/2, max_iter=100, eps=0.001, isotropic=True);
    img = skimage.exposure.rescale_intensity(
        img,
        in_range=(np.percentile(img,
                                cutperc), np.percentile(img, 100 - cutperc)),
        out_range=(0, 1))
    sigma_est2 = skimage.restoration.estimate_sigma(skimage.img_as_float(img))
    # sys.stdout.write(file_out + ": Estimated Gaussian noise stdev before " + str(sigma_est1) + " vs after denoising = " + str(sigma_est2))
    sys.stdout.write('Saving: ' + str(file_out) + '\n')
    img = 255 * img
    img = img.astype(np.uint8)
    cv2.imwrite(file_out, img)
Example #22
def load_image2D(path, expand=False):
    """
        @path : absolute path of the image to load
        @expand : if True, add a trailing channel dimension. Useful for grayscale images: (n, n) -> (n, n, 1)
    """
    img = skimage.img_as_float64( cv2.imread(path, -1) )
    #height, width = img.shape
    #if scale > 1:
    #    img = cv2.resize(img,  (height*scale, width*scale), interpolation = cv2.INTER_CUBIC)
    if expand:
        img = np.expand_dims(img, axis=2)
    return img
Example #23
def prepare_landsat_image(bands):

    # prepare a false color image for a combination of 3 landsat bands
    # Note this is specific to the set of images we provided, and the
    # hardcoded values for rotating need to be changed if you download a new
    # set of landsat images.

    ls_red = io.imread('landsat_band' + str(bands[0]) + '.tif')  # red
    ls_green = io.imread('landsat_band' + str(bands[1]) + '.tif')  # green
    ls_blue = io.imread('landsat_band' + str(bands[2]) + '.tif')  # blue

    ls_red = img_as_float64(ls_red)
    ls_green = img_as_float64(ls_green)
    ls_blue = img_as_float64(ls_blue)

    nx, ny = ls_red.shape
    ls_false = np.zeros([nx, ny, 3], dtype=np.float64)

    ls_false[:, :, 0] = ls_red
    ls_false[:, :, 1] = ls_green
    ls_false[:, :, 2] = ls_blue

    flow = 2
    fhigh = 98

    ls_false_rescale = rescale_intensities(ls_false, flow, fhigh)

    angle = 12.8
    ls_false_rescale = rotate(ls_false_rescale, angle)

    left = 124
    right = 1345

    top = 122
    bottom = 1378

    ls_false_rescale = ls_false_rescale[top:bottom, left:right]

    return ls_false_rescale
Example #24
    def generate_raw_ndvi(self):
        img = io.imread(self.path + self.img_ex)

        dimx, dimy = img[:, :, 2].shape
        fraction = 0.01
        noise = fraction * np.ones((dimx, dimy), dtype=float)

        red = img_as_float64(img[:, :, 2]) + noise
        nir = img_as_float64(img[:, :, 3]) + noise
        #ndvi_out = np.where(((nir-red)==0)&((nir+red)==0),0,(nir-red)/(nir+red))

        #print red
        #print nir
        #plt.figure(0)
        #plt.imshow(nir,cmap="gray")
        #plt.title("nir")
        #plt.show()

        self.ndvi_img = (nir - red) / (nir + red)
        #self.ndvi_img =  rgb2gray(ndvi_out)

        new_dir = self.path + "ndvi_imgs/"
Example #25
def read_image(filename, representation):
    """
    Reads an image file and converts it into a given representation
    :param filename: The filename of an image on disk (could be grayscale or RGB).
    :param representation: Representation code, either 1 or 2 defining whether the output should be a grayscale
            image (1) or an RGB image (2).
    :return: an image represented by a matrix of type np.float64 with intensities normalized to [0,1]
    """
    rgb_img = imread(filename)
    rgb_img = img_as_float64(rgb_img)
    if representation == GRAY_REP:
        rgb_img = rgb2gray(rgb_img)
    return rgb_img
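A hypothetical usage; 'example.jpg' is a placeholder file name and 1 is the grayscale representation code described in the docstring:

gray = read_image("example.jpg", representation=1)   # float64 matrix in [0, 1]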
Example #26
def load_lr_qm(scene_path, quality_map_only=False, lr_only=False):
    """
    Loads low resolution images and their corresponding quality map given path to scene directory
    Can also load LR images and Qm separately if the options are given
    """

    # both Lr and Qm are loaded together
    if not quality_map_only and not lr_only:
        lr_qm_images = []
        for lr_image in glob.glob(scene_path + "/LR*"):
            lr_image_path = lr_image
            qm_image_path = lr_image[:-10] + "/QM" + lr_image[-7:]
            lr = skimage.img_as_float64(
                load_image(lr_image_path, dtype=np.uint16)
            )  # loading Lr image and converting it to float
            qm = load_image(qm_image_path,
                            dtype=bool)  # loading qm image as a boolean
            lr_qm_images.append((lr, qm))
        return lr_qm_images

    # loading qm only
    elif quality_map_only:
        qm_images = []
        for qm_image in glob.glob(scene_path + "/QM*"):
            qm_image_path = qm_image
            qm = load_image(qm_image_path, dtype=bool)
            qm_images.append(qm)
        return np.asarray(qm_images)

    # loading Lr images only
    else:
        lr_images = []
        for lr_image in glob.glob(scene_path + "/LR*"):
            lr_image_path = lr_image
            lr = skimage.img_as_float64(
                load_image(lr_image_path, dtype=np.uint16))
            lr_images.append(lr)
        return np.asarray(lr_images)
Example #27
def check_img_as_float(img, validate=True):
    """
	Ensure `img` is a matrix of values in floating point format in [0.0, 1.0].
	Returns `img` if it already obeys those requirements, otherwise converts it.
	"""
    if not issubclass(img.dtype.type, np.floating):
        img = skimage.img_as_float64(img)
    # https://scikit-image.org/docs/dev/api/skimage.html#img-as-float64

    if validate:
        # safeguard against unwanted conversions to values outside the
        # [0.0, 1.0] range (would happen if `img` had signed values).
        assert img.min() >= 0.0 and img.max() <= 1.0

    return img
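A small self-contained check of the contract described above (a uint16 input is scaled into [0.0, 1.0]):

import numpy as np

img16 = np.full((4, 4), 65535, dtype=np.uint16)
out = check_img_as_float(img16)   # float64 array of 1.0 values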
Example #28
def scale_image_sk(image_path, out_dir, value=8.0):

    print(image_path)
    img_rgb = img_as_float64(io.imread(image_path))
    # grayscale_image = color.rgb2gray(img_rgb)

    image_resized = resize(
        img_rgb, (img_rgb.shape[0] * value, img_rgb.shape[1] * value),
        anti_aliasing=False)

    image_w = img_as_ubyte(image_resized)
    img_path_parts = image_path.split('/')
    out_img = Image.fromarray(np.uint8(image_w))
    out_img_path = '{}/{}'.format(out_dir, img_path_parts[-1])
    print(out_img_path)
    out_img.save(out_img_path, "PNG", optimize=False, quality=95)

    return out_img_path
Example #29
def highres_image(path, img_as_float=True):
	"""
	Load a scene's high resolution image and its corresponding status map.
	
	Returns a `(hr, sm)` tuple, where:
	* `hr`: matrix with the loaded high-resolution image (values as np.uint16 or
	        np.float64 depending on `img_as_float`),
	* `sm`: the image's corresponding "clear pixel?" boolean mask.
	
	Scenes' image files are described at:
	https://kelvins.esa.int/proba-v-super-resolution/data/
	"""
	path = path if path[-1] in {'/', '\\'} else (path + '/')
	hr = skimage.io.imread(path + 'HR.png', dtype=np.uint16)
	sm = skimage.io.imread(path + 'SM.png', dtype=bool)
	if img_as_float:
		hr = skimage.img_as_float64(hr)
	return (hr, sm)
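A hypothetical call; the directory name is a placeholder and must contain the HR.png and SM.png files described above:

hr, sm = highres_image('data/scene_0001')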
Example #30
def reconstruct_from_array(image_array, angular_steps=360, debug=False):
    """Reconstructs the focal spot and the sinogram
    from a passed penumbra image in the form of an array.
    
    :param image_array: The penumbra image as a numpy array
    :type image_array: numpy.ndarray
    :param angular_steps: The number of radial slices taken of the penumbra, defaults to 360
    :type angular_steps: int, optional
    :param debug: A boolean value representing if debug images are output, defaults to False
    :type debug: bool, optional
    :return: A tuple containing the reconstructed image and the sinogram image.
    :rtype: (numpy.ndarray, numpy.ndarray)
    """

    # Ensuring float and ubyte images are available
    float_image = img_as_float64(image_array)
    ubyte_image = img_as_ubyte(image_array)

    return reconstruct(float_image, ubyte_image, angular_steps=angular_steps, debug=debug)