Example #1
def main():
    figa = np.array(Image.open('images/Fig1130(a)(uniform_noise).tif').convert('L'))
    figb = np.array(Image.open('images/Fig1130(b)(sinusoidal).tif').convert('L'))
    figc = np.array(Image.open('images/Fig1130(c)(cktboard_section).tif').convert('L'))

    glcma = glcm(figa, (0, 1))
    glcmb = glcm(figb, (0, 1))
    glcmc = glcm(figc, (0, 1))
    fa = glcm_features(glcma)
    fb = glcm_features(glcmb)
    fc = glcm_features(glcmc)

    ax = plt.subplot(321)
    ax.set_title("Figure a")
    ax.imshow(figa, 'gray')
    ax.axis('off')

    ax = plt.subplot(322)
    ax.set_title("GLCM of Figure a")
    ax.imshow(adjust_log(glcma), 'gray')
    ax.axis('off')

    ax = plt.subplot(323)
    ax.set_title("Figure b")
    ax.imshow(figb, 'gray')
    ax.axis('off')

    ax = plt.subplot(324)
    ax.set_title("GLCM of Figure b")
    ax.imshow(adjust_log(glcmb), 'gray')
    ax.axis('off')

    ax = plt.subplot(325)
    ax.set_title("Figure c")
    ax.imshow(figc, 'gray')
    ax.axis('off')

    ax = plt.subplot(326)
    ax.set_title("GLCM of Figure c")
    ax.imshow(adjust_log(glcmc), 'gray')
    ax.axis('off')

    plt.show()
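The snippet above omits its imports. A plausible preamble is sketched below; glcm and glcm_features are project-local helpers that are not shown on this page, so only their existence is assumed:

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from skimage.exposure import adjust_log

# glcm(image, offset) and glcm_features(glcm_matrix) are assumed to be
# project-local helpers defined elsewhere in the same module.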
Example #2
def show_sarimage3d(SI,
                    pdict,
                    axismod=None,
                    title=None,
                    cmap=None,
                    isimgadj=False,
                    aspect=None,
                    outfile=None,
                    figsize=None):
    r"""[summary]

    [description]

    Arguments:
        SI {[type]} -- [description]
        sarplat {[type]} -- [description]

    Keyword Arguments:
        axismod {[type]} -- [description] (default: {None})
        title {[type]} -- [description] (default: {None})
        cmap {[type]} -- [description] (default: {None})
        isimgadj bool -- [description] (default: {False})
        aspect {[type]} -- [description] (default: {None})
        outfile {[type]} -- [description] (default: {None})
        figsize {[type]} -- [description] (default: {None})
    """

    extent, xlabelstr, ylabelstr = saraxis(pdict=pdict, axismod=axismod)

    Z = np.absolute(SI)
    if isimgadj:
        Z = exposure.adjust_log(Z)
    Z = np.flipud(Z)
    # fig = plt.figure(figsize=figsize)
    M, N = np.shape(Z)

    X = np.arange(0, N, 1)
    Y = np.arange(0, M, 1)
    X, Y = np.meshgrid(X, Y)

    # mlab.figure(size=(400, 500))
    # mlab.mesh(X, Y, Z)
    # mlab.surf(X, Y, Z)
    # mlab.colorbar()
    # mlab.xlabel(xlabelstr)
    # mlab.ylabel(ylabelstr)
    # mlab.zlabel("Amplitude")
    # mlab.title(title)
    # mlab.show()

    if outfile is not None:
        # mlab.savefig(outfile)
        print("sar image has been saved to: ", outfile)
Example #3
    def open_tiff_files(self):

        self.tiff_file_to_view = QtWidgets.QFileDialog.getOpenFileName(
            directory=self.user_dir, filter='*.tiff', parent=self)[0]

        img = plt.imread(self.tiff_file_to_view)
        logarithmic_corrected = exposure.adjust_log(img, 1)

        self.figure_tiff_image.ax.clear()
        self.figure_tiff_image.ax.imshow(logarithmic_corrected,
                                         cmap='BuPu_r',
                                         vmax=2000)
        self.canvas_tiff_image.draw_idle()
Example #4
def log_compression(file_name, img):
    """
    Replace pixel value with its logarithm (effectively enhancing low intensity
    pixel values).

    :param file_name: base file name used to save the output image.
    :param img: the input image array (e.g. a decoded JPEG).
    :return: the log-compressed image array (also saved as file_name + '_log.jpg').
    """
    log_out = exposure.adjust_log(img)
    log_hist(file_name, log_out)
    io.imsave(file_name + '_log.jpg', log_out)
    return log_out
Example #5
    def logcorr(self):
        """logarithmic exposure added to image

        Parameters
        ----------
        image : n-dimensional array
            Input image folder.

        Returns
        -------
            Image with logarathmic exposure.
        """
        return exposure.adjust_log(self)
Example #6
def log_compression(img):
    """Performs log compression processing on raw image

    Args:
        img (np array): raw image in the form of a np array

    Returns:
        np array: image array after having log compression
            performed
    """
    img_log = exposure.adjust_log(img, 1)
    logging.info('Log compression performed!')
    return img_log
Example #7
def log_correct_img(img):
    """
    Computes log correction on the input image.

    param:
    img - image array to be processed

    returns:
    log_img - uint8 array of the log-corrected image (first three channels only)
    """
    log_img = np.asarray(adjust_log(img, 2), dtype='uint8')
    return log_img[:, :, 0:3]
Example #8
def createTFRecord(filename, mapfile, num):
    class_map = {}
    data_dir = 'dataset/'
    data_dir_d = 'dataset_d/'
    classes = {
        'apple', 'ball', 'banana', 'bowl', 'garlic', 'green', 'lemon',
        'mushroom', 'onion', 'orange', 'peach', 'pear', 'potato', 'tomato'
    }
    # path of the output TFRecord file
    writer = tf.python_io.TFRecordWriter(filename)
    i = 0
    for index, name in enumerate(classes):
        class_path = data_dir + name + '/'
        class_map[index] = name
        for img_name in os.listdir(class_path):
            i += 1
            if (i % num == 0) and (i % (num * 10) != 0):
                img_path = class_path + img_name  # path of each image
                img_path_d = data_dir_d + name + '/' + img_name
                img = Image.open(img_path)
                img = img.resize((208, 208))
                img = img.convert("RGB")
                img_raw = img.tobytes()  # convert the image to raw bytes

                img_d = Image.open(img_path_d)
                img_d = img_d.resize((208, 208))
                img_d = img_d.convert("I")
                img_d = img_as_float(img_d)
                img_d = exposure.adjust_gamma(img_d, 1.0)
                img_d = exposure.adjust_log(img_d, 100000000)
                img_d = exposure.rescale_intensity(img_d,
                                                   in_range='image',
                                                   out_range=np.uint8)
                img_raw_d = img_d.tobytes()

                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'label': _int64_feature(index),
                        'image_raw': _bytes_feature(img_raw),
                        'image_raw_d': _bytes_feature(img_raw_d)
                    }))
                writer.write(example.SerializeToString())
    writer.close()

    txtfile = open(mapfile, 'w+')
    for key in class_map.keys():
        txtfile.writelines(str(key) + ":" + class_map[key] + "\n")
    txtfile.close()
Example #9
def test_adjust_log():
    """Verifying the output with expected results for logarithmic
    correction with multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
    expected = np.array([[  0,   5,  11,  16,  22,  27,  33,  38],
        [ 43,  48,  53,  58,  63,  68,  73,  77],
        [ 82,  86,  91,  95, 100, 104, 109, 113],
        [117, 121, 125, 129, 133, 137, 141, 145],
        [149, 153, 157, 160, 164, 168, 172, 175],
        [179, 182, 186, 189, 193, 196, 199, 203],
        [206, 209, 213, 216, 219, 222, 225, 228],
        [231, 234, 238, 241, 244, 246, 249, 252]], dtype=np.uint8)

    result = exposure.adjust_log(image, 1)
    assert_array_equal(result, expected)
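The expected values above are consistent with applying gain * log2(1 + I) after scaling the image by its dtype range. A quick sanity-check sketch (not part of the test suite) that reproduces the first row for uint8 input:

import numpy as np

x = np.arange(0, 255, 4, dtype=np.uint8)
approx = (np.log2(1 + x / 255.0) * 255).astype(np.uint8)  # gain = 1
# approx[:8] -> [0, 5, 11, 16, 22, 27, 33, 38], matching the first row above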
Example #10
def test_adjust_inv_log():
    """Verifying the output with expected results for inverse logarithmic
    correction with multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
    expected = np.array([[  0,   2,   5,   8,  11,  14,  17,  20],
        [ 23,  26,  29,  32,  35,  38,  41,  45],
        [ 48,  51,  55,  58,  61,  65,  68,  72],
        [ 76,  79,  83,  87,  90,  94,  98, 102],
        [106, 110, 114, 118, 122, 126, 130, 134],
        [138, 143, 147, 151, 156, 160, 165, 170],
        [174, 179, 184, 188, 193, 198, 203, 208],
        [213, 218, 224, 229, 234, 239, 245, 250]], dtype=np.uint8)

    result = exposure.adjust_log(image, 1, True)
    assert_array_equal(result, expected)
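With the inverse flag set, the correction behaves like gain * (2**I - 1) on the scaled image; a matching sanity-check sketch:

import numpy as np

x = np.arange(0, 255, 4, dtype=np.uint8)
approx = ((2 ** (x / 255.0) - 1) * 255).astype(np.uint8)  # gain = 1, inverse
# approx[:8] -> [0, 2, 5, 8, 11, 14, 17, 20], matching the first row above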
Example #11
def augmentations(image_array: ndarray):
    v_min, v_max = np.percentile(image_array, (0.2, 99.8))
    return (
        image_array,
        transform.rotate(image_array, random.uniform(-50, 50)),
        exposure.rescale_intensity(image_array, in_range=(v_min, v_max)),
        util.random_noise(image_array),
        ndimage.gaussian_filter(image_array, 2),
        exposure.adjust_log(image_array),
        exposure.adjust_sigmoid(image_array),
        #color.rgb2gray(image_array), (FOR COLORED IMAGES)
        #np.invert(image_array), (FOR COLORED IMAGES)
        exposure.adjust_gamma(image_array, gamma=0.4, gain=0.9),
        image_array[:, ::-1],
        image_array[::-1, :])
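A brief usage sketch; the imports recreate what the surrounding module is assumed to provide (numpy, random, scipy.ndimage and the skimage submodules used above):

import random

import numpy as np
from numpy import ndarray
from scipy import ndimage
from skimage import data, exposure, transform, util

variants = augmentations(data.camera())  # tuple of ten arrays, the original first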
Example #12
def adjust(frame, method, **kwargs):
    if method == "equalize":
        adjusted = exp.equalize_hist(frame, **kwargs)
    elif method == "gamma":
        adjusted = exp.adjust_gamma(frame, **kwargs)
    elif method == "log":
        adjusted = exp.adjust_log(frame, **kwargs)
    elif method == "sigmoid":
        adjusted = exp.adjust_sigmoid(frame, **kwargs)
    elif method == "adaptive":
        adjusted = exp.equalize_adapthist(frame, **kwargs)
    else:
        raise ValueError(
            "method can be equalize, gamma, log, sigmoid or adaptive")
    return adjusted
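A brief usage sketch; the snippet assumes skimage.exposure is imported as exp, so that alias is recreated here:

from skimage import data, exposure as exp, img_as_float

frame = img_as_float(data.moon())
log_frame = adjust(frame, "log", gain=1)                  # exp.adjust_log
clahe_frame = adjust(frame, "adaptive", clip_limit=0.02)  # exp.equalize_adapthist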
Example #13
def test_adjust_inv_log():
    """Verifying the output with expected results for inverse logarithmic
    correction with multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array(
        [[0, 2, 5, 8, 11, 14, 17, 20], [23, 26, 29, 32, 35, 38, 41, 45],
         [48, 51, 55, 58, 61, 65, 68, 72], [76, 79, 83, 87, 90, 94, 98, 102],
         [106, 110, 114, 118, 122, 126, 130, 134],
         [138, 143, 147, 151, 156, 160, 165, 170],
         [174, 179, 184, 188, 193, 198, 203, 208],
         [213, 218, 224, 229, 234, 239, 245, 250]],
        dtype=np.uint8)

    result = exposure.adjust_log(image, 1, True)
    assert_array_equal(result, expected)
Example #14
def detect_watershed_patches(A,
                             mask=0,
                             validation=False,
                             blobs=None,
                             label=''):
    A_copy = np.copy(A)
    A_copy[A_copy < mask] = 0
    A_log = adjust_log(A_copy)
    A_norm = rescale_intensity(A_log)
    print('Raster shape (%d,%d)' % A_norm.shape)
    if blobs is None:
        blobs = blob_dog(A_norm, max_sigma=30, threshold=.1)
        print('Detected %d blobs to seed watershed' % len(blobs))
    else:
        print('Seeding watershed with {len} predefined blobs'.format(
            len=len(blobs)))
    if validation:
        title = 'DetectBlobs' if label == '' else '{label} Blobs'.format(
            label=label)
        ax = plot(A_copy, title)
        for blob in blobs:
            y, x, sigma = blob
            # plot blobs as circles. NOTE: multiply 'r' by sqrt(2) because the blob_dog docs say that the
            # radius of each blob is approximately sqrt(2)*(std. deviation of that blob's Gaussian kernel)
            c = plt.Circle((x, y),
                           sqrt(2) * sigma,
                           color='k',
                           linewidth=1,
                           fill=False)
            ax.add_patch(c)
    x, y = np.indices(A_norm.shape)
    markers = np.zeros(A_norm.shape)
    idxs = list(range(len(blobs)))
    random.shuffle(idxs)
    for i, blob in zip(idxs, blobs):
        xb, yb, rb = blob
        mask_circle = (x - xb)**2 + (y - yb)**2 < rb**2
        markers[mask_circle] = i + 1
    watershed_patches = watershed(1 - A_norm, markers, mask=A_copy)
    if validation:
        title = 'Watershed' if label == '' else 'Watershed {label}'.format(
            label=label)
        plt.figure(title, figsize=(8, 8))
        watershed_patches = watershed_patches.astype(float)
        watershed_patches[watershed_patches < 1] = np.nan
        plt.imshow(watershed_patches, cmap='Paired')
        plt.tight_layout()
        plt.title(title)
    return watershed_patches, len(blobs)
Example #15
def test_adjust_log():
    """Verifying the output with expected results for logarithmic
    correction with multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array(
        [[0, 5, 11, 16, 22, 27, 33, 38], [43, 48, 53, 58, 63, 68, 73, 77],
         [82, 86, 91, 95, 100, 104, 109, 113],
         [117, 121, 125, 129, 133, 137, 141, 145],
         [149, 153, 157, 160, 164, 168, 172, 175],
         [179, 182, 186, 189, 193, 196, 199, 203],
         [206, 209, 213, 216, 219, 222, 225, 228],
         [231, 234, 238, 241, 244, 246, 249, 252]],
        dtype=np.uint8)

    result = exposure.adjust_log(image, 1)
    assert_array_equal(result, expected)
Example #16
def receive_blue_images(imgs):
    blue_imgs = []
    for img in imgs:
        h, w, t = img.shape
        temp = img.copy()
        temp = exposure.adjust_log(temp)
        mean = int(round(np.mean(temp)) * 1.1)
        for i in range(h):
            for j in range(w):
                if (temp[i][j][2] < temp[i][j][1]) or (temp[i][j][2] <
                                                       temp[i][j][0]):
                    temp[i][j][0] = mean
                    temp[i][j][1] = mean
                    temp[i][j][2] = mean
        blue_imgs.append(temp)
    return blue_imgs
Example #17
def adjust_log(img):
    image = exposure.adjust_log(img)  # logarithmic adjustment
    plt.figure('adjust_log', figsize=(8, 8))

    plt.subplot(121)
    plt.title('origin image')
    plt.imshow(img, plt.cm.gray)
    plt.axis('off')

    plt.subplot(122)
    plt.title('log')
    plt.imshow(image, plt.cm.gray)
    plt.axis('off')

    plt.show()
    return face_detection(image)
Example #18
def image_transformation(X, method_type='blur', **kwargs):
    # https://www.kaggle.com/tomahim/image-manipulation-augmentation-with-skimage
    q = kwargs['percentile'] if 'percentile' in kwargs else (0.2, 99.8)
    angle = kwargs['angle'] if 'angle' in kwargs else 60
    transformation_dict = {
        'blur': normalize(ndimage.uniform_filter(X)),
        'invert': normalize(util.invert(X)),
        'rotate': rotate(X, angle=angle),
        'rescale_intensity': _rescale_intensity(X, q=q),
        'gamma_correction': exposure.adjust_gamma(X, gamma=0.4, gain=0.9),
        'log_correction': exposure.adjust_log(X),
        'sigmoid_correction': exposure.adjust_sigmoid(X),
        'horizontal_flip': X[:, ::-1],
        'vertical_flip': X[::-1, :],
        'rgb2gray': skimage.color.rgb2gray(X)
    }
    return transformation_dict[method_type]
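Note that every entry in transformation_dict is built eagerly, so all transforms run on each call even though only one result is returned. A hedged usage sketch follows; normalize and _rescale_intensity are hypothetical stand-ins for the project-local helpers that are not shown on this page:

import numpy as np
import skimage.color  # used inside transformation_dict
from scipy import ndimage
from skimage import data, exposure, util
from skimage.transform import rotate


def normalize(a):
    # hypothetical stand-in for the project's helper
    a = a.astype(float)
    return (a - a.min()) / (a.max() - a.min() + 1e-12)


def _rescale_intensity(a, q):
    lo, hi = np.percentile(a, q)
    return exposure.rescale_intensity(a, in_range=(lo, hi))


X = data.astronaut()  # RGB test image
log_img = image_transformation(X, 'log_correction')
gray = image_transformation(X, 'rgb2gray')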
Example #19
def _rgb(img, band=('RED','GREEN','BLUE')):
    gain = 3
    scale = 1E4
    
    #READ BANDS
    red = img.feature(band[0], dtype=np.float32)
    green = img.feature(band[1], dtype=np.float32)
    blue = img.feature(band[2], dtype=np.float32)    

    #PROCESS RGB
    RGB = np.stack( ( red, green, blue ), axis=2 ) 
    RGB = RGB/scale #normalize float values to [0;1]
    RGB = adjust_log(RGB, gain)  # log adjustment
    RGB[RGB>1] = 1 # clip saturated values
    RGB[RGB<0] = 0

    return RGB
Example #20
def image_transformation(X, method_type='blur', **kwargs):
    # https://www.kaggle.com/tomahim/image-manipulation-augmentation-with-skimage
    q = kwargs['percentile'] if 'percentile' in kwargs else (0.2, 99.8)
    angle = kwargs['angle'] if 'angle' in kwargs else 60
    transformation_dict = {
        'blur': normalize(ndimage.uniform_filter(X)),
        'invert': normalize(util.invert(X)),
        'rotate': rotate(X, angle=angle),
        'rescale_intensity': _rescale_intensity(X, q=q),
        'gamma_correction': exposure.adjust_gamma(X, gamma=0.4, gain=0.9),
        'log_correction': exposure.adjust_log(X),
        'sigmoid_correction': exposure.adjust_sigmoid(X),
        'horizontal_flip': X[:, ::-1],
        'vertical_flip': X[::-1, :],
        'rgb2gray': skimage.color.rgb2gray(X)
    }
    return transformation_dict[method_type]
Example #21
def m5():
    Image.MAX_IMAGE_PIXELS = None
    for fn in os.listdir("all"):
        if fn.endswith("TIF"):
            adj_fn = fn.split("_B")[1]
            if len(adj_fn) == 5:
                adj_fn = "0" + adj_fn
            adj_fn = "A_" + adj_fn[:-4]
            print(adj_fn)

            image = io.imread("all/" + fn)
            print(image.shape)

            if fn.endswith("8.TIF"):
                pass
                region = image[3500 * 2:3900 * 2, 800 * 2:1200 * 2]
            else:
                region = image[3500:3900, 800:1200]
                region = transform.resize(region, (800, 800),
                                          anti_aliasing=False)

            io.imsave("edited/" + adj_fn + ".png", region)

            exp = exposure.equalize_adapthist(region)
            io.imsave("edited/" + adj_fn + "adj_adaphist.png", exp)

            region = exposure.equalize_hist(region)
            io.imsave("edited/" + adj_fn + "adj_hist.png", region)

            exp = exposure.adjust_sigmoid(region)
            io.imsave("edited/" + adj_fn + "adj_sig.png", exp)
            exp = exposure.adjust_gamma(region)
            io.imsave("edited/" + adj_fn + "adj_gam.png", exp)
            exp = exposure.adjust_log(region)
            io.imsave("edited/" + adj_fn + "adj_log.png", exp)
            # exp = exposure.equalize_hist(region)
            # io.imsave("edited/"+adj_fn +"adj_hist.png", exp)

            #roberts, sobel, scharr, prewitt
            edges = filters.roberts(exp)
            io.imsave("edited/" + adj_fn + "f_rob.png", edges)

            edges = filters.sobel(exp)
            io.imsave("edited/" + adj_fn + "f_sob.png", edges)
Example #22
def create_mask_pass1(img):
    """
    This is the first pass in creating a mask. skimage.exposure is used because it helps to get rid
    of the glue around the tissue. This also calls the compute_mask method, which is part
    of the nipy package (you will need to install it from GitHub).
    :param img: the raw image
    :return: the first-pass mask
    """
    img = exposure.adjust_log(img, 1)
    img = exposure.adjust_gamma(img, 2)

    mask = compute_mask(img, m=0.2, M=0.9, cc=False, opening=2, exclude_zeros=True)
    mask = mask.astype(int)
    mask[mask==0] = 0
    mask[mask==1] = 255
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.dilate(mask.astype(np.uint8), kernel, iterations=2)
    mask = mask.astype(np.uint8)
    return mask
Example #23
    def standardize(self, x):
        if self.rescale:
            x *= self.rescale
        # x is a single image, so it doesn't have image number at index 0
        img_channel_index = self.channel_index - 1
        grayscale = x.shape[img_channel_index] == 1
        if self.samplewise_center:
            if grayscale:
                raise ValueError(
                    'samplewise_center on a grey image does not make sense')
            x -= np.mean(x, axis=img_channel_index, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)

        if self.featurewise_center:
            x -= self.mean
        if self.featurewise_std_normalization:
            x /= (self.std + 1e-7)

        if self.zca_whitening:
            flatx = np.reshape(x, (x.size))
            whitex = np.dot(flatx, self.principal_components)
            x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
        # Add our custom stuff
        # VGG preprocessing
        if self.bmodel_preprocessing is not None:
            module = getattr(bmodel, self.bmodel_preprocessing.lower())
            s = x.shape
            x = np.expand_dims(x, axis=0)
            x = getattr(module, 'preprocess_input')(x)
            x = x.reshape(s)

        # add_channel to gray images
        if x.shape[img_channel_index] == 1:
            x = x.transpose(2, 0, 1)
            if self.add_channel:
                log = exposure.adjust_log(x)
                hist = exposure.equalize_adapthist(
                    x[0].astype('int16'))[np.newaxis]
                x = np.vstack((log, hist, x)).transpose(1, 2, 0)
            else:
                x = np.vstack(([x] * 3)).transpose(1, 2, 0)
        return x
Example #24
def process_image(image, list_processing_method, actions):
    """
    Function process_image allows user to conduct the following
    image processing methods.
    Author: Haitong Wang
    Date: Dec, 7th, 2018
    Version: 1.0.0

    :param image: 2d grayscale array
    :param list_processing_method: list of processing method codes
    ("HE", "CS", "LC", "RV", "GC")
    :param actions: array of integers counting the number of
    processing actions applied
    :return: output image/processed image, actions, size of
    the image

    """
    output = image
    for n in list_processing_method:
        if n is "HE":
            output = exposure.equalize_hist(output)
            output = exposure.rescale_intensity(output, out_range=(0, 255))
            actions[0] += 1
        elif n is "CS":
            p5, p95 = np.percentile(output, (5, 95))
            output = exposure.rescale_intensity(output, in_range=(p5, p95))
            actions[1] += 1
        elif n is "LC":
            output = exposure.adjust_log(output, 1)
            output = exposure.rescale_intensity(output, out_range=(0, 255))
            actions[2] += 1
        elif n is "RV":
            output = util.invert(output)
            output = exposure.rescale_intensity(output, out_range=(0, 255))
            actions[3] += 1
        elif n is "GC":
            output = exposure.adjust_gamma(output, 2)
            output = exposure.rescale_intensity(output, out_range=(0, 255))
            actions[4] += 1

    size = image.shape

    # add another feature for the phase 2
    return output, actions, size
Example #25
def image_augmentation(original_image, i, j):
    aug_list = []

    image_rescaled = rescale(original_image, 1.0 / 4.0)
    cv2.imwrite(path2 + str(i) + str(j) + ".jpg", image_rescaled)

    image_with_random_noise = random_noise(original_image)
    cv2.imwrite(path2 + str(i) + str(j) + ".jpg", image_with_random_noise)

    gray_scale_image = rgb2gray(original_image)
    aug_list.append(gray_scale_image)

    color_inversion_image = util.invert(original_image)
    aug_list.append(color_inversion_image)

    image_with_rotation = rotate(original_image, 45)
    aug_list.append(image_with_rotation)

    v_min, v_max = np.percentile(original_image, (0.2, 99.8))
    better_contrast = exposure.rescale_intensity(original_image,
                                                 in_range=(v_min, v_max))
    aug_list.append(better_contrast)

    #adjusted_gamma_image = exposure.adjust_gamma(original_image, gamma=0.4, gain=0.9)
    #aug_list.append(adjust_gamma)

    log_correction_image = exposure.adjust_log(original_image)
    aug_list.append(log_correction_image)

    sigmoid_correction_image = exposure.adjust_sigmoid(original_image)
    aug_list.append(sigmoid_correction_image)

    horizontal_flip = original_image[:, ::-1]
    aug_list.append(horizontal_flip)

    vertical_flip = original_image[::-1, :]
    aug_list.append(vertical_flip)

    blured_image = ndimage.uniform_filter(original_image, size=(11, 11, 1))
    aug_list.append(blured_image)

    return aug_list
Example #26
def main(image):
    img = image

    # Gamma
    gamma_corrected = exposure.adjust_gamma(img, 2)

    # Logarithmic
    logarithmic_corrected = exposure.adjust_log(img, 1)

    # Display results
    fig = plt.figure(figsize=(8, 5))
    axes = np.zeros((2, 3), dtype=object)
    axes[0, 0] = plt.subplot(2, 3, 1)
    axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0])
    axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0])
    axes[1, 0] = plt.subplot(2, 3, 4)
    axes[1, 1] = plt.subplot(2, 3, 5)
    axes[1, 2] = plt.subplot(2, 3, 6)

    ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
    ax_img.set_title('Low contrast image')

    y_min, y_max = ax_hist.get_ylim()
    ax_hist.set_ylabel('Number of pixels')
    ax_hist.set_yticks(np.linspace(0, y_max, 5))

    ax_img, ax_hist, ax_cdf = plot_img_and_hist(gamma_corrected, axes[:, 1])
    ax_img.set_title('Gamma correction')

    ax_img, ax_hist, ax_cdf = plot_img_and_hist(logarithmic_corrected, axes[:,
                                                                            2])
    ax_img.set_title('Logarithmic correction')

    ax_cdf.set_ylabel('Fraction of total intensity')
    ax_cdf.set_yticks(np.linspace(0, 1, 5))

    # prevent overlap of y-axis labels
    fig.tight_layout()

    return plt, gamma_corrected, logarithmic_corrected
Example #27
def getmask(rgb, fmedian=True):
    """Get eye mask"""
    if (len(rgb.shape) > 3):
        rgb = rgb[0, ...]
    # Convert to Grayscale
    gray = rgb2gray(rgb)

    # Adjust Contrast
    gray = gray - np.min(gray)
    gray = gray / np.max(gray)
    # Contrast Enhancement
    gray = adjust_log(gray, 5)
    # Apply median filter to remove outliers
    if fmedian:
        gray = medfilt(gray, 5)

    # Threshold
    mask = np.zeros(gray.shape)
    mask[np.where(gray > np.min(gray))] = 1
    mask = np.asarray(mask)

    return mask
Example #28
def Contrast(filein, fileout):
    image = cv2.imread(filein, 0)
    gam1 = exposure.adjust_log(image)  # logarithmic adjustment
    count = 1

    for i in range(8, 15):
        if i == 10:
            break
        gam = exposure.adjust_gamma(image, 0.1 * i)
        cv2.imwrite(fileout + '_con' + str(count) + '.tif', gam)
        count += 1

    for i in range(4, 6):
        for j in range(6, 10):
            if i == 5 and j == 5:
                break
            img = np.array(image)
            mean = np.mean(img)
            img = img - mean
            img = img * 0.2 * j + mean * 0.2 * i  # adjust contrast and brightness
            cv2.imwrite(fileout + '_con' + str(count) + '.tif', img)
            count += 1
Example #29
def sPipe(image):
    print("Reading Image")
    t0 = time.time()
    retina = readImage(image)
    t1 = time.time()
    print("Done took %.2f seconds" % (t1 - t0))
    print('Converting to Grayscale with luminance preservation')
    t0 = time.time()
    retina_gray = convertImage(retina)
    t1 = time.time()
    print("Done took %.2f seconds" % (t1 - t0))
    print('Cropping image from background')
    t0 = time.time()
    retina_crop = cropImage(retina_gray, 0.05)
    t1 = time.time()
    print("Done took %.2f seconds" % (t1 - t0))
    print('Logarithmic Correction')
    t0 = time.time()
    retina_adjust = exposure.adjust_log(retina_crop)
    t1 = time.time()
    print("Done took %.2f seconds" % (t1 - t0))
    viewImage(retina_adjust)
    return retina_adjust
Example #30
def process_imgs_with_method(list_of_decoded_imgs, procedure):
    """Performs the image processing procedure on a list of images

    Args:
        list_of_decoded_imgs (list): all images from the request in matrix form
        procedure (str): specifies which procedure to perform

    Returns:
        list: all images after applying the procedure
    """
    list_of_processed_imgs = []
    for before_filtering in list_of_decoded_imgs:
        if procedure == 'histogram_eq':
            processed = exposure.equalize_hist(before_filtering)
        elif procedure == 'contrast_str':
            p2, p98 = np.percentile(before_filtering, (2, 98))
            processed = exposure.rescale_intensity(before_filtering,
                                                   in_range=(p2, p98))
        elif procedure == 'log_compress':
            processed = exposure.adjust_log(before_filtering)
        else:
            processed = invert(before_filtering)
        list_of_processed_imgs.append(processed)
    return list_of_processed_imgs
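A brief usage sketch; the imports recreate what the surrounding module is assumed to provide (numpy, skimage.exposure and skimage.util.invert):

import numpy as np
from skimage import data, exposure
from skimage.util import invert

originals = [data.moon(), data.camera()]
logged = process_imgs_with_method(originals, 'log_compress')  # list of adjusted arrays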
Example #31
def zbar_decode(img):
    from pyzbar.pyzbar import decode as pyzbar_decode
    #img = img.mean(axis=2)
    res=pyzbar_decode(img)

    if len(res)==0:
        if len(img.shape)>2:
            imgg = img.mean(axis=2)
            res=pyzbar_decode(imgg)
        if len(res)==0:
            imgb = gaussian(img,1,preserve_range=True)
            res=pyzbar_decode(imgb)
            if len(res)==0:
                imgc = adjust_log(img)
                res=pyzbar_decode(imgc)
            #if len(res)==0:
            #    imgs = unsharp_mask(img,preserve_range=True)
            #    res=pyzbar_decode(imgs)
    if len(res)==0:
        return None
    elif len(res)==1:
        return res[0].data.decode("utf-8")
    else:
        return [r.data.decode("utf-8") for r in res]
Example #32
def area(x, y):
    AREA = label(x, background=0)
    #print(AREA)
    plt.imsave(str(m) + y + 'Area.tif', numpy.array(AREA))
    properties = regionprops(AREA)
    proparea = [p.area for p in properties]
    #centre mean of image
    proparea2 = [p.centroid for p in properties]
    #co-ordinates
    proparea3 = [p.coords for p in properties]
    areatype = img_as_ubyte(x)
    areatype = grey2rgb(areatype)
    areatype = adjust_log(areatype, gain=50)
    for a in proparea2:
        #print(a[0])
        #print(a[1])
        rr, cc = circle(a[0], a[1], 1)
        areatype[rr, cc] = (0, 255, 255)
    proparea = DataFrame(proparea, columns=['Area'])
    proparea2 = DataFrame(proparea2, columns=['row - y', 'column - x'])
    merged = concat([proparea, proparea2], axis=1)
    plt.imsave(str(m) + y + '_arearegions.tif', numpy.array(areatype))
    merged.to_csv(str(m) + y + '_proparea.csv')
    print("Calculated area saved!")
Example #33
def gen_log(image):
    images_log, images_otsu, images_medfilt = [], [], []

    start_log = 0.006
    step_log = 0.001

    for i in range(11):
        p = start_log + i * step_log

        im_log = ex.adjust_log(image, p)

        thresh = threshold_otsu(im_log)
        im_log_otsu = (im_log > thresh) * 1
        im_log_medfilt = medfilt(im_log)

        im_log = normalize(im_log)
        im_log_otsu = normalize(im_log_otsu)
        im_log_medfilt = normalize(im_log_medfilt)

        images_log.append(im_log.astype(int))
        images_otsu.append(im_log_otsu.astype(int))
        images_medfilt.append(im_log_medfilt.astype(int))

    return images_log, images_otsu, images_medfilt
Example #34
def extract_cbf(cbf_img, seed, brain_mask):
    '''
    Input:
        1. the cbf image obtained from the Ais-system
        2. seed is the location of the maximal lesion value in the irf image
        3. brain_mask is the main matter of the brain
    Output:
        lesion core extracted from the cbf image

    Describe:
        1. Enhance the contrast ratio of the input image using CLAHE.
        2. Calculate the power image in the x and y directions, and combine them together.
        3. Enhance the contrast ratio by expanding the distribution between [0, 1] using the log correction function, a * log(1 + x).
        4. Split the power image into two parts, basin and ridge, using Otsu or clustering.
        5. Locate the target basin area through the seed location; the ridge around the target basin is the contour of the lesion in the cbf image.
        6. Map each pixel location to its distance from the seed location.
        7. Split the brain into three parts, drop the part farthest away from the target area and select the part closest to the seed location.

    '''
    h, w = cbf_img.shape
    x, y = seed
    aeh_img = exposure.equalize_adapthist(cbf_img / 255, kernel_size=None, clip_limit=0.01, nbins=256)
    sobel_x = cv2.Sobel(aeh_img, cv2.CV_64F, 1, 0, ksize=3)
    sobel_y = cv2.Sobel(aeh_img, cv2.CV_64F, 0, 1, ksize=3)
    sobel_xy = np.sqrt(sobel_x ** 2 + sobel_y ** 2)

    brain_mask = sys.brain_mask
    log_img = exposure.adjust_log(sobel_xy)
    otsu = filters.threshold_otsu(log_img[brain_mask != 0])
    log_img[log_img < otsu] = 0

    def distance(x1, y1, x2, y2):
        return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

    dist_map = np.zeros((h, w))
    for row in range(h):
        for col in range(w):
            if log_img[row, col] == 0:
                continue
            dist_map[row, col] = distance(x, y, col, row)

    maxV = dist_map.max()
    minV = dist_map.min()
    dist_map[dist_map != 0] = maxV - dist_map[dist_map != 0]  # pixel inversion

    dist_map = ((dist_map - minV) / (maxV - minV)) * 255
    res = dist_map.copy()

    maxV = dist_map.max()
    minV = dist_map[dist_map != 0].min()

    middle = maxV - 1 * (maxV - minV) // 3
    print(maxV, minV, middle)

    otsu = filters.threshold_otsu(dist_map[dist_map > middle])
    dist_map[dist_map < otsu] = 0

    #     kernel = np.ones((2,2), 'uint8')
    #     lesion_dilation = cv2.dilate(dist_map, kernel, iterations = 1)
    #     lesion_erode = cv2.erode(lesion_dilation, kernel, iterations = 1)
    #     lesion_dilation = cv2.dilate(lesion_erode, kernel, iterations = 1)

    ret, mask = cv2.threshold(dist_map, 127, 255, 0)
    mask = cv2.drawContours(mask, sys.mask_contours, 0, (255, 0, 0), 1)
    #     cv2.rectangle(mask, (max(0, x - 2), max(y - 2, 0)),
    #                               (min(256, x + 2),min(256, y + 2)), (227,23,13), 1)

    plt.figure(figsize=(15, 7))
    plt.subplot(1, 5, 1)
    plt.imshow(cbf_img)
    plt.title('Input Image')
    plt.subplot(1, 5, 2)
    plt.imshow(sobel_xy)
    plt.title('Sobel Power Image')
    plt.subplot(1, 5, 3)
    plt.imshow(log_img)
    plt.title('Enhance Image with Log')
    plt.subplot(1, 5, 4)
    plt.imshow(res)
    plt.title('Distance Image')
    plt.subplot(1, 5, 5)
    plt.imshow(mask)
    plt.title('Output Image')
    plt.show()

    return dist_map
Example #35
plt.imshow(sobel_xx + sobel_yy)
plt.title('xx + yy')
plt.subplot(2, 5, 9)
plt.imshow(lap)
plt.title('Laplacian')
plt.show()

# In[224]:


img = sobel_2xy
plt.figure(figsize=(15, 7))
plt.subplot(1, 2, 1)
plt.imshow(img)
plt.subplot(1, 2, 2)
plt.imshow(exposure.adjust_log(img))

# In[225]:


brain_mask = sys.brain_mask
log_img = exposure.adjust_log(img)
otsu = filters.threshold_otsu(log_img[brain_mask != 0])
otsu

# In[226]:


# img = res.copy()
log_img[img < otsu] = 0
plt.imshow(log_img)
Example #36
def log_transform(img, gain=1):
    return exposure.adjust_log(img, gain)
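A short usage sketch showing the effect of gain (larger gains brighten dark regions more aggressively):

from skimage import data, exposure

img = data.moon()                      # uint8 test image
subtle = log_transform(img, gain=0.5)  # mild brightening of dark regions
full = log_transform(img)              # default gain = 1
# Gains above 1 can overflow the uint8 range, so convert the image to float
# first if a stronger correction is needed.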
Example #37
def adjust_brightness_gamma_log(sink):
    sink = exposure.adjust_gamma(sink, 3, 1)
    sink = exposure.adjust_log(sink, 2)
    return sink
Example #38
    # Display cumulative distribution
    img_cdf, bins = exposure.cumulative_distribution(image, bins)
    ax_cdf.plot(bins, img_cdf, 'r')
    ax_cdf.set_yticks([])

    return ax_img, ax_hist, ax_cdf


# Load an example image
img = data.moon()

# Gamma
gamma_corrected = exposure.adjust_gamma(img, 2)

# Logarithmic
logarithmic_corrected = exposure.adjust_log(img, 1)

# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 3), dtype=object)
axes[0, 0] = plt.subplot(2, 3, 1)
axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0])
axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0])
axes[1, 0] = plt.subplot(2, 3, 4)
axes[1, 1] = plt.subplot(2, 3, 5)
axes[1, 2] = plt.subplot(2, 3, 6)

ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')

y_min, y_max = ax_hist.get_ylim()