Example No. 1
def test_filter():
    china = load_sample_image("china.jpg")
    flower = load_sample_image("flower.jpg")
    dataset = np.array([china, flower], dtype=np.float32)
    batch_size, height, width, channels = dataset.shape

    filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
    filters[:, 3, :, 0] = 1  # vertical line
    filters[3, :, :, 1] = 1  # horizontal line

    X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
    #conv = tf.layers.conv2d(X, filters=2, kernel_size=7, strides=[2,2], padding='SAME')
    conv = tf.nn.conv2d(X, filters, strides=[1, 2, 2, 1], padding='SAME')
    avg_pool = tf.nn.avg_pool(conv,
                              ksize=[1, 4, 4, 1],
                              strides=[1, 4, 4, 1],
                              padding="VALID")
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        output = sess.run(avg_pool, feed_dict={X: dataset})

    print(output.shape)
    plt.imshow(output[0, :, :, 0], cmap="gray")
    plt.show()
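
For comparison, a minimal sketch of the same manual filtering and average pooling with the TensorFlow 2.x eager API (assuming a recent tensorflow is installed; not part of the original listing):

import numpy as np
import tensorflow as tf
from sklearn.datasets import load_sample_image

dataset = np.array([load_sample_image("china.jpg"),
                    load_sample_image("flower.jpg")], dtype=np.float32)
channels = dataset.shape[-1]

filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters[:, 3, :, 0] = 1  # vertical line
filters[3, :, :, 1] = 1  # horizontal line

conv = tf.nn.conv2d(dataset, filters, strides=[1, 2, 2, 1], padding="SAME")
pooled = tf.nn.avg_pool2d(conv, ksize=[1, 4, 4, 1],
                          strides=[1, 4, 4, 1], padding="VALID")
print(pooled.shape)  # (2, reduced height, reduced width, 2)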
Example No. 2
def pooling_layer():
    china = load_sample_image("china.jpg")
    flower = load_sample_image("flower.jpg")
    dataset = np.array([china, flower], dtype=np.float32)
    batch_size, height, width, channels = dataset.shape

    filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)  # defined for reference; not used below
    filters[:, 3, :, 0] = 1  # vertical line
    filters[3, :, :, 1] = 1  # horizontal line
    X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
    max_pool = tf.nn.max_pool(X,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding="VALID")

    with tf.Session() as sess:
        output = sess.run(max_pool, feed_dict={X: dataset})

    plt.imshow(output[0].astype(np.uint8))  # plot the output for the 1st image
    plot_color_image(dataset[0])
    plt.savefig(PNG_PATH + "china_original2", dpi=300)
    plt.close()
    plot_color_image(output[0])
    plt.savefig(PNG_PATH + "china_max_pool", dpi=300)
    plt.close()
Example No. 3
def simple_example():
    # Load sample images
    china = load_sample_image("china.jpg")
    flower = load_sample_image("flower.jpg")
    dataset = np.array([china, flower], dtype=np.float32)
    batch_size, height, width, channels = dataset.shape

    # Create 2 filters
    filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
    filters[:, 3, :, 0] = 1  # vertical line
    filters[3, :, :, 1] = 1  # horizontal line

    # Create a graph with input X plus a convolutional layer applying the 2 filters
    X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
    convolution = tf.nn.conv2d(X,
                               filters,
                               strides=[1, 2, 2, 1],
                               padding="SAME")

    with tf.Session() as sess:
        output = sess.run(convolution, feed_dict={X: dataset})

    plt.imshow(output[0, :, :, 1],
               cmap="gray")  # plot 1st image's 2nd feature map
    plt.show()

    for image_index in (0, 1):
        for feature_map_index in (0, 1):
            plot_image(output[image_index, :, :, feature_map_index])
            plt.savefig(PNG_PATH + "conv_imgs" + str(image_index) +
                        str(feature_map_index),
                        dpi=300)
            plt.close()

    # Using tf.layers.conv2d():
    reset_graph()
    X = tf.placeholder(shape=(None, height, width, channels), dtype=tf.float32)
    conv = tf.layers.conv2d(X,
                            filters=2,
                            kernel_size=7,
                            strides=[2, 2],
                            padding="SAME")
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        init.run()
        output = sess.run(conv, feed_dict={X: dataset})

    plt.imshow(output[0, :, :, 1],
               cmap="gray")  # plot 1st image's 2nd feature map
    plt.savefig(PNG_PATH + "conv2d", dpi=300)
    plt.close()
Example No. 4
def basic_operations_with_images(image_path):
    '''
    Basic operations with images: image opening and writing, resizing,
    grayscaling, rotating, cropping, and adding text

    Parameters
    ----------
    image_path : str
        Path to image file

    Returns
    -------
    None.

    '''
    if image_path != 'flower.jpg':
        image = cv2.imread(image_path,cv2.IMREAD_COLOR)
        image_mpl = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
    else:
        image_mpl = load_sample_image(image_path)
    #show original image
    #show_image_with_matplotlib(image_mpl)
    # create grayscale image from original matplotlib image
    grayscale_image = cv2.cvtColor(image_mpl,cv2.COLOR_RGB2GRAY)
    cv2.imwrite("grayscale_image.jpg",grayscale_image)
    
    plot_image_histogram(grayscale_image)

    cv2.waitKey(0)
    def sample_images_datasets(self):
        """
            Sample images
            load_sample_images() 	Load sample images for image manipulation.
            load_sample_image(image_name) 	Load the numpy array of a single sample image
        """

        logging.debug('--------------- Sample images ---------')

        images = datasets.load_sample_images()

        print(images)

        print("filenames >>>>>>>>>> \n",images.filenames)

        chinaImage = datasets.load_sample_image("china.jpg")
        print(chinaImage)

        '''
        Warning

        The default coding of images is based on the uint8 dtype to spare
        memory. Often machine learning algorithms work best if the input is
        converted to a floating point representation first. Also, if you plan
        to use matplotlib.pyplot.imshow, don't forget to scale to the range
        0 - 1 as done in the following example.
        '''

        # Convert to floats instead of the default 8-bit integer coding. Dividing
        # by 255 is important so that plt.imshow behaves well on float data
        # (values need to be in the range [0, 1]).
        china = np.array(chinaImage, dtype=np.float64) / 255
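
As a small illustration of the warning above (a sketch, not part of the original listing): matplotlib accepts uint8 images in [0, 255] directly, while float images must be scaled to [0, 1].

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_sample_image

china = load_sample_image("china.jpg")          # uint8, values in [0, 255]
china_float = china.astype(np.float64) / 255.0  # float, values in [0, 1]

fig, axes = plt.subplots(1, 2)
axes[0].imshow(china)        # fine: uint8 in [0, 255]
axes[1].imshow(china_float)  # fine: float in [0, 1]
plt.show()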
Example No. 6
def test_3():
    from sklearn.datasets import load_sample_image
    from sklearn.cluster import KMeans
    china = load_sample_image("china.jpg")
    plt.figure('china')
    plt.imshow(china)
    plt.grid(False)
    plt.show()
    print(china.shape)

    X = (china / 255.0).reshape(-1, 3)
    print(X.shape)

    # reduce the size of the image for speed
    image = china[::3, ::3]
    n_colors = 64

    X = (image / 255.0).reshape(-1, 3)

    model = KMeans(n_colors)
    labels = model.fit_predict(X)
    colors = model.cluster_centers_
    new_image = colors[labels].reshape(image.shape)
    new_image = (255 * new_image).astype(np.uint8)

    # create and plot the new image
    plt.figure()
    plt.imshow(image)
    plt.title('input')

    plt.figure()
    plt.imshow(new_image)
    plt.title('{0} colors'.format(n_colors))
def test_load_sample_image():
    try:
        china = load_sample_image("china.jpg")
        assert china.dtype == "uint8"
        assert china.shape == (427, 640, 3)
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
Example No. 8
def test_load_sample_image():
    try:
        china = load_sample_image('china.jpg')
        assert_equal(china.dtype, 'uint8')
        assert_equal(china.shape, (427, 640, 3))
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
Example No. 9
def test_load_sample_image():
    try:
        china = load_sample_image('china.jpg')
        assert_equal(china.dtype, 'uint8')
        assert_equal(china.shape, (427, 640, 3))
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
Example No. 10
def _china_dataset(n_samples=None, dtype=np.float32):
    img = load_sample_image('china.jpg')
    X = np.array(img, dtype=dtype) / 255
    X = X.reshape((-1, 3))[:n_samples]

    X, X_val = train_test_split(X, test_size=0.1, random_state=0)
    return X, X_val, None, None
Example No. 11
def split_and_merge_channels(image_path):
    '''
    Splitting and merging channels of an image
    '''
    image_mpl = load_sample_image(image_path)
    hsv_image = cv2.cvtColor(image_mpl,cv2.COLOR_RGB2HSV)
    (h,s,v) = cv2.split(hsv_image)
    hsv_image = cv2.merge((h,s,v))
Example No. 12
    def _preprocess_data(self, image_name: str) -> None:
        """Initialize data.

        * Convert images to tensor.
        * Modify color distribution.

        Args:
            image_name (str): Background image name for coloring.
        """

        # Transform for MNIST image
        _transform = transforms.Compose(
            [transforms.Resize(64), transforms.ToTensor()])

        # Transform for background image
        _transform_background = transforms.Compose(
            [transforms.RandomCrop(64), transforms.ToTensor()])

        # Load background image if necessary
        if self.color:
            background_image = Image.fromarray(load_sample_image(image_name))

        # Convert images to tensor
        data_list = []
        for img in self.data:
            # Image to tensor
            img = Image.fromarray(img.numpy(), mode="L")
            img = _transform(img)

            # Convert channel dim to RGB: (3, h, w)
            img = img.repeat(3, 1, 1)

            # Modify color distribution of images
            if self.color:
                # Binarize image
                img[img >= 0.5] = 1.0
                img[img < 0.5] = 0.0

                # Random crop of background image
                color_img = _transform_background(background_image)

                # Randomly alter color distribution
                color_img = (color_img + torch.rand(3, 1, 1)) / 2

                # Invert color of pixels at number
                color_img[img == 1] = 1 - color_img[img == 1]
                img = color_img

            # Add to data list
            data_list.append(img)

        # Convert list to tensor: (b, 3, h, w)
        self.data = torch.stack(data_list)
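
To make the coloring logic above easier to follow in isolation, here is a stripped-down sketch for a single image; the stand-in digit tensor and variable names are illustrative, not part of the original class:

import torch
from PIL import Image
from torchvision import transforms
from sklearn.datasets import load_sample_image

background = Image.fromarray(load_sample_image("china.jpg"))
crop = transforms.Compose([transforms.RandomCrop(64), transforms.ToTensor()])

digit = (torch.rand(3, 64, 64) > 0.5).float()             # stand-in for a binarized 3-channel digit
color_img = (crop(background) + torch.rand(3, 1, 1)) / 2  # random crop with a shifted color distribution
color_img[digit == 1] = 1 - color_img[digit == 1]         # invert the background colors where the digit is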
Example No. 13
def get_flower(native=False, reduced=False):
    flower = load_sample_image('flower.jpg')
    if native:
        return flower
    if reduced:
        image = np.array(flower, dtype=np.float64) / 255.
        w, h, d = image.shape
        arr = np.reshape(image, (w * h, d))

        sample = shuffle(arr, random_state=0)[:1000]
        km = KMeans(n_clusters=8, random_state=0).fit(sample)
        labels = km.predict(arr)
        return km, labels, w, h
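
A possible way to rebuild the 8-color quantized flower from the values get_flower returns when reduced=True (a usage sketch, not part of the original):

import matplotlib.pyplot as plt

km, labels, w, h = get_flower(reduced=True)
quantized = km.cluster_centers_[labels].reshape(w, h, 3)  # (rows, cols, 3), floats in [0, 1]
plt.imshow(quantized)
plt.show()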
Example No. 14
def convolutional_layer():
    pdb.set_trace()
    china = load_sample_image("china.jpg")
    flower = load_sample_image("flower.jpg")
    image = china[150:220, 130:250]
    height, width, channels = image.shape
    image_grayscale = image.mean(axis=2).astype(np.float32)
    images = image_grayscale.reshape(1, height, width, 1)
    fmap = np.zeros(shape=(7, 7, 1, 2), dtype=np.float32)
    fmap[:, 3, 0, 0] = 1
    fmap[3, :, 0, 1] = 1
    plot_image(fmap[:, :, 0, 0])
    plt.savefig(PNG_PATH + "vertical", dpi=300)
    plt.close()
    plot_image(fmap[:, :, 0, 1])
    plt.savefig(PNG_PATH + "horizontal", dpi=300)
    plt.close()

    reset_graph()
    X = tf.placeholder(tf.float32, shape=(None, height, width, 1))
    feature_maps = tf.constant(fmap)
    convolution = tf.nn.conv2d(X,
                               feature_maps,
                               strides=[1, 1, 1, 1],
                               padding="SAME")

    with tf.Session() as sess:
        output = convolution.eval(feed_dict={X: images})
    plot_image(images[0, :, :, 0])
    plt.savefig(PNG_PATH + "china_original", dpi=300)
    plt.close()

    plot_image(output[0, :, :, 0])
    plt.savefig(PNG_PATH + "china_vertical", dpi=300)
    plt.close()

    plot_image(output[0, :, :, 1])
    plt.savefig(PNG_PATH + "china_horizontal", dpi=300)
    plt.close()
Example No. 15
def example3():
    flower = datasets.load_sample_image('flower.jpg')
    # can be any high-resolution picture
    # ax = plt.axes(xticks = [], yticks = [])
    # ax.imshow(flower)
    # print(flower.shape)  # (height, width, channels)

    data = flower / 255  # rescale 0 - 255 values to the range [0, 1]
    data = data.reshape(427 * 640, 3)

    # print(data.shape)  # (273280, 3)

    def plot_pixels(data, title, colors=None, N=10000):
        if colors is None:
            colors = data

        # choose a random subset
        rng = np.random.RandomState(0)
        i = rng.permutation(data.shape[0])[:N]

        # permutation method:
        # np.random.permutation(): random permutation of a sequence
        # Ex. np.random.permutation([i for i in range(10)])

        colors = colors[i]
        R, G, B = data[i].T
        fig, ax = plt.subplots(1, 2, figsize=(16, 6))
        ax[0].scatter(R, G, color=colors, marker='.')
        ax[0].set(xlabel='Red', ylabel='Green', xlim=(0, 1), ylim=(0, 1))

        ax[1].scatter(R, B, color=colors, marker='.')
        ax[1].set(xlabel="Red", ylabel="Blue", xlim=(0, 1), ylim=(0, 1))

        fig.suptitle(title, size=20)

    # plot_pixels(data, title = "Input color space: 16 million possible colors")
    from sklearn.cluster import MiniBatchKMeans
    import warnings
    warnings.simplefilter("ignore")  # Fix numpy issue
    kmeans = MiniBatchKMeans(16)
    kmeans.fit(data)
    new_colors = kmeans.cluster_centers_[kmeans.predict(data)]

    plot_pixels(data,
                colors=new_colors,
                title="Reduced color space: 16 colors")

    plt.show()
Example No. 16
def reflect_sample():
    china = load_sample_image('china.jpg')

    f, axes = plt.subplots(1, 2)
    axes[0].set_title('the original china image.')
    axes[0].imshow(china)

    n_row, n_col, n_dim = china.shape
    m_reflect = np.zeros((n_row, n_row), dtype=int)

    for i in range(n_row):
        m_reflect[i, n_row - 1 - i] = 1

    new_china = np.stack(tuple(m_reflect.dot(china[:, :, d]) for d in range(n_dim)), axis=-1)
    axes[1].set_title('the reflected china image.')
    axes[1].imshow(new_china)

    plt.show()
Example No. 17
def invert_sample():
    china = load_sample_image('china.jpg')

    f, axes = plt.subplots(1, 2)
    axes[0].set_title('the original china image.')
    axes[0].imshow(china)

    n_row, n_col, n_dim = china.shape
    m_flip = np.zeros((n_col, n_col), dtype=int)

    for i in range(n_col):
        m_flip[i, n_col - 1 - i] = 1

    new_china = np.stack(tuple(m_flip.dot(china[:, :, d].T).T for d in range(n_dim)), axis=-1)
    axes[1].set_title('the inverted china image.')
    axes[1].imshow(new_china)

    plt.show()
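
The two permutation-matrix examples above are equivalent to simple axis flips; a minimal NumPy-slicing sketch of the same transforms (for illustration only):

import matplotlib.pyplot as plt
from sklearn.datasets import load_sample_image

china = load_sample_image('china.jpg')
reflected = china[::-1, :, :]   # rows reversed, same result as reflect_sample
inverted = china[:, ::-1, :]    # columns reversed, same result as invert_sample

f, axes = plt.subplots(1, 2)
axes[0].imshow(reflected)
axes[1].imshow(inverted)
plt.show()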
Example No. 18
def contour_finding():
    '''
    Procedure demonstrates contour detection techniques
    '''
    # finding the contours
    
    image = load_sample_image('flower.jpg')
    image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
    image_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    _,thresh = cv2.threshold(image_gray,127,255,cv2.THRESH_BINARY)
    contour_image = thresh.copy()
    contours,hierarchy = cv2.findContours(contour_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow("Contour image",contour_image)
    cv2.imshow("Thresholded image",thresh)
    print("Number of contours found: {}".format(len(contours)))
    # draw all contours
    canvas = np.ones(thresh.shape,dtype = np.uint8)
    img = cv2.drawContours(canvas.copy(),contours,-1,(255,255,255),3)
    cv2.imshow("All contours of the image",img)
    # draw individual contour
    cnt = contours[5]
    img = cv2.drawContours(canvas.copy(),[cnt],0,(255,255,255),3)
    cv2.imshow("Specified contours of the image",img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example No. 19
def color_quantization(n_colors=64, file_path=''):
    # Load the Summer Palace photo
    image = None
    if (len(file_path) > 0) and (os.path.isfile(file_path)):
        image = mpimg.imread(file_path)
    if image is None:
        image = load_sample_image("china.jpg")
    # Convert to floats instead of the default 8 bits integer coding. Dividing by
    # 255 is important so that plt.imshow behaves well on float data (need to
    # be in the range [0-1])
    image = np.array(image, dtype=np.float64) / 255

    # Load Image and transform to a 2D numpy array.
    w, h, d = original_shape = tuple(image.shape)
    assert d == 3
    image_array = np.reshape(image, (w * h, d))
    print("Fitting model on a small sub-sample of the data")
    t0 = time()
    image_array_sample = shuffle(image_array, random_state=0)[:1000]
    kmeans = KMeans(n_clusters=n_colors,
                    random_state=0).fit(image_array_sample)
    print("done in %0.3fs." % (time() - t0))

    # Get labels for all points
    print("Predicting color indices on the full image (k-means)")
    t0 = time()
    labels = kmeans.predict(image_array)
    print("done in %0.3fs." % (time() - t0))

    codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
    print("Predicting color indices on the full image (random)")
    t0 = time()
    labels_random = pairwise_distances_argmin(codebook_random,
                                              image_array,
                                              axis=0)
    print("done in %0.3fs." % (time() - t0))
    return [kmeans, image, labels, codebook_random, labels_random, w, h]
def plot_color_quantization():
    n_colors = 64

    # Load the Summer Palace photo
    china = load_sample_image("china.jpg")

    # Convert to floats instead of the default 8 bits integer coding. Dividing by
    # 255 is important so that plt.imshow behaves well on float data (need to
    # be in the range [0-1])
    china = np.array(china, dtype=np.float64) / 255

    # Load Image and transform to a 2D numpy array.
    w, h, d = original_shape = tuple(china.shape)
    assert d == 3
    image_array = np.reshape(china, (w * h, d))

    print("Fitting model on a small sub-sample of the data")
    t0 = time()
    image_array_sample = shuffle(image_array, random_state=0)[:1000]
    kmeans = KMeans(n_clusters=n_colors,
                    random_state=0).fit(image_array_sample)
    print("done in %0.3fs." % (time() - t0))

    # Get labels for all points
    print("Predicting color indices on the full image (k-means)")
    t0 = time()
    labels = kmeans.predict(image_array)
    print("done in %0.3fs." % (time() - t0))

    codebook_random = shuffle(image_array, random_state=0)[:n_colors]
    print("Predicting color indices on the full image (random)")
    t0 = time()
    labels_random = pairwise_distances_argmin(codebook_random,
                                              image_array,
                                              axis=0)
    print("done in %0.3fs." % (time() - t0))

    def recreate_image(codebook, labels, w, h):
        """Recreate the (compressed) image from the code book & labels"""
        d = codebook.shape[1]
        image = np.zeros((w, h, d))
        label_idx = 0
        for i in range(w):
            for j in range(h):
                image[i][j] = codebook[labels[label_idx]]
                label_idx += 1
        return image

    # Display all results, alongside original image
    plt.figure(1)
    plt.clf()
    plt.axis('off')
    plt.title('Original image (96,615 colors)')
    plt.imshow(china)

    plt.figure(2)
    plt.clf()
    plt.axis('off')
    plt.title('Quantized image (64 colors, K-Means)')
    plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))

    plt.figure(3)
    plt.clf()
    plt.axis('off')
    plt.title('Quantized image (64 colors, Random)')
    plt.imshow(recreate_image(codebook_random, labels_random, w, h))
    plt.show()
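
The pixel-by-pixel loop in recreate_image can be replaced by a single fancy-indexing expression; a vectorized sketch that assumes the same kmeans, labels, w and h as above:

def recreate_image_fast(codebook, labels, w, h):
    """Vectorized equivalent of recreate_image."""
    return codebook[labels].reshape(w, h, -1)

# e.g. plt.imshow(recreate_image_fast(kmeans.cluster_centers_, labels, w, h))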
Example No. 21
def _china_dataset(dtype=np.float32):
    img = load_sample_image('china.jpg')
    X = np.array(img, dtype=dtype) / 255
    X = X.reshape((-1, 3))
    return X
Example No. 22

# data = datasets.load_linnerud()
# import pdb
# pdb.set_trace()
# print("Features: ", len(data["data"][0]))
# print("Instances: ", len(data["data"]))
# print(len(set(data["target"])))

# NOTE: load_mlcomp() required a dataset name and has been removed from recent
# scikit-learn releases, so this call fails as written.
data = datasets.load_mlcomp()
print("Features: ", len(data["data"][0]))
print("Instances: ", len(data["data"]))
import pdb
pdb.set_trace()

# NOTE: load_sample_image() requires an image file name (e.g. "china.jpg") and
# returns a plain ndarray rather than a Bunch with "data"/"target" keys.
data = datasets.load_sample_image()
print("Features: ", len(data["data"][0]))
print("Instances: ", len(data["data"]))
print(len(set(data["target"])))

# NOTE: load_sample_images() returns a Bunch with "images" and "filenames",
# not "data"/"target".
data = datasets.load_sample_images()
print("Features: ", len(data["data"][0]))
print("Instances: ", len(data["data"]))
print(len(set(data["target"])))

# NOTE: load_svmlight_file() requires the path of an svmlight/libsvm file.
data = datasets.load_svmlight_file()
print("Features: ", len(data["data"][0]))
print("Instances: ", len(data["data"]))
print(len(set(data["target"])))

# NOTE: load_svmlight_files() likewise requires a list of file paths.
data = datasets.load_svmlight_files()
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn import datasets


from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle

# Specified the number of colors which were compresses
n_colors = 64

# Load the photo -- china summer palace
china = load_sample_image('china.jpg')


# convert: to float -- instead of default 8 bit integer
# Dividing by 255 so that plt.imshow behaves well on float data
# need to be in the range [0 - 1]
china = np.array(china, dtype=np.float64) / 255
print(china.min(), china.max())
print(china)
# Load the image and transform to a 2D numpy array
w, h, d = original_shape = tuple(china.shape)
print(w, h, d)  # 427 * 640 * 3
assert d == 3
image_array = np.reshape(china, (w*h, d))
print(image_array[1])
print('Fitting model on a small sub-sample of the data')
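
The listing stops just before the fit; a minimal continuation in the same style, reusing n_colors, image_array and the imports from above (a sketch, not part of the original snippet):

image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
labels = kmeans.predict(image_array)
print(kmeans.cluster_centers_.shape)  # (64, 3)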
Example No. 24
from os.path import dirname, join
from sklearn.externals import joblib

logging.basicConfig()
# ..
# .. load data ..
# lfw_people = datasets.fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# print(lfw_people.data.shape)

NUM_SAMPLES = 10

data = []
target = np.genfromtxt('train.csv', delimiter=',')[:NUM_SAMPLES, 1][1:]
for i in range(1, len(target) + 1):
    num_zeroes = (5 - len(str(i))) * '0'
    # NOTE: load_sample_image() only ships "china.jpg" and "flower.jpg"; loading
    # numbered files like this needs a different image loader.
    l_img = load_sample_image(num_zeroes + str(i) + '.jpg')
    # l_img = imresize(l_img, (8,8,3))
    data.append(l_img)

lfw_people = datasets.base.Bunch(target=np.array(target),
                                 data=np.array(data).reshape(len(data), -1))

print(lfw_people.data.shape)
faces = np.reshape(lfw_people.data, (lfw_people.target.shape[0], -1))
print(faces.shape)
exit()
skf = model_selection.StratifiedKFold(n_splits=4)
train, test = next(iter(skf.split(lfw_people.data, lfw_people.target)))
X_train, X_test = faces[train], faces[test]
y_train, y_test = lfw_people.target[train], lfw_people.target[test]
print(y_train, y_test)
Example No. 25
def test_load_sample_image():
    china = load_sample_image('china.jpg')
    assert_equal(china.dtype, 'uint8')
    assert_equal(china.shape, (427, 640, 3))
Example No. 26
#
# License: BSD 3 clause

print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time

n_colors = 64

# Load the Summer Palace photo
china = load_sample_image("china.jpg")
china1 = load_sample_image("china.jpg")

# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves well on float data (need to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255

# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))

print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
Example No. 27
def test_load_missing_sample_image_error():
    if pillow_installed:
        with pytest.raises(AttributeError):
            load_sample_image("blop.jpg")
    else:
        warnings.warn("Could not load sample images, PIL is not available.")
Example No. 28
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 18-11-12 09:10
# @Author  : Vitan

from sklearn.datasets import load_sample_image
from matplotlib import pyplot as plt

ChinaImage = load_sample_image('china.jpg')
print(ChinaImage)

plt.imshow(ChinaImage)
plt.show()

plt.imshow(ChinaImage[:, :, 1])
plt.show()

plt.imshow(ChinaImage[:, :, 2], plt.cm.gray)
plt.show()
Example No. 29
#!/usr/bin python
# -*- encoding: utf-8 -*-
'''
@Author  :   Celeste Young
@File    :   手写字.py
@Time    :   2021/2/15 21:24
@Tips    :   Handwritten digits and image loading
'''

# # =========== Handwritten digit data ===========
from sklearn.datasets import load_digits,load_sample_image
import matplotlib.pyplot as plt  # plotting tool
# digits = load_digits()
# data=digits.data
# print(data.shape)
# plt.matshow(digits.images[15])  # display as a matrix of pixels
# # plt.imshow(digits.images[3])  # display the image with smooth shading
# # plt.gray()   # display in grayscale mode

img=load_sample_image('flower.jpg')   # load the flower image bundled with scikit-learn
plt.imshow(img)
plt.show()
Example No. 30
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 18-11-12 10:10
# @Author  : Vitan

from sklearn.datasets import load_sample_image
import matplotlib.image as img
from sklearn.cluster import KMeans
import numpy as np
from matplotlib import pyplot as plt
import sys

picture = load_sample_image('china.jpg')
pic2 = img.imread('vitan.jpg')
# Depending on the image resolution, downsample it as appropriate.
image = picture[::3, ::3]  # keep one pixel out of every three along each axis
plt.imshow(image)
img.imsave('E://pure.jpg', image)
plt.show()

# Then cluster all the color values in the image with k-means.
X = image.reshape(-1, 3)  # flatten the pixels into an (N, 3) array
mod = KMeans(n_clusters=64)
labels = mod.fit_predict(X)  # color cluster index (0-63) for each pixel
colors = mod.cluster_centers_  # the 64 cluster centers (color values)

# Restore the colors, shape, and dtype
new_img = colors[labels]
new_img = new_img.reshape(image.shape)
new_img = new_img.astype(np.uint8)
print(new_img)
Example No. 31
kmeans = KMeans(n_clusters=4)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)

from sklearn.metrics import pairwise_distances_argmin


def find_clusters(X, n_clusters, rseed=2):
    rng = np.random.RandomState(rseed)
    i = rng.permutation(X.shape[0])[:n_clusters]
    centers = X[i]
    while True:
        labels = pairwise_distances_argmin(X, centers)
        new_centers = np.array(
            [X[labels == i].mean(0) for i in range(n_clusters)])
        if np.all(centers == new_centers):
            break
        centers = new_centers
    return centers, labels


centers, labels = find_clusters(X, 4)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis')
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)


from sklearn.datasets import load_sample_image

india = load_sample_image("flower.jpg")
Example No. 32
def load_image_test():
	flower = load_sample_image('flower.jpg')
	pl.imshow(flower)
	pl.show()
Example No. 33
#
# License: BSD 3 clause

print(__doc__)
import numpy as np
import pylab as pl
from sklearn.cluster import KMeans
from sklearn.metrics import euclidean_distances
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time

n_colors = 64

# Load the Summer Palace photo
china = load_sample_image("china.png")

# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that pl.imshow behaves well on float data (need to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255

# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))

print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
#####################################
# Color compression using k-means   #
# Created By G.K                    #
#####################################

### import libraries
from skimage import data, io
from matplotlib import pyplot as plt
### use this library if you want to input custom image
import imageio
#china  = imageio.imread('custom_image.jpg')
from sklearn.datasets import load_sample_image
### loading an image sample from the available dataset
Image = load_sample_image('flower.jpg')

### used to get the pixel value of image
print(Image.shape)

### Original picture
#io.imshow(Image)
#plt.show()

### Normalizing the original image
data = Image / 255.0
###  converting  3D matrix into 2D
data = data.reshape(640 * 427, 3)

import warnings
warnings.simplefilter('ignore')
### import clustering from scikit
from sklearn.cluster import MiniBatchKMeans
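
The snippet is cut off right after the import; a minimal continuation in the same spirit, reusing data, Image, io and plt from above (16 clusters is an arbitrary choice for this sketch):

kmeans = MiniBatchKMeans(16)
kmeans.fit(data)
new_colors = kmeans.cluster_centers_[kmeans.predict(data)]
recolored = new_colors.reshape(Image.shape)  # back to (427, 640, 3)
io.imshow(recolored)
plt.show()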
Example No. 35
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time

#
# Free Coding session for 2015-06-10
# Written by Matt Warren
#

n_colors = 50

china = load_sample_image("china.jpg")
china = np.array(china, dtype=np.float64) / 255

w, h, d = original_shape = tuple(china.shape)
image_array = np.reshape(china, (w * h, d))

# grab a sample of pixels to run kmeans on
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
labels = kmeans.predict(image_array)


def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels"""
    d = codebook.shape[1]
    image = np.zeros((w, h, d))
    label_idx = 0
    for i in range(w):
        for j in range(h):
            image[i][j] = codebook[labels[label_idx]]
            label_idx += 1
    return image
Example No. 36
# txt_vec is assumed to be a CountVectorizer (or similar) fitted earlier in the notebook
allice_vec = txt_vec.transform(['allice.txt'])

allice_vec

allice_vec.shape

allice_vec = allice_vec.toarray()

allice_vec[0, 100:120]

for word, count in zip(txt_vec.get_feature_names()[100:120],
                       allice_vec[0, 100:120]):
    print(word, count)


china = load_sample_image('china.jpg')

plt.imshow(china)

china.shape

histR = plt.hist(china[:, :, 0].ravel(), bins=10)
plt.show()
histG = plt.hist(china[:, :, 1].ravel(), bins=10)
plt.show()
histB = plt.hist(china[:, :, 2].ravel(), bins=10)
plt.show()


histRGBcat = np.hstack((histR[0], histG[0], histB[0]))
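
As a sketch of how such a concatenated RGB histogram might be used as a simple image descriptor (the helper below is illustrative, not from the original notebook):

import numpy as np
from sklearn.datasets import load_sample_image

def rgb_hist(img, bins=10):
    """Concatenated per-channel histogram over the 0-255 range."""
    return np.hstack([np.histogram(img[:, :, c].ravel(), bins=bins, range=(0, 255))[0]
                      for c in range(3)])

china_feat = rgb_hist(load_sample_image('china.jpg'))
flower_feat = rgb_hist(load_sample_image('flower.jpg'))
print(np.linalg.norm(china_feat - flower_feat))  # crude distance between the two descriptors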
Example No. 37
File: ch13.py Project: kcson/mlbook
def save_fig(path, tight_layout=True):  # signature reconstructed; the listing starts mid-function
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)


def plot_image(image):
    plt.imshow(image, cmap="gray", interpolation="nearest")
    plt.axis("off")


def plot_color_image(image):
    plt.imshow(image.astype(np.uint8), interpolation="nearest")
    plt.axis("off")


china = load_sample_image("china.jpg")
flower = load_sample_image("flower.jpg")
dataset = np.array([china, flower], dtype=np.float32)
batch_size, height, width, channels = dataset.shape
print(china.shape)
print(dataset.shape)

filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
# filter
filters[:, 3, :, 0] = 1  # vertical
filters[3, :, :, 1] = 1  # horizontal

X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
# convolution = tf.nn.conv2d(X, filters, strides=[1, 2, 2, 1], padding="SAME")
convolution = tf.layers.conv2d(X, filters=2, kernel_size=7, strides=[2, 2], padding="SAME")
max_pool = tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
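
A possible way to evaluate the two ops defined above in the same TF 1.x style (a sketch; the variable initializer is needed because tf.layers.conv2d creates trainable weights):

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    conv_out, pool_out = sess.run([convolution, max_pool], feed_dict={X: dataset})
print(conv_out.shape)  # stride-2 convolution roughly halves the spatial dimensions, 2 feature maps
print(pool_out.shape)  # 2x2 max pooling of the raw input X, channels unchanged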
Example No. 38
File: svm.py Project: amineux/kmean
from sklearn.decomposition import RandomizedPCA  # from older scikit-learn; newer versions use PCA(svd_solver='randomized')
pca=RandomizedPCA(2).fit(X)
X_proj = pca.transform(X)

fig, ax = plt.subplots(1, 2, figsize=(8,4))
ax[0].scatter(X_proj[:,0], X_proj[:,1], c=y_pred)
ax[0].set_title('Clusters reduced to 2D with PCA', fontsize=10)

ax[1].scatter(X_proj[:,0], X_proj[:,1], c=y)
ax[1].set_title('Original Dataset reduced to 2D with PCA', fontsize=10)



from sklearn.datasets import load_sample_image
img=load_sample_image("china.jpg");
plt.imshow(img)



print(img.shape)


img_r = (img / 255.0).reshape(-1,3)
print(img_r.shape)



k_colors = KMeans(n_clusters=64).fit(img_r)
y_pred=k_colors.predict(img_r)
from sklearn.datasets import load_sample_image

china = load_sample_image('china.jpg')
china.dtype
china.shape


flower = load_sample_image('flower.jpg')
flower.dtype
flower.shape

import matplotlib.pyplot as plt
plt.imshow(china)
plt.imshow(flower)