コード例 #1
0
ファイル: test_base.py プロジェクト: Calvin-O/scikit-learn
def test_load_sample_images():
    """Smoke-test the sample-image bundle: two images, two filenames, a DESCR.

    Skips (with a warning) when PIL is unavailable and the loader raises
    ImportError.
    """
    try:
        bundle = load_sample_images()
        assert_equal(len(bundle.images), 2)
        assert_equal(len(bundle.filenames), 2)
        assert_true(bundle.DESCR)
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
コード例 #2
0
ファイル: test_base.py プロジェクト: allefpablo/scikit-learn
def test_load_sample_images():
    """Load the sample images and verify counts and known corner pixels."""
    try:
        bundle = load_sample_images()
        assert_equal(len(bundle.images), 2)
        assert_equal(len(bundle.filenames), 2)
        china, flower = bundle.images

        # First image is the china photo: check its top-left RGB pixel.
        expected_china = np.array([174, 201, 231], dtype=np.uint8)
        assert np.all(china[0, 0, :] == expected_china)
        # Second image is the flower photo.
        expected_flower = np.array([2, 19, 13], dtype=np.uint8)
        assert np.all(flower[0, 0, :] == expected_flower)
        assert bundle.DESCR
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
コード例 #3
0
def go_extract(inputs):
    """Predict the cluster for a base64-encoded image.

    Args:
        inputs: JSON-style dict. The record id is read from
            inputs['values'][0]['recordId'] and the base64 image payload
            from inputs['values'][0]['data']['images']['data'].

    Returns:
        The dict produced by build_output_response, holding the record id,
        the predicted cluster label (or '' when none applies), any error
        message, and the loaded cluster labels.
    """
    # Bug fix: initialise every name the except-branch references BEFORE the
    # try block.  Previously `record_id` was only bound inside the try, so a
    # malformed payload raised NameError while building the error response.
    record_id = ''
    label = ''
    error = ''
    cluster_labels = []
    try:
        record_id = inputs['values'][0]['recordId']
        # Get the base64 encoded image
        encoded_image = inputs['values'][0]['data']['images']['data']

        img = base64.b64decode(str(encoded_image).strip())

        logging.info(f"Cluster labels file {os.environ.get('CLUSTER_LABELS')}")
        cluster_labels = joblib.load(
            os.path.join("models/", os.environ.get("CLUSTER_LABELS")))
        logging.info(f"Loaded cluster labels {cluster_labels}")
        # We will run on a small sample dataset
        if sample_model:
            # Download sample data
            from sklearn.datasets import load_sample_images, load_sample_image

            dataset = load_sample_images()
            images = dataset['images']
            images.extend(images)

            # Train detector
            labels = detector.train(images)

        # Load the image
        image = Image.open(io.BytesIO(img))
        # Convert image to numpy array
        img = asarray(image)
        # Predict
        label = detector.assign_group([img])
        logging.info(f"Predicted cluster {label.item()} recordId {record_id}")

        # Map the numeric cluster id to its human-readable label; -1 means
        # "no cluster" and maps to the empty string.
        if len(cluster_labels) > 0 and label.item() > -1:
            label = cluster_labels[label.item()]
        else:
            label = ''

        output_response = build_output_response(record_id, label, error,
                                                cluster_labels)

    except Exception as ProcessingError:
        logging.exception(ProcessingError)
        error = str(ProcessingError)
        output_response = build_output_response(record_id, label, error,
                                                cluster_labels)

    logging.info(output_response)

    return output_response
コード例 #4
0
    def setUpClass(self):
        """Build mock training data, model configs and the MainHandler."""
        self.is_initial_training_from_topic = False

        self.inference_data_topic = 'inference'
        self.prediction_result_topic = 'prediction'

        # Mock training data: twenty 256x256 copies of the first sklearn
        # sample image.
        self.training_data_topic = None
        bundle = load_sample_images()
        self.initial_training_data = [
            cv2.resize(bundle.images[0], (256, 256)) for _ in range(20)
        ]

        # Send training data on the 'training' topic.
        self.training_data_topic = 'training'

        self.user_constraints = {
            "is_real_time": False,
            "minimum_efectiveness": None
        }

        # (name, training_rate, efectiveness, inference_rate, mock delay)
        specs = [
            ("model_1", 200, 30, 10, 50),
            ("model_2", 300, 20, 20, 30),
            ("model_3", 400, 20, 20, 10),
        ]
        self.models = [
            {
                "name": name,
                "training_rate": train_rate,
                "efectiveness": effectiveness,
                "inference_rate": infer_rate,
                "model": MockModel(delay, model_name=name),
            }
            for name, train_rate, effectiveness, infer_rate, delay in specs
        ]

        self.drift_algorithm = PageHinkley(min_instances=5, delta=0.005,
                                           threshold=10, alpha=1 - 0.01)
        self.dimensionality_reduction = PCA()

        # What happens if there are less infered examples than this number?
        self.number_training_frames_after_drift = 5

        self.handler = MainHandler(
            models=self.models,
            user_constraints=self.user_constraints,
            number_training_frames_after_drift=self.number_training_frames_after_drift,
            drift_algorithm=self.drift_algorithm,
            dimensionality_reduction=self.dimensionality_reduction,
            training_data_topic=self.training_data_topic,
            is_initial_training_from_topic=self.is_initial_training_from_topic,
            initial_training_data=self.initial_training_data,
            prediction_result_topic=self.prediction_result_topic,
            inference_data_topic=self.inference_data_topic
            )
コード例 #5
0
import numpy as np
from sklearn.datasets import load_sample_images
import tensorflow as tf
import matplotlib.pyplot as plt

# Load the dataset.
# A single input image is usually 3D: [height, width, channels].
# A mini-batch is usually 4D: [mini-batch size, height, width, channels].
dataset = np.array(load_sample_images().images, dtype=np.float32)
# The dataset holds two pictures: a Chinese temple and a flower.
batch_size, height, width, channels = dataset.shape
print(batch_size, height, width, channels)

# Create two hand-built filters.
# Layout: height, width, input channels, number of kernels
# i.e. 7, 7, channels, 2
filters_test = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters_test[:, 3, :, 0] = 1  # vertical line detector
filters_test[3, :, :, 1] = 1  # horizontal line detector

# The `filter` argument takes a whole bank of filters at once.
X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
# In strides=[1, 2, 2, 1] the first and last entries must be 1; the two
# middle values are the vertical (sh) and horizontal (sw) strides.
convolution = tf.nn.conv2d(X,
                           filter=filters_test,
                           strides=[1, 2, 2, 1],
                           padding='SAME')

# TF1-style graph execution: feed both images through the convolution.
with tf.Session() as sess:
    output = sess.run(convolution, feed_dict={X: dataset})
コード例 #6
0
# Assignment for the day: implement an SVM model on a cats-and-dogs image
# dataset. Prerequisite: convert the images to grayscale numpy arrays and
# create your own labels.


import matplotlib.pyplot as plt
from sklearn.datasets import load_sample_images

dataset = load_sample_images()
print(len(dataset.images))

first_img_data = dataset.images[0]
print(first_img_data.shape)

# Show the two sample images side by side.
for position, image in enumerate(dataset['images'], start=1):
    plt.subplot(1, 2, position)
    plt.imshow(image, cmap=plt.cm.gray_r)
plt.show()
コード例 #7
0
from lightning import Lightning
from sklearn import datasets

# Open a Lightning session and display the first sklearn sample image
# as an image-polygon plot.
session = Lightning()

sample_images = datasets.load_sample_images()['images']
first_image = sample_images[0]

session.imagepoly(first_image)
コード例 #8
0
from matplotlib import pyplot as plt
import numpy as np
import math
from scipy import misc
from skimage.feature import hog
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.datasets import load_sample_images


def rgb2gray(rgb):
    """Convert an (H, W, 3) RGB image to a (H, W) grayscale array.

    Uses the ITU-R BT.601 luma weights 0.2989/0.5870/0.1140 for the
    R, G and B channels respectively.
    """
    weights = (0.2989, 0.5870, 0.1140)
    return sum(w * rgb[:, :, channel] for channel, w in enumerate(weights))


# Load the two sklearn sample images and work with the flower picture.
data = load_sample_images()
len(data.images)  # NOTE(review): no-op outside an interactive session
img1 = data.images[1]
img1.shape  # no-op outside a REPL
plt.imshow(img1)

# Grayscale copy for display only - the HOG below runs on the colour image.
img2 = rgb2gray(img1)
plt.imshow(img2)
# Compute HOG features and the visualisation image (multichannel=True
# because img1 still has its RGB channels).
fd, hog_image = hog(img1,
                    orientations=8,
                    pixels_per_cell=(16, 16),
                    cells_per_block=(1, 1),
                    visualize=True,
                    multichannel=True)
plt.imshow(hog_image)
コード例 #9
0
import numpy as np
from sklearn.datasets import load_sample_images
import tensorflow as tf
import matplotlib.pyplot as plt


# Load the dataset.
# A single input image is usually 3D: [height, width, channels].
# A mini-batch is usually 4D: [mini-batch size, height, width, channels].
dataset = np.array(load_sample_images().images, dtype=np.float32)
# The dataset holds two pictures: a Chinese temple and a flower.
batch_size, height, width, channels = dataset.shape
print(batch_size, height, width, channels)

# Create two hand-built filters.
# Layout: height, width, input channels, number of kernels
# i.e. 7, 7, channels, 2
filters_test = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters_test[:, 3, :, 0] = 1  # vertical line detector
filters_test[3, :, :, 1] = 1  # horizontal line detector

# The `filter` argument takes a whole bank of filters at once.
X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
# In strides=[1, 2, 2, 1] the first and last entries must be 1; the two
# middle values are the vertical (sh) and horizontal (sw) strides.
convolution = tf.nn.conv2d(X, filter=filters_test, strides=[1, 2, 2, 1], padding='SAME')

with tf.Session() as sess:
    output = sess.run(convolution, feed_dict={X: dataset})

# Show feature map 0 of the first image (the original comment called this
# the "second" feature map, but channel index 0 is the first).
plt.imshow(output[0, :, :, 0])
plt.show()
コード例 #10
0
ファイル: dataload2.py プロジェクト: seokhyunson/pyAnaltyics1
# REPL-style tour of the scikit-learn bundled dataset loaders.  The bare
# variable names after each call only echo the value in an interactive
# session; run as a script they are no-ops.
diabetes = datasets.load_diabetes()
diabetes

# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 - this line fails on modern scikit-learn versions.
boston = datasets.load_boston()
boston

iris = datasets.load_iris()
iris

digits = datasets.load_digits()
digits

linnerud = datasets.load_linnerud()
linnerud

wine = datasets.load_wine()
wine

breast_cancer = datasets.load_breast_cancer()
breast_cancer


#%%
#Sample Images

# load_sample_images() returns a Bunch with .images, .filenames and .DESCR.
image1 = datasets.load_sample_images()
image1


#load_sample_image(image_name)
コード例 #11
0
import numpy as np
from sklearn.datasets import load_sample_images
import tensorflow as tf
import matplotlib.pyplot as plt

# Filter the images with two convolution kernels.

# Load the dataset.
# A single input image is usually 3D: [height, width, channels].
# A mini-batch is usually 4D: [mini-batch size, height, width, channels].
dataset = np.array(load_sample_images().images, dtype=np.float32)
# The dataset holds two pictures: a Chinese temple and a flower.
batch_size, height, width, channels = dataset.shape
print(batch_size, height, width, channels)

# Create two hand-built filters.
# Layout: height, width, input channels, number of kernels
# i.e. 7, 7, channels, 2
filters_test = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters_test[:, 3, :, 0] = 1  # vertical line detector
filters_test[3, :, :, 1] = 1  # horizontal line detector

# The `filter` argument takes a whole bank of filters at once.
X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
# In strides=[1, 2, 2, 1] the first and last entries must be 1; the two
# middle values are the vertical (sh) and horizontal (sw) strides.
convolution = tf.nn.conv2d(X,
                           filter=filters_test,
                           strides=[1, 2, 2, 1],
                           padding='SAME')

with tf.Session() as sess:
コード例 #12
0
# Loading a dataset from the scikit-learn built-in dataset module.
from sklearn.datasets import load_iris

iris = load_iris()

iris.keys()

from sklearn import datasets as ds
from matplotlib import pyplot as pl

# Display the first bundled sample image.
images = ds.load_sample_images()
pl.imshow(images.images[0])

import sklearn.datasets as ds

# NOTE(review): `ds.fetch` is referenced without being called, so `data`
# is not a dataset and `data.keys()` will fail.  A specific loader such as
# `ds.fetch_olivetti_faces()` was probably intended - confirm against the
# original notebook.
data = ds.fetch
data.keys()

Data = [
    {
        'Price': 710000,
        'Rooms': 2,
        'Neighbourhood': 'Cuffe Parade'
    },
    {
        'Price': 740000,
        'Rooms': 1,
        'Neighbourhood': 'Coloba'
    },
    {
        'Price': 730000,
コード例 #13
0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.datasets import load_sample_images

sample = load_sample_images()
dataset = sample.images
nb_images = len(dataset)
print(nb_images) # => 2

for image in dataset:
    print(image.shape) # prints (427, 640, 3) twice

img_1 = dataset[0].astype(np.float32)
print(img_1[:, :, 1])

# Scale only channels 0 and 1 (R and G) to [0, 1]; channel 2 (B) keeps
# its original 0-255 range.
img_1[:, :, 0:2] = img_1[:, :, 0:2]/255.
print(img_1[:, :, 0])

# Display the (scaled) green channel.
plt.imshow(img_1[:, :, 1])
plt.show()
コード例 #14
0
ファイル: tests.py プロジェクト: nukui-s/clippedgrad
import os
import time

import numpy as np  # fix: `np` was used below without being imported
import tensorflow as tf
from sklearn.datasets import load_digits, load_sample_images
from clippedgrad import ClippedAdagradOptimizer
from clippedgrad import ClippedGDOptimizer
from tffactorization.tfnmf import TFNMF

# Start from a fresh log directory for the summary writer.
os.system("rm -rf logtest")
sess = tf.InteractiveSession()
writer = tf.train.SummaryWriter("logtest",sess.graph_def)
N = 8
K = 2
# NOTE(review): this random matrix is overwritten immediately below; it is
# kept only because removing it would change the global NumPy RNG state
# consumed by the `w`/`h` draws.
v = np.random.random(size=[N,N]).astype(np.float32)
# Use a 400x200 single-channel crop of the first sklearn sample image.
v = load_sample_images().images[0][0:400,0:200,0]
N = v.shape[0]
M = v.shape[1]
w = np.random.rand(N,K).astype(np.float32)
h = np.random.rand(K,M).astype(np.float32)

# Factorise v ~= W3 @ H3 with TFNMF; report wall time and mean squared loss.
tfnmf = TFNMF(v,K)
start = time.time()
W3, H3 = tfnmf.run(sess)
end = time.time()
loss = np.power(v - np.matmul(W3,H3),2).sum() / (M*N)
print(end-start)
print("loss:",loss)

V = tf.placeholder("float", shape=[N,M])
W = tf.Variable(w)
コード例 #15
0
ファイル: test_base.py プロジェクト: josexie/scikit-learn
def test_load_sample_images():
    """The sample-image bundle has two images, two filenames and a DESCR."""
    bundle = load_sample_images()
    assert_equal(len(bundle.images), 2)
    assert_equal(len(bundle.filenames), 2)
    assert_true(bundle.DESCR)
コード例 #16
0
from sklearn.datasets import load_sample_images
import numpy as np
import tensorflow as ts
import matplotlib.pyplot as plt

# Load the bundled sample-image dataset.
imagenes = load_sample_images().images

# One variable per image.
img1 = imagenes[0]
img2 = imagenes[1]

# Display both images.
plt.imshow(img1)
plt.show()
plt.imshow(img2)
plt.show()

# Data format of a single image.
print("Forato de las imagenes --> dim[{}] Shape[{}] type[{}] ".format(
    img1.ndim, img1.shape, img1.dtype))
# Convert the images into a float array.
# Fix: `np.float` was removed in NumPy 1.24; the builtin `float` (i.e.
# float64) is the exact equivalent of the old alias.
dataset = np.array(imagenes, dtype=float)
print("Forato de las imagenes --> dim[{}] Shape[{}] type[{}] ".format(
    dataset.ndim, dataset.shape, dataset.dtype))

# Keep compatibility with TensorFlow 1.x-style graph execution.
ts.compat.v1.disable_eager_execution()

# Unpack the mini-batch dimensions.
tam, alto, ancho, canales = dataset.shape
コード例 #17
0
ファイル: image.py プロジェクト: ggorr/Machine-Learning
from sklearn import datasets
import matplotlib.pyplot as plt

from lib.NodeGraph import *
import lib.DataTools as dt

# Scale the first sklearn sample image to [0, 1] and push it through a
# hand-initialised 3x3 Conv2D node (one 0.33-row filter per channel).
im_in = datasets.load_sample_images().images[0] / 255
trX = dt.channelFirst(np.array([im_in]))
sset = StartSet2D(trX.shape[1:3], trX.shape[0])
conv = Conv2D((3, 3), 3, biased=False)
conv.addPrevSets(sset)
conv.compile()
conv.preFit(regularizer=None, momentum=None)
conv.B = np.zeros((conv.ychs, 1, 1, 1))
conv.W = np.zeros((conv.ychs, conv.xchs, conv.filSiz[0], conv.filSiz[1]))
# Fix: `np.float` was removed in NumPy 1.24; the builtin `float` is the
# exact equivalent of the old alias.
conv.W[0] = np.array(
    [[[0, 0, 0], [.33, .33, .33], [0, 0, 0]],
     [[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]],
    float)
conv.W[1] = np.array(
    [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [.33, .33, .33], [0, 0, 0]
                                         ], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]],
    float)
conv.W[2] = np.array(
    [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
     [[0, 0, 0], [0, 0, 0], [.33, .33, .33]]], float)
conv.prePropTr(1)
conv.resetForPush()
sset.pushTr(trX)

# print(conv.trY.shape)
    def setUpClass(self):
        """Prepare mock training data, model configs and the MainHandler."""
        self.is_initial_training_from_topic = False

        self.inference_data_topic = 'inference'
        self.prediction_result_topic = 'prediction'

        # Mock training data: twenty 256x256 copies of each sample image.
        self.training_data_topic = None
        bundle = load_sample_images()
        raw_frames = [bundle.images[0]] * 20 + [bundle.images[1]] * 20
        self.initial_training_data = [
            cv2.resize(frame, (256, 256)) for frame in raw_frames
        ]

        # Send training data on the 'training' topic.
        # (Alternative: stream training videos from the ADOC dataset via a
        # VideoProducer on localhost:29092.)
        self.training_data_topic = 'training'

        self.user_constraints = {
            "is_real_time": False,
            "minimum_efectiveness": None
        }

        # (name, training_rate, efectiveness, inference_rate, mock delay)
        specs = [
            ("model_1", 200, 30, 10, 40),
            ("model_2", 300, 20, 20, 30),
            ("model_3", 400, 20, 20, 10),
        ]
        self.models = [{
            "name": name,
            "training_rate": train_rate,
            "efectiveness": effectiveness,
            "inference_rate": infer_rate,
            "model": MockModel(delay, model_name=name),
        } for name, train_rate, effectiveness, infer_rate, delay in specs]

        self.drift_algorithm = PageHinkley(min_instances=20,
                                           delta=0.005,
                                           threshold=10,
                                           alpha=1 - 0.01)
        self.dimensionality_reduction = PCA()
        self.number_training_frames_after_drift = 10

        self.handler = MainHandler(
            models=self.models,
            user_constraints=self.user_constraints,
            number_training_frames_after_drift=(
                self.number_training_frames_after_drift),
            drift_algorithm=self.drift_algorithm,
            dimensionality_reduction=self.dimensionality_reduction,
            training_data_topic=self.training_data_topic,
            is_initial_training_from_topic=self.is_initial_training_from_topic,
            initial_training_data=self.initial_training_data,
            prediction_result_topic=self.prediction_result_topic,
            inference_data_topic=self.inference_data_topic,
            provide_training_data_after_drift=True)
コード例 #19
0
    min_ele = array.min(initial=None)
    array -= min_ele
    max_ele = array.max(initial=None)
    array = 255 * array / max_ele
    return np.around(array, 0)


Max_conversion_rate = 80  # Set number -> 0-100 [%] of the original picture size.
Normalization = 1  # Normalization of results / Set 0/1
Picture_choice = 1  # Selection photo / Set 0/1

# File names for the original, processed and compressed pictures.
Picture = "Base_RGB.jpg"
Picture_output = "RGB_output.jpg"
Picture_compressed = "RGB_compressed_output.jpg"
space = ' '
# Picture_choice indexes the sklearn samples: 0 = china, 1 = flower.
database = load_sample_images()
Base_img = database.images[Picture_choice]

# Save the raw array to disk so its on-disk size can be measured.
Show_img = Image.fromarray(Base_img)
Show_img.save(Picture)

Height, Width = Base_img.shape[0], Base_img.shape[1]

# Measure the saved file's size by seeking to its end.
f_original = open(Picture)
f_original.seek(0, os.SEEK_END)
Base_weight = f_original.tell()
f_original.close()

Range = int(
    Max_conversion_rate * Height /
    100) if int(Max_conversion_rate * Height /
コード例 #20
0
    def setUpClass(self):
        """Expose two mock frame sequences: 20 copies of each sample image."""
        bundle = load_sample_images()
        first, second = bundle.images[0], bundle.images[1]
        self.sequences = [[first] * 20, [second] * 20]
コード例 #21
0
from sklearn.datasets import load_sample_images
import matplotlib.pyplot as plt
import numpy as np
from skimage.feature import corner_harris
from skimage.feature import corner_peaks
from skimage.color import rgb2gray

# Detect Harris corners on the first sklearn sample image and overlay
# them as red dots on the original picture.
dataset = load_sample_images()
image = dataset.images[0]
grayscale = rgb2gray(image)
corner_coords = corner_peaks(corner_harris(grayscale))
rows, cols = np.transpose(corner_coords)
plt.axis('off')
plt.imshow(image)
plt.plot(cols, rows, 'ro')
plt.show()
コード例 #22
0
import os

import numpy as np
import sklearn.datasets as skl_data
import pylab as pl

# Fix: `os` was used below without being imported.
os.chdir("/home/tvieira/git/ppca-mixture/")

# Tobomovirus data
# Should be 38 rows / observations of 18 columns of number
# of amino acids attached to a surface protien
tobamovirus = np.fromfile("virus3.dat", sep=" ")
tobamovirus = np.reshape(tobamovirus, (38, 18))

# Image data
# To convert to grayscale , reference below website
# https://samarthbhargav.wordpress.com/2014/05/05/image-processing-with-python-rgb-to-grayscale-conversion/
image_data = skl_data.load_sample_images()

"""
first_img_data = image_data.images[0] 
first_img_data.shape               
first_img_data.dtype               
"""

# Handwritten Data
digits = skl_data.load_digits()
"""
digits.keys()
print(digits.data.shape) #1700 or so sampls of 64 matrix of 16 color intensity
digits.target # This is the classification 0-10 of each image
pl.gray() 
コード例 #23
0
ファイル: utils.py プロジェクト: ahmedassal/ml-playground
def get_patches(size, num=5000):
    """Sample up to `num` random (size x size) grayscale patches from the
    second sklearn sample image, with pixel values scaled to [0, 1]."""
    bundle = load_sample_images()
    grayscale = (np.float64(bundle.images[1]) / 255.).mean(axis=2)
    return extract_patches_2d(grayscale, (size, size), max_patches=num)
コード例 #24
0
    # NOTE(review): fragment of a larger loader function - `olivetti_faces`,
    # `logger`, `data_folder`, `random_state`, `start_time` and
    # `console_handler` are bound earlier, outside this excerpt.
    olivetti_faces_images = olivetti_faces['images']
    olivetti_faces_target = olivetti_faces['target']
    olivetti_faces_description = olivetti_faces['DESCR']

    logger.info('loading Reuters Corpus Volume I data')
    reuters_pickle = data_folder + 'reuters.pkl'
    rcv1_bunch = fetch_rcv1(subset='all', download_if_missing=True, random_state=random_state)
    rcv1_data = rcv1_bunch['data']
    rcv1_target = rcv1_bunch['target']
    rcv1_sample_id = rcv1_bunch['sample_id']
    rcv1_target_names = rcv1_bunch['target_names']
    rcv1_description = rcv1_bunch['DESCR']
    logger.info('Reuters data has description %s' % str(rcv1_description).strip())

    logger.info('loading sample images data')
    # Suppress any DeprecationWarning raised while loading the sample images.
    with catch_warnings():
        filterwarnings('ignore', category=DeprecationWarning)
        sample_images_bunch = load_sample_images()
    sample_images = sample_images_bunch['images']
    sample_images_filenames = sample_images_bunch['filenames']
    sample_images_description = sample_images_bunch['DESCR']

    logger.info('done')

    # Log total elapsed wall-clock time as HH:MM:SS.ss.
    finish_time = time()
    elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
    elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
    logger.info('Time: {:0>2}:{:0>2}:{:05.2f}'.format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
    console_handler.close()
    logger.removeHandler(console_handler)
コード例 #25
0
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "data"
# Figures are written to ./images/data; the directory is created on import.
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)

def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<ext>."""
    target = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)

from sklearn.datasets import load_sample_images

# Show the first sklearn sample image unmodified.
img = load_sample_images()["images"][0]
plt.imshow(img)
plt.axis("off")
plt.title("Original Image")
plt.show()

#from tensorflow.train import BytesList, FloatList, Int64List
#from tensorflow.train import Feature, Features, Example
# Module-level aliases for the tf.train protobuf wrapper types.
BytesList = tf.train.BytesList
FloatList = tf.train.FloatList
Int64List = tf.train.Int64List
Feature = tf.train.Feature
Features = tf.train.Features
Example = tf.train.Example
person_example = Example(
コード例 #26
0
from sklearn.datasets import load_sample_images
import matplotlib.pyplot as plt
import tensorflow as tf

# Display the first sklearn sample image, save the figure to disk, then
# JPEG-encode the raw image array with TensorFlow.
sample_image = load_sample_images()['images'][0]

plt.imshow(sample_image)
plt.axis('off')
plt.title('Original Image')
plt.savefig('TFRecords.jpg')
plt.show()

jpeg_bytes = tf.io.encode_jpeg(sample_image)
コード例 #27
0
ファイル: images.py プロジェクト: amueller/pca
from rpca import RobustPCA
from matplotlib import pyplot as plt
from sklearn import datasets
import numpy as np


# Robust PCA demo: corrupt a grayscale sample image with Laplace noise,
# recover a low-rank approximation, and display both.
model = RobustPCA()

bundle = datasets.load_sample_images()
grays = [np.mean(color_image, axis=-1) for color_image in bundle.images]
noisy = grays[0] + np.random.laplace(scale=5, size=grays[0].shape)
model.fit(noisy)
low_rank = model.embedding_
sparse_residual = noisy - low_rank


plt.figure()
plt.title('Noisy')
plt.imshow(noisy, cmap='gray')


plt.figure()
plt.title('Low-rank')
plt.imshow(low_rank, cmap='gray')

print('Original stats (min=%f, max=%f)' % (np.min(noisy), np.max(noisy)))
print('low rank stats (min=%f, max=%f)' % (np.min(low_rank), np.max(low_rank)))

plt.show()
コード例 #28
0
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.datasets import load_sample_images

# load_sample_images() returns [china, flower] (filenames are sorted), so
# index 0 is the china temple photo.  The original code had the two labels
# swapped; fixed here.
images = load_sample_images()['images']
china = images[0]
flower = images[1]
dataset = np.array(images, np.float32)

batch_size, height, width, channels = dataset.shape

# Two hand-built 7x7 filters: a vertical and a horizontal line detector.
filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)

filters[:, 3, :, 0] = 1  # white vertical line at x == 3
filters[3, :, :, 1] = 1  # white horizontal line at y == 3

x = tf.placeholder(tf.float32, shape=(None, height, width, channels))
conv = tf.nn.conv2d(x, filters, strides=(1, 2, 2, 1), padding='SAME')
# 2x2 max pooling (stride 2) applied directly to the raw images.
max_pool = tf.nn.max_pool(x,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='VALID')
with tf.Session() as sess:
    output = sess.run(max_pool, feed_dict={x: dataset})

# Show channel 0 of each pooled image.
plt.imshow(output[0, :, :, 0], cmap='gray')
plt.show()

plt.imshow(output[1, :, :, 0], cmap='gray')
plt.show()