Code Example #1
File: test_image.py  Project: 123fengye741/pylearn2
# Imports assumed for this extract (the snippet itself does not show them):
import os

from nose.tools import assert_raises, eq_

import pylearn2
from pylearn2.utils.image import load


def test_image_load():
    """
    Test utils.image.load
    """
    assert_raises(AssertionError, load, 1)

    path = os.path.join(pylearn2.__path__[0], 'utils',
                        'tests', 'example_image', 'mnist0.jpg')
    img = load(path)
    eq_(img.shape, (28, 28, 1))
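For reference, a minimal usage sketch of the pylearn2.utils.image helpers that the examples on this page rely on. The file names are placeholders, the rescale_image and dtype keyword arguments are taken from the LFW examples further down, and load is assumed to return a NumPy array of shape (rows, cols, channels):

from pylearn2.utils import image

img = image.load('some_image.png')                    # placeholder path
raw = image.load('some_image.png',                    # keep raw integer pixel values
                 rescale_image=False, dtype='uint8')
image.show(img)                                       # display the loaded array
image.save('some_copy.png', img)                      # write an array back to disk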
Code Example #2
File: dataset.py  Project: cc13ny/galatea
    def __init__(self, iterator, num_examples, image_shape):
        assert len(image_shape) == 2

        T = np.zeros((num_examples,image_shape[0],image_shape[1],3),dtype='float32')
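        # T holds one letterboxed RGB thumbnail per example in
        # (batch, rows, cols, channels) layout; the loop below fills it.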

        for i in xrange(num_examples):
            image_path = iterator.next()
            img = image.load(image_path)
            T[i,:] = make_letterboxed_thumbnail(img, image_shape)

        super(DARPA_ImageNet, self).__init__(
                topo_view = T,
                view_converter = DefaultViewConverter(T.shape[1:]))
Code Example #3
    def __init__(self, iterator, num_examples, image_shape):
        assert len(image_shape) == 2

        T = np.zeros((num_examples, image_shape[0], image_shape[1], 3),
                     dtype='float32')

        for i in xrange(num_examples):
            image_path = iterator.next()
            img = image.load(image_path)
            T[i, :] = make_letterboxed_thumbnail(img, image_shape)

        super(DARPA_ImageNet,
              self).__init__(topo_view=T,
                             view_converter=DefaultViewConverter(T.shape[1:]))
Code Example #4
File: resize_streams_tfd.py  Project: vd114/galatea
from pylearn2.utils import image
import numpy as np
for i in xrange(5000):
    print i
    number = str(i)
    while len(number) != 4:
        number = '0' + number
    img = image.load('/Tmp/video/' + number + '.png')
    out = np.zeros((406, 406, 3))
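    # Upsample the frame 2x by pixel replication: the source image is copied
    # into each of the four interleaved row/column offsets of the 406x406 output.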
    for ofs_r in [0, 1]:
        for ofs_c in [0, 1]:
            out[ofs_r::2, ofs_c::2, :] = img
    image.save('/Tmp/video_resized/' + number + '.png', out)
Code Example #5
File: lcn.py  Project: yeahq/pylearn2
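# Local contrast normalization: divide the mean-centered input by the local
# divisor (floored at 1), then move the channel axis to the end,
# (batch, channel, row, col) -> (batch, row, col, channel).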
new_X = centered_X / T.maximum(1., divisor)

new_X = new_X.dimshuffle(0, 2, 3, 1)

from theano import function
f = function([orig_X], new_X)

j = 0
for path in paths:
    if j % 100 == 0:
        print j
    try:
        raw_path = path
        path = base + '/' + path
        img = image.load(path)

        #image.show(img)
        if len(img.shape) == 3 and img.shape[2] == 4:
            img = img[:, :, 0:3]
        img = img.reshape(*([1] + list(img.shape))).astype('float32')
        channels = [f(img[:, :, :, i:i + 1]) for i in xrange(img.shape[3])]
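        # Grayscale inputs yield a single channel here; replicate it three
        # times so the result is RGB.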
        if len(channels) != 3:
            assert len(channels) == 1
            channels = [channels[0]] * 3
        img = np.concatenate(channels, axis=3)
        img = img[0, :, :, :]

        assert isfinite(img)

        path = outdir + '/' + raw_path
Code Example #6
    def __init__(self,
                 lfw_path,
                 filelist_path,
                 embedding_file=None,
                 center=False,
                 scale=False,
                 start=None,
                 stop=None,
                 gcn=None,
                 shuffle=False,
                 rng=None,
                 seed=132987,
                 axes=('b', 0, 1, 'c'),
                 img_shape=(3, 250, 250)):
        self.axes = axes

        self.img_shape = img_shape
        C, H, W = img_shape
        self.img_size = np.prod(self.img_shape)

        files = []
        with open(filelist_path, 'r') as filelist_f:
            files = [line.strip() for line in filelist_f]

        # Load raw pixel integer values
        dtype = 'uint8'
        X = np.zeros((len(files), W, H, C), dtype=dtype)
        img_ids = []

        for i, line in enumerate(files):
            if '\t' in line:
                # New format: contains image IDs
                img_path, img_id = line.strip().split()
                img_ids.append(int(img_id))
            else:
                img_path = line.strip()

            full_path = os.path.join(lfw_path, img_path)
            im = image.load(full_path, rescale_image=False, dtype=dtype)

            # Handle grayscale images which may not have RGB channels
            if len(im.shape) == 2:
                W, H = im.shape

                # Repeat image 3 times across axis 2
                im = im.reshape(W, H, 1).repeat(3, 2)

            # Swap color channel to front
            X[i] = im

        # Cast to float32, center / scale if necessary
        X = np.cast['float32'](X)

        # Create dense design matrix from topological view
        X = X.reshape(X.shape[0], -1)

        # Prepare img_ids
        if embedding_file is not None:
            if len(img_ids) != len(files):
                raise ValueError("You must provide a filelist with indexes "
                                 "into the embedding array for each image.")
        img_ids = np.array(img_ids, dtype='uint32')
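        # The center / scale options below map raw [0, 255] pixels to [-1, 1]
        # (both), to [-127.5, 127.5] (center only), or to [0, 1] (scale only).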

        if center and scale:
            X[:] -= 127.5
            X[:] /= 127.5
        elif center:
            X[:] -= 127.5
        elif scale:
            X[:] /= 255.

        self.gcn = gcn
        if gcn is not None:
            gcn = float(gcn)
            X = global_contrast_normalize(X, scale=gcn)

        if shuffle:
            rng = make_np_rng(rng, seed, which_method='permutation')
            rand_idx = rng.permutation(len(X))

            X = X[rand_idx]
            img_ids = img_ids[rand_idx]

        if start is not None:
            assert start >= 0
            assert stop > start
            assert stop <= X.shape[0]

            X = X[start:stop]

            if len(img_ids) > 0:
                img_ids = img_ids[start:stop]

        # Load embeddings if provided
        Y = None
        if embedding_file is not None:
            embeddings = np.load(embedding_file)['arr_0']
            assert embeddings.shape[0] >= len(files)

            Y = embeddings[img_ids].astype(theano.config.floatX)

        # create view converter for retrieving topological view
        self.view_converter = dense_design_matrix.DefaultViewConverter(
            (W, H, C), axes)

        # init super class
        super(LFW, self).__init__(X=X, y=Y)

        assert not contains_nan(self.X)

        # Another hack: rename 'targets' to match model expectations
        if embedding_file is not None:
            space, (X_source, y_source) = self.data_specs
            self.data_specs = (space, (X_source, 'condition'))
Code Example #7
File: final_browse.py  Project: cc13ny/galatea
l2_path = '/data/lisatmp/goodfeli/esp/final_l2'
import numpy as np

from pylearn2.utils import image

imbase = '/data/lisatmp/goodfeli/esp/final_images'
ims = sorted(os.listdir(imbase))


for label, im in zip(labels, ims):

    stem = label.split('.')[0]
    assert stem in im

    img = image.load(imbase + '/' + im)

    image.show(img)

    full_label_path = labels_dir + '/' + label
    print 'True labels:'
    fd = open(full_label_path,'r')
    print fd.read()
    fd.close()

    full_l2_path = l2_path + '/' + label.split('.')[0] + '.npy'

    l2 = np.load(full_l2_path)

    y = f(l2)
Code Example #8
f = function([X], y)

l2_path = '/data/lisatmp/goodfeli/esp/final_l2'
import numpy as np

from pylearn2.utils import image

imbase = '/data/lisatmp/goodfeli/esp/final_images'
ims = sorted(os.listdir(imbase))

for label, im in zip(labels, ims):

    stem = label.split('.')[0]
    assert stem in im

    img = image.load(imbase + '/' + im)

    image.show(img)

    full_label_path = labels_dir + '/' + label
    print 'True labels:'
    fd = open(full_label_path, 'r')
    print fd.read()
    fd.close()

    full_l2_path = l2_path + '/' + label.split('.')[0] + '.npy'

    l2 = np.load(full_l2_path)

    y = f(l2)
Code Example #9
from pylearn2.utils import image
import numpy as np
for i in xrange(5000):
    print i
    number = str(i)
    while len(number) != 4:
        number = '0' + number
    img = image.load('video/' + number + '.png')
    out = np.zeros((480, 480, 3))
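    # Upscale the 32x32 source to 480x480: each source pixel fills a
    # 15x15 block of the output, channel by channel.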
    for j in range(32):
        for k in xrange(32):
            for l in xrange(3):
                out[j * 15:(j + 1) * 15, k * 15:(k + 1) * 15, l] = img[j, k, l]
    image.save('video_resized/' + number + '.png', out)
Code Example #10
File: labeler.py  Project: cc13ny/galatea
labels_dir = '/data/lisatmp/goodfeli/esp/word_labels'
from pylearn2.utils import image
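# Poll the image directory forever: show each image that has no label file
# yet and interactively prompt the user for its labels.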

while True:
    candidates = os.listdir(images_path)
    for path in candidates:
        word = path.split('.')[0]
        if word in labeled:
            continue
        label_path = labels_dir + '/' + word +'.txt'

        if os.path.exists(label_path):
            continue

        image_path = images_path + '/' + path
        img = image.load(image_path)
        image.show(img)

        f = open(label_path, 'w')

        idx = 1
        print 'Suggested label: ',word
        x = ''
        while x not in ['y', 'n']:
            x = raw_input('ok? ')
            if x == 'y':
                idx += 1
                f.write(word+'\n')

        while True:
            x = raw_input('label %d: ' %idx)
Code Example #11
labels_dir = '/data/lisatmp/goodfeli/esp/word_labels'
from pylearn2.utils import image

while True:
    candidates = os.listdir(images_path)
    for path in candidates:
        word = path.split('.')[0]
        if word in labeled:
            continue
        label_path = labels_dir + '/' + word + '.txt'

        if os.path.exists(label_path):
            continue

        image_path = images_path + '/' + path
        img = image.load(image_path)
        image.show(img)

        f = open(label_path, 'w')

        idx = 1
        print 'Suggested label: ', word
        x = ''
        while x not in ['y', 'n']:
            x = raw_input('ok? ')
            if x == 'y':
                idx += 1
                f.write(word + '\n')

        while True:
            x = raw_input('label %d: ' % idx)
Code Example #12
File: resize48.py  Project: cc13ny/galatea
from pylearn2.utils import image
import numpy as np
for i in xrange(900):
    print i
    number = str(i)
    while len(number) != 3:
        number = '0' + number
    img = image.load('video/' + number + '.png')
    out = np.zeros((480, 480, 3))
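    # Upscale the 48x48 source to 480x480: each pixel fills a 10x10 block,
    # using only channel 0 so the grayscale values are replicated across RGB.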
    for j in range(48):
        for k in xrange(48):
            for l in xrange(3):
                out[j*10:(j+1)*10, k*10:(k+1)*10, l] = img[j,k,0]
    image.save('video_resized/' + number + '.png', out)
Code Example #13
File: dataset.py  Project: hit-computer/adversarial
    def __init__(
        self,
        lfw_path,
        filelist_path,
        embedding_file=None,
        center=False,
        scale=False,
        start=None,
        stop=None,
        gcn=None,
        shuffle=False,
        rng=None,
        seed=132987,
        axes=("b", 0, 1, "c"),
        img_shape=(3, 250, 250),
    ):
        self.axes = axes

        self.img_shape = img_shape
        C, H, W = img_shape
        self.img_size = np.prod(self.img_shape)

        files = []
        with open(filelist_path, "r") as filelist_f:
            files = [line.strip() for line in filelist_f]

        # Load raw pixel integer values
        dtype = "uint8"
        X = np.zeros((len(files), W, H, C), dtype=dtype)
        img_ids = []

        for i, line in enumerate(files):
            if "\t" in line:
                # New format: contains image IDs
                img_path, img_id = line.strip().split()
                img_ids.append(int(img_id))
            else:
                img_path = line.strip()

            full_path = os.path.join(lfw_path, img_path)
            im = image.load(full_path, rescale_image=False, dtype=dtype)

            # Handle grayscale images which may not have RGB channels
            if len(im.shape) == 2:
                W, H = im.shape

                # Repeat image 3 times across axis 2
                im = im.reshape(W, H, 1).repeat(3, 2)

            # Swap color channel to front
            X[i] = im

        # Cast to float32, center / scale if necessary
        X = np.cast["float32"](X)

        # Create dense design matrix from topological view
        X = X.reshape(X.shape[0], -1)

        # Prepare img_ids
        if embedding_file is not None:
            if len(img_ids) != len(files):
                raise ValueError("You must provide a filelist with indexes " "into the embedding array for each image.")
        img_ids = np.array(img_ids, dtype="uint32")

        if center and scale:
            X[:] -= 127.5
            X[:] /= 127.5
        elif center:
            X[:] -= 127.5
        elif scale:
            X[:] /= 255.0

        self.gcn = gcn
        if gcn is not None:
            gcn = float(gcn)
            X = global_contrast_normalize(X, scale=gcn)

        if shuffle:
            rng = make_np_rng(rng, seed, which_method="permutation")
            rand_idx = rng.permutation(len(X))

            X = X[rand_idx]
            img_ids = img_ids[rand_idx]

        if start is not None:
            assert start >= 0
            assert stop > start
            assert stop <= X.shape[0]

            X = X[start:stop]

            if len(img_ids) > 0:
                img_ids = img_ids[start:stop]

        # Load embeddings if provided
        Y = None
        if embedding_file is not None:
            embeddings = np.load(embedding_file)["arr_0"]
            assert embeddings.shape[0] >= len(files)

            Y = embeddings[img_ids].astype(theano.config.floatX)

        # create view converter for retrieving topological view
        self.view_converter = dense_design_matrix.DefaultViewConverter((W, H, C), axes)

        # init super class
        super(LFW, self).__init__(X=X, y=Y)

        assert not contains_nan(self.X)

        # Another hack: rename 'targets' to match model expectations
        if embedding_file is not None:
            space, (X_source, y_source) = self.data_specs
            self.data_specs = (space, (X_source, "condition"))