def extract_dataset(net_message):
    assert net_message.layer[0].type == "DenseImageData"
    source = net_message.layer[0].dense_image_data_param.source
    with open(source) as f:
        data = f.read().split()
    ims = ImageCollection(data[::2])
    labs = ImageCollection(data[1::2])
    assert len(ims) == len(labs) > 0
    return ims, labs
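# Note: the source file named in dense_image_data_param is expected to
# alternate image and label paths, one per line, e.g. (hypothetical layout):
#
#   /data/frames/0001.png
#   /data/labels/0001.png
#   /data/frames/0002.png
#   /data/labels/0002.png
#
# which is why data[::2] yields the images and data[1::2] the labels.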
Example 2
    def writeTrainTFRecord(self):
        with open(self.in_file) as f:
            files = [x.strip() for x in f.readlines()]

        train_writer = tf.python_io.TFRecordWriter(self.out_path + "train.tfrecord")
        test_writer  = tf.python_io.TFRecordWriter(self.out_path + "validation.tfrecord")
        
        for train_indices,test_indices in ShuffleSplit(n_splits=1,test_size=.33).split(files):        
            print("******************************processing train files...*************************************")
            for train_index in train_indices:
                train_file = files[train_index]
                train_data_path = "{}{}/*".format(self.dat_path,train_file)
                train_labl_path = "{}{}.png".format(self.lab_path, train_file)
                train_data = np.array(ImageCollection(train_data_path)).astype(np.float32)
                train_labl = io.imread(train_labl_path).astype(np.float32)
                m_train,n_train = train_labl.shape
                ddat_train = np.concatenate([train_data,train_labl.reshape(1,m_train,n_train)])
                print("Converting numpy arrays to raw strings..")
                train_ddat_raw = ddat_train.tobytes()
                print("Writing raw strings to files.")
                train_example = tf.train.Example(
                    features = tf.train.Features(
                        feature = {
                            'ddat' : tf.train.Feature(bytes_list=tf.train.BytesList(value=[train_ddat_raw])),
                            'm'    : tf.train.Feature(int64_list=tf.train.Int64List(value=[m_train])),
                            'n'    : tf.train.Feature(int64_list=tf.train.Int64List(value=[n_train]))
                        }
                    )
                )
                train_writer.write(train_example.SerializeToString())
            print("done.")
            train_writer.close()
            print("******************************processing validation files...*************************************")
            for test_index in test_indices:
                test_file = files[test_index]
                test_data_path = "{}{}/*".format(self.dat_path,test_file)
                test_labl_path = "{}{}.png".format(self.lab_path, test_file)
                test_data = np.array(ImageCollection(test_data_path)).astype(np.float32)
                test_labl = io.imread(test_labl_path).astype(np.float32)
                m_test,n_test = test_labl.shape
                ddat_test = np.concatenate([test_data,test_labl.reshape(1,m_test,n_test)])
                print("Converting numpy arrays to raw strings..")
                test_ddat_raw = ddat_test.tobytes()
                print("Writing raw strings to files.")
                test_example = tf.train.Example(
                    features = tf.train.Features(
                        feature = {
                            'ddat' : tf.train.Feature(bytes_list=tf.train.BytesList(value=[test_ddat_raw])),
                            'm'    : tf.train.Feature(int64_list=tf.train.Int64List(value=[m_test])),
                            'n'    : tf.train.Feature(int64_list=tf.train.Int64List(value=[n_test]))
                        }
                    )
                )
                test_writer.write(test_example.SerializeToString())
            print("done.")
            test_writer.close()
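# A minimal sketch of how these records could be read back, assuming the
# same TF1-era API the writer uses; parse_ddat_example is an illustrative
# name, not part of the original class.
def parse_ddat_example(serialized):
    features = tf.parse_single_example(
        serialized,
        features={
            'ddat': tf.FixedLenFeature([], tf.string),
            'm': tf.FixedLenFeature([], tf.int64),
            'n': tf.FixedLenFeature([], tf.int64),
        })
    # The flat float32 buffer holds the data channels plus one trailing
    # label plane, each of shape (m, n).
    ddat = tf.decode_raw(features['ddat'], tf.float32)
    return ddat, features['m'], features['n']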
Example 3
def extract_dataset(net_message):
    assert net_message.layer[0].type == "DenseImageData"
    # source = net_message.layer[0].dense_image_data_param.source
    source = '/home/czm/CapStone/ENet/dataset/train_fine_cityscapes.txt'
    with open(source) as f:
        data = f.read().split()
    ims = ImageCollection(data[::2])
    labs = ImageCollection(data[1::2])
    assert len(ims) == len(labs) > 0
    return ims, labs
Example 4
def extract_dataset(net_message):
    assert net_message.layer[0].type == "ImageSegData"
    source = net_message.layer[0].image_data_param.source
    root_folder = net_message.layer[0].image_data_param.root_folder
    with open(source) as f:
        data = f.read().split()
    ims = ImageCollection(data[::3])
    labs = ImageCollection(data[1::3])
    assert len(ims) == len(labs) > 0
    return ims, labs
Example 5
def load_dataset_by_dir(_dir):
	pos_dir = _dir + "POSITIVE/*.png"
	ic_pos = ImageCollection(pos_dir, load_func=load_and_pp)
	X_pos = concatenate_images(ic_pos) 
	y_pos = np.array([1 for _ in range(len(ic_pos))])
	neg_dir = _dir + "NEGATIVE/*.png"
	ic_neg = ImageCollection(neg_dir, load_func=load_and_pp)
	X_neg = concatenate_images(ic_neg) 
	y_neg = np.array([0 for _ in range(len(ic_neg))])
	X = np.concatenate((X_pos, X_neg))
	y = np.concatenate((y_pos, y_neg))	
	return (X, y)
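# Hypothetical usage, assuming _dir ends with a slash and contains
# POSITIVE/ and NEGATIVE/ subfolders of PNGs:
#
#   X, y = load_dataset_by_dir("dataset/train/")
#   X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)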
Example 6
def extract_DenseMMImageData(net_message):
    assert net_message.layer[0].type == "DenseMMImageData"
    source = net_message.layer[0].dense_mm_image_data_param.source

    with open(source) as f:
        data = f.read().split()
    images = ImageCollection(data[::3])
    d_images = ImageCollection(data[1::3])
    labels = ImageCollection(data[2::3])
    assert len(images) == len(labels) > 0
    assert len(images) == len(d_images) > 0

    return images, d_images, labels
Example 7
def load_images():
  """Loads ImageCollection into memory
  Returns:
    Two instances of skimage.io.ImageCollection
  """
  def _loader(f):
    return imread(f).astype(np.uint8)

  masks = ImageCollection('raw_masks/*',load_func=_loader)
  originals = ['image/' + os.path.basename(i).rsplit('.', 1)[0] + '.*' for i in masks.files]
  originals = ImageCollection(originals,load_func=_loader)

  return masks, originals
Example 8
class TestImageCollection():
    pattern = [
        os.path.join(data_dir, pic) for pic in ['camera.png', 'color.png']
    ]
    pattern_matched = [
        os.path.join(data_dir, pic) for pic in ['camera.png', 'moon.png']
    ]

    def setUp(self):
        self.collection = ImageCollection(self.pattern)
        self.collection_matched = ImageCollection(self.pattern_matched)

    def test_len(self):
        assert len(self.collection) == 2

    def test_getitem(self):
        num = len(self.collection)
        for i in range(-num, num):
            assert type(self.collection[i]) is ioImage
        assert_array_almost_equal(self.collection[0], self.collection[-num])

        #assert_raises expects a callable, hence this do-very-little func
        def return_img(n):
            return self.collection[n]

        assert_raises(IndexError, return_img, num)
        assert_raises(IndexError, return_img, -num - 1)

    def test_slicing(self):
        assert type(self.collection[:]) is ImageCollection
        assert len(self.collection[:]) == 2
        assert len(self.collection[:1]) == 1
        assert len(self.collection[1:]) == 1
        assert_array_almost_equal(self.collection[0], self.collection[:1][0])
        assert_array_almost_equal(self.collection[1], self.collection[1:][0])
        assert_array_almost_equal(self.collection[1], self.collection[::-1][0])
        assert_array_almost_equal(self.collection[0], self.collection[::-1][1])

    def test_files_property(self):
        assert isinstance(self.collection.files, list)

        def set_files(f):
            self.collection.files = f

        assert_raises(AttributeError, set_files, 'newfiles')

    def test_custom_load(self):
        load_pattern = [(1, 'one'), (2, 'two')]

        def load_fn(x):
            return x

        ic = ImageCollection(load_pattern, load_func=load_fn)
        assert_equal(ic[1], (2, 'two'))

    def test_concatenate(self):
        ar = self.collection_matched.concatenate()
        assert_equal(ar.shape, (len(self.collection_matched), ) +
                     self.collection[0].shape)
        assert_raises(ValueError, self.collection.concatenate)
Example 9
    def __init__(self,
                 directory,
                 train=True,
                 imsize=(256, 256),
                 num_channels=3,
                 scale=True,
                 invert_white_images=True):
        """
        Class to read in our training and testing data, resize it, and store
        some metadata, including the image id and original size.  If we need to
        change the preprocessing for the images, we can do so in the _process
        method.
        """

        #   Sets all attributes.
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")
        for arg, val in values.items():
            setattr(self, arg, val)

        self.IMG_MAX = 255.0

        data_pattern = os.path.join(directory, "**/images/*.png")

        self.metadata_ = []
        self.masks_ = []
        self.metadata_columns = ["image_id", "orig_shape"]

        self.data_ic_ = ImageCollection(data_pattern)
Example 10
def load_frames(folder_name, offset=0, desired_fps=3, max_frames=40):
    """
    :param folder_name: Filename with a gif
    :param offset: How many frames into the gif we want to start at
    :param desired_fps: How many fps we'll sample from the image
    :return: [T, h, w, 3] GIF
    """
    coll = ImageCollection(folder_name + '/out-*.jpg', mode='RGB')

    try:
        duration_path = folder_name + '/duration.txt'
        with open(duration_path, 'r') as f:
            durs = f.read().splitlines()
            fps = 100.0 / float(durs[0])
    except (IOError, ValueError, IndexError):
        # duration.txt missing or malformed; fall back to a default rate
        fps = 10

    # want to scale it to desired_fps
    keep_ratio = max(1., fps / desired_fps)

    frames = np.arange(offset, len(coll), keep_ratio).astype(int)[:max_frames]

    def _add_chans(img):
        if img.ndim == 3:
            return img
        return np.stack([img] * 3, -1)

    imgs_concat = concatenate_images([_add_chans(coll[f]) for f in frames])
    assert imgs_concat.ndim == 4
    return imgs_concat
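# Hypothetical usage: sample a gif pre-extracted to out-*.jpg frames at
# roughly 3 fps, keeping at most 40 of them.
#
#   gif = load_frames('gifs/0001', desired_fps=3, max_frames=40)
#   # gif.shape == (T, h, w, 3)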
Example 11
    def test_custom_load(self):
        load_pattern = [(1, 'one'), (2, 'two')]

        def load_fn(x):
            return x

        ic = ImageCollection(load_pattern, load_func=load_fn)
        assert_equal(ic[1], (2, 'two'))
Example 12
    def __init__(self, cfg):
        super(SaltTest, self).__init__()

        self.cfg = cfg

        self.imgs = ImageCollection(os.path.join(TEST_IMG_DIR, '*.png'),
                                    conserve_memory=False,
                                    load_func=_imread_img)
Example 13
class TestImageCollection():
    pattern = [os.path.join(data_dir, pic) for pic in ['camera.png',
                                                       'color.png']]
    pattern_matched = [os.path.join(data_dir, pic) for pic in 
                                                    ['camera.png', 'moon.png']]

    def setUp(self):
        self.collection = ImageCollection(self.pattern)
        self.collection_matched = ImageCollection(self.pattern_matched)

    def test_len(self):
        assert len(self.collection) == 2

    def test_getitem(self):
        num = len(self.collection)
        for i in range(-num, num):
            assert type(self.collection[i]) is np.ndarray
        assert_array_almost_equal(self.collection[0],
                                  self.collection[-num])

        #assert_raises expects a callable, hence this do-very-little func
        def return_img(n):
            return self.collection[n]
        assert_raises(IndexError, return_img, num)
        assert_raises(IndexError, return_img, -num - 1)

    def test_slicing(self):
        assert type(self.collection[:]) is ImageCollection
        assert len(self.collection[:]) == 2
        assert len(self.collection[:1]) == 1
        assert len(self.collection[1:]) == 1
        assert_array_almost_equal(self.collection[0], self.collection[:1][0])
        assert_array_almost_equal(self.collection[1], self.collection[1:][0])
        assert_array_almost_equal(self.collection[1], self.collection[::-1][0])
        assert_array_almost_equal(self.collection[0], self.collection[::-1][1])

    def test_files_property(self):
        assert isinstance(self.collection.files, list)

        def set_files(f):
            self.collection.files = f
        assert_raises(AttributeError, set_files, 'newfiles')

    def test_custom_load(self):
        load_pattern = [(1, 'one'), (2, 'two')]

        def load_fn(x):
            return x

        ic = ImageCollection(load_pattern, load_func=load_fn)
        assert_equal(ic[1], (2, 'two'))

    def test_concatenate(self):
        ar = self.collection_matched.concatenate()
        assert_equal(ar.shape, (len(self.collection_matched),) + 
                                self.collection[0].shape)
        assert_raises(ValueError, self.collection.concatenate)
Example 14
def get_train_df():
    train_path = get_paths()["train_data_path"]
    train_format = get_paths()["train_data_format"]
    real_path = os.path.join(os.getcwd(), train_path)
    imageCollections = [
        ImageCollection(os.path.join(train_path, subdirname, train_format))
        for _, dirnames, _ in os.walk(real_path) for subdirname in dirnames
    ]
    return imageCollections
Example 15
def demo(path, terms, collection=True, quant_method='oct'):
    cterms = pd.read_csv(terms + '.csv')
    cterms = cterms.set_index('index')

    if collection:
        from skimage.io import ImageCollection
        img_set = ImageCollection(path)
        for img in img_set:
            _demo(img, cterms, quant_method)
    else:
        img = io.imread(path)
        _demo(img, cterms, quant_method)
Example 16
def load_test_data(test_dir, sample):
    print "loading test data!"
    cur_dir = test_dir + "images/"
    onlyfiles = [f for f in listdir(cur_dir) if isfile(join(cur_dir, f))]
    onlyfiles = [cur_dir + f for f in onlyfiles]
    numfiles = int(round(len(onlyfiles) * sample))
    onlyfiles = onlyfiles[0:numfiles]
    images = concatenate_images(
        ImageCollection(onlyfiles, load_func=imreadconvert))
    print "loaded test data:"
    print str(images.shape)
    return images
Example 17
    def _load_mask(self, image_id):
        """
        Function to load masks of specific image.
        """

        mask_pattern = os.path.join(self.directory, image_id, "masks/*.png")
        ic = ImageCollection(mask_pattern)

        mask = np.zeros(self.imsize, dtype='uint8')
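        # Give each instance a distinct integer label (1 + lbl); this
        # encoding assumes the individual masks do not overlap.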
        for lbl, indiv_mask in enumerate(ic):
            mask += ((
                1 + lbl) * self._process(indiv_mask, True).astype('uint8'))

        return mask
Example 18
def slices_to_npz(glob_pattern: str, out_file: str, compressed: bool = True):
    """Takes a glob pattern to a set of volume slices and composes an npz file."""

    from skimage.io import ImageCollection, imread

    images_coll = ImageCollection(glob_pattern.replace('"',
                                                       '').replace("'", ''),
                                  load_func=lambda x: imread(x, as_gray=True))

    if compressed:
        np.savez_compressed(
            out_file.replace('"', '').replace("'", ''), images_coll)
    else:
        np.savez(out_file.replace('"', '').replace("'", ''), images_coll)
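# A minimal sketch of reading the volume back; np.savez stores a single
# positional argument under the default key 'arr_0':
#
#   volume = np.load('slices.npz')['arr_0']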
Example 19
    def load(self, in_dir):
        '''Load images in in_dir'''

        # Load images
        if not Path(in_dir).is_dir():
            exit("[ERROR] Input directory doesn't exist.")

        p = [f'{str(Path(in_dir))}/*.{ex}' for ex in self.extension]
        p = os.pathsep.join(p)
        self.images = ImageCollection(p)
        self.num_imgs = len(self.images)

        if self.num_imgs < 2:
            exit("[ERROR] No images.")

        # convert images to gray scale
        self.grays = [rgb2gray(img) for img in self.images]
Example 20
    def get_collection(self, time_range=None):
        """
        Returns an ImageCollection object.

        Parameters
        ----------
        time_range: range (int), optional
            Time steps to extract, defaults to the entire length of video.

        Returns
        -------
        output: ImageCollection
            Collection of images iterator.
        """
        if not time_range:
            time_range = range(int(self.frame_count()))
        return ImageCollection(time_range, load_func=self.get_index_frame)
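# Hypothetical usage, assuming `video` is an instance of the class this
# method belongs to: grab the first 100 frames lazily.
#
#   frames = video.get_collection(time_range=range(100))
#   first_frame = frames[0]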
Example 21
def load_ifood(im_dir, label_dir, dim):
    """Load images and labels.

    Parameters
    ----------
    im_dir : location of the images
    label_dir : location of the labels csv file
    dim : dimension of the resized images
    """
    # TODO: joblib to parallelize
    # https://scikit-image.org/docs/dev/user_guide/tutorial_parallelization.html
    images = ImageCollection(im_dir)
    images_resized = [resize(image, dim) for image in images]
    num_images = len(images_resized)
    image_arr = np.reshape(concatenate_images(images_resized), (num_images, -1))

    labels = pd.read_csv(label_dir)
    labels = labels.sort_values(by=['img_name'])
    labels = labels.head(num_images)
    y = labels["label"].to_numpy()
    unique_labels = labels["label"].unique()
    return image_arr, y, unique_labels
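# Hypothetical usage, resizing every image to 32x32 before flattening:
#
#   X, y, classes = load_ifood('train_set/*.jpg', 'train_labels.csv', (32, 32))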
Example 22
    def getData(self):
        # Loading scan spec data.
        scan = self.specData[self.scanNumber]

        # Reading scan images.
        stackDirectory = os.path.join(self.dir,
                                      'S{:03d}'.format(self.scanNumber))
        time.sleep(0.75)
        print('Reading images in %s' % stackDirectory)
        images = ImageCollection(stackDirectory + os.sep + '*.tif')
        stack = np.array([imp for imp in images])
        print('Found %d images with dimensions: %s\n' %
              (stack.shape[0], format(stack.shape[1:])))

        # Checking dimensions of scan and # of images.
        if stack.shape[0] != scan.data.shape[0]:
            message = '# of images: %d and # of points: %d in scan %s%03d do not match!' % (
                stack.shape[0], scan.data.shape[0], 'S', self.scanNumber)
            warn(message)
            raise IndexError
        return scan, stack
Example 23
def load_validation_data(valid_dir, label_dict, sample):
    print "loading validation data!"
    image_to_id = open(valid_dir + "val_annotations.txt",
                       'r').read().split('\n')
    while '' in image_to_id:
        image_to_id.remove('')
    image_names = []
    image_labels = []
    for entry in image_to_id:
        tokens = entry.split('\t')
        image_names.append(valid_dir + 'images/' + tokens[0])
        image_labels.append(label_dict[tokens[1]])
    num_entries = int(round(len(image_names) * sample))
    image_names = image_names[0:num_entries]
    images = concatenate_images(
        ImageCollection(image_names, load_func=imreadconvert))
    print "image val shape:" + str(images.shape)
    print "loaded validation data:"
    image_labels = np.asarray(image_labels[0:num_entries])
    image_labels = np.reshape(image_labels, (len(image_labels), ))
    image_labels = one_hot_encode(image_labels)
    return images, image_labels
Example 24
def load_train_data(train_dir, label_dict, sample):
    print "loading training data!"
    nsamples = int(round(sample * 500))  #500 images for each of 200 labels
    file_names = []
    labels = []
    for label in label_dict:
        #print str(label)
        cur_dir = train_dir + label + "/images"
        onlyfiles = [f for f in listdir(cur_dir)
                     if isfile(join(cur_dir, f))][0:nsamples]
        onlyfiles = [cur_dir + '/' + f for f in onlyfiles]
        file_names = file_names + onlyfiles
        cur_labels = nsamples * [label_dict[label]]
        labels = labels + cur_labels
    X_train = concatenate_images(
        ImageCollection(file_names, load_func=imreadconvert))
    print "loaded training data"
    print str(X_train.shape)
    Y_train = np.asarray(labels)
    Y_train = np.reshape(Y_train, (len(Y_train), ))
    Y_train = one_hot_encode(Y_train)
    print(Y_train.shape)
    return X_train, Y_train
Example 25
    def get_stream_label_intensity(self, stream_fname, labeled_back, SBtype=0, plot_hist=True):
        # read stream tiff
        stream_img = ImageCollection(stream_fname)

        # make hist dir
        if SBtype == 2:
            os.makedirs(self.out_dir + "/hist", exist_ok=True)

        time = [i * self.frame_time for i in range(len(stream_img))]
        int_df = pd.DataFrame([])
        self.back_val = np.array([])

        for i in stream_img:
            int_df = self.get_label_intensity(i, int_df, labeled_back, SBtype=SBtype)

        # remove background
        if SBtype == 2:
            back_intensity, counts = stats.mstats.mode(self.back_val)
            int_df = int_df - back_intensity
            self.back += [back_intensity]
            if plot_hist:
                # plot background hist
                plt.figure()
                plt.hist(self.back_val, bins=300, range=(0, 1200))
                figname = stream_fname.split("/")[-1]
                figname = figname.split(".")[0]
                plt.savefig(self.out_dir + "/hist/back_hist_" + figname + ".png")
                plt.clf()
                plt.close()

        label_name = ["label" + str(i + 1) for i in range(self.label_num)]
        int_df.columns = label_name
        int_df.index = time
        return int_df
Example 26
def im_start(pic,path,box):
    """Return an array of images."""
    im_matrix = ImageCollection(
            pic,path=path,box=box,
            load_func=imread_x)
    return im_matrix.concatenate()
Example 27
    def to_MultiImage(self, interval):
        # loadingList = [os.path.join(self.path + "", name) for name in self.files]
        loadingList = self.files
        return MultiImage(ImageCollection(loadingList, load_func=imread),
                          interval)
Example 28
def imread_collection(x, conserve_memory=True):
    assert conserve_memory == False
    assert x == '*.png'
    return ImageCollection([5, 10], load_func=lambda x: np.zeros((x, x)))
Example 29
#   orig_figures. After that, we print the number of images in this
#   dataset.


def imread_convert(image):
    """Support function for ImageCollection. Converts the input image to
    gray.
    """

    return imread(image, as_gray=True)


files_mica = 'orig_figures/*mica*.tif'
files_apatite = 'orig_figures/*apatite*.tif'

imgset_mica = ImageCollection(load_pattern=files_mica,
                              load_func=imread_convert)
imgset_apatite = ImageCollection(load_pattern=files_apatite,
                                 load_func=imread_convert)

print('Number of images on dataset:', len(imgset_mica), 'mica,',
      len(imgset_apatite), 'apatite.')

# Binarizing images
# * Here we binarize all images using different algorithms: Otsu, Yen,
#   Li, ISODATA, triangle, MLSS.
# * We also perform some cleaning actions:
#  ** remove_small_objects(), in its default settings, removes objects
#     with an area smaller than 64 px.
#  ** binary_fill_holes() fills holes contained in objects.
#  ** clear_rd_border() removes objects touching the lower and right
#     borders. These objects could not be identified with precision.
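# A minimal sketch of the Otsu branch of the pipeline described above,
# assuming dark tracks on a light background; clear_rd_border() and the
# MLSS method come from the source project and are not reproduced here.
from scipy.ndimage import binary_fill_holes
from skimage.filters import threshold_otsu
from skimage.morphology import remove_small_objects

def binarize_otsu(image):
    binary = image < threshold_otsu(image)  # foreground darker than background
    binary = remove_small_objects(binary)   # default: objects under 64 px removed
    return binary_fill_holes(binary)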
Example 30
from skimage.io import ImageCollection, imread, imsave
from skimage import img_as_float, img_as_ubyte

args = ArgumentParser()
args.add_argument("--base_dir")
args.add_argument("--model")
args.add_argument("--out_dir")
args.add_argument("--data")
#args.add_argument("base_dir")
#args.add_argument("model")
#args.add_argument("out_dir")
#args.add_argument("data")
args.add_argument("--is_3d", default=False)
args = args.parse_args()

model = CARE(config=None, name=args.model, basedir=args.base_dir)
#data = ImageCollection("training_data/val/low_snr_extracted_z/*.tif")
data = ImageCollection(args.data)
axes = "ZYX" if bool(args.is_3d) else "YX"

if not exists(args.out_dir):
    makedirs(args.out_dir)

for i in range(len(data)):
    im = img_as_float(data[i])
    r = model.predict(im, axes)
    #r = (r - r.min()) / (r.max() - r.min())
    r = img_as_ubyte(r)
    imsave(join(args.out_dir, f"{args.model}_{basename(data.files[i])}"), r)

Example 31
    def setUp(self):
        self.images = ImageCollection('tests/images/*', load_func=allrgb.read_image)
Example 32
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 22:28:46 2020

@author: MightyA
"""

import matplotlib.pyplot as plt
from skimage.io import imread, imshow, ImageCollection, imsave
from skimage.transform import resize
import skimage.io as io
import glob

allow_pickle = True

from tempfile import TemporaryFile
outfile = TemporaryFile()

image_collection = ImageCollection('bilder/*')

cropped_images = []

for img in image_collection:
    cropped_images.append(resize(img, (224, 224)))

img_count = 1
for cropped_img in cropped_images:
    imsave('result/img' + str(img_count) + ".jpg", cropped_img)
    img_count += 1
Example 33
    def setUp(self):
        self.collection = ImageCollection(self.pattern)
        self.collection_matched = ImageCollection(self.pattern_matched)