Example 1
def bifurcated_regions(n):
    """
    Generate an intensity table with diagonal coordinates ((1, 1), (2, 2), ..., (n, n)) over 3
    channels and 4 rounds, where intensities are randomly generated.

    Split the region into two block-diagonal cells which together encompass 1/2 of the total area
    but all of the points in the domain, since the intensity table is diagonal.
    """

    np.random.seed(777)
    data = np.random.random_sample((n, 3, 4))
    diagonal_intensities = intensity_table_factory(data)

    x = diagonal_intensities[Indices.X.value].max() + 1
    y = diagonal_intensities[Indices.Y.value].max() + 1
    box_one_coords = [[0, 0], [0, np.floor(x / 2)], [np.ceil(y / 2), 0],
                      [np.floor(y / 2), np.floor(x / 2)]]
    box_two_coords = [[np.floor(y / 2), np.floor(x / 2)], [np.floor(y / 2), x],
                      [y, np.floor(x / 2)], [y, x]]
    regions = regional.many(
        [regional.one(box_one_coords),
         regional.one(box_two_coords)])

    # assign intensity_table some target values that are just sequential numbers
    diagonal_intensities[Features.TARGET] = (Features.AXIS,
                                             np.arange(n).astype(str))

    return diagonal_intensities, regions
Example 2
def test_construction():
	coords = [[0, 0], [0, 1], [1, 0], [1, 1]]
	r = one(coords)
	assert allclose(r.coordinates, coords)
	coords = [1, 1]
	r = one(coords)
	assert allclose(r.coordinates, [coords])
Example 3
def test_inbounds():
	coords = [[1, 1], [1, 2], [2, 1], [2, 2]]
	v = one(coords).inbounds([0, 0], [3, 3])
	assert v == True
	v = one(coords).inbounds([0, 0], [2, 2])
	assert v == False
	v = one(coords).inbounds([1, 1], [3, 3])
	assert v == True
Example 4
def test_mask_background():
	r = many([one([0, 0]), one([1, 1])])
	im = r.mask(fill='red', stroke=None, background='black')
	assert allclose(im[:,:,0], [[1, 0], [0, 1]])
	assert allclose(im[:,:,1], [[0, 0], [0, 0]])
	assert allclose(im[:,:,2], [[0, 0], [0, 0]])
	im = r.mask(fill=[1, 0, 0], stroke=None, background='black')
	assert allclose(im[:,:,0], [[1, 0], [0, 1]])
	assert allclose(im[:,:,1], [[0, 0], [0, 0]])
	assert allclose(im[:,:,2], [[0, 0], [0, 0]])
Example 5
def test_mask_colors():
	r = many([one([0, 0]), one([1, 1])])
	im = r.mask(fill=['red','blue'], background='black')
	assert allclose(im[:,:,0], [[1, 0], [0, 0]])
	assert allclose(im[:,:,1], [[0, 0], [0, 0]])
	assert allclose(im[:,:,2], [[0, 0], [0, 1]])
	im = r.mask(fill=[[1, 0, 0], [0, 0, 1]], background='black')
	assert allclose(im[:,:,0], [[1, 0], [0, 0]])
	assert allclose(im[:,:,1], [[0, 0], [0, 0]])
	assert allclose(im[:,:,2], [[0, 0], [0, 1]])
Example 6
def test_mask():
	r = many([one([0, 0]), one([1, 1])])
	im = r.mask(fill='red')
	assert allclose(im[:,:,0], [[1, 1], [1, 1]])
	assert allclose(im[:,:,1], [[0, 1], [1, 0]])
	assert allclose(im[:,:,2], [[0, 1], [1, 0]])
	im = r.mask(fill=[1, 0, 0])
	assert allclose(im[:,:,0], [[1, 1], [1, 1]])
	assert allclose(im[:,:,1], [[0, 1], [1, 0]])
	assert allclose(im[:,:,2], [[0, 1], [1, 0]])
Example 7
def test_construction():
	coords = [[0, 0], [0, 1], [1, 0], [1, 1]]
	r = many([one(coords), one(coords)])
	assert r.count == 2
	assert allclose(r.coordinates, [coords, coords])
	r = many([coords, coords])
	assert r.count == 2
	assert allclose(r.coordinates, [coords, coords])
	r = many([coords, coords, coords])
	assert r.count == 3
	assert allclose(r.coordinates, [coords, coords, coords])
Example 8
def test_mask_no_fill():
	coords = [[1, 1]]
	r = one(coords)
	im = r.mask(fill=None, stroke='black', dims=(2,2))
	assert allclose(im[:,:,0], [[0, 0], [0, 1]])
	assert allclose(im[:,:,1], [[0, 0], [0, 1]])
	assert allclose(im[:,:,2], [[0, 0], [0, 1]])
Example 9
    def make_region(geometry):
        assert geometry['geometry']['type'] == "Polygon"

        return regional.one([
            (coordinates[0], coordinates[1])
            for coordinates in geometry['geometry']['coordinates']
        ])
Example 10
    def region_for(label_mat_coo, label):
        ind = label_mat_coo.data == label
        # TODO does this work in 3D?
        x = label_mat_coo.row[ind]
        y = label_mat_coo.col[ind]

        re = regional.one(list(zip(x, y)))
        return re
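The same label-to-region conversion can be sketched standalone on a toy label matrix (the matrix and label values below are made up; only scipy's COO layout and regional.one are assumed):

import numpy as np
import regional
from scipy.sparse import coo_matrix

# toy 3x3 label image holding two labeled objects (values are illustrative)
label_mat_coo = coo_matrix(np.array([[1, 1, 0],
                                     [0, 2, 2],
                                     [0, 0, 2]]))
ind = label_mat_coo.data == 2
coords = list(zip(label_mat_coo.row[ind], label_mat_coo.col[ind]))
region = regional.one(coords)
# region.coordinates -> [[1, 1], [1, 2], [2, 2]]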
Example 11
def test_overlap():
	coords = [[1, 1], [1, 2], [2, 1], [2, 2]]
	v = one(coords).overlap(one([1, 1]), 'fraction')
	assert v == 0.25
	v = one(coords).overlap(one([[1, 1],[1, 2]]), 'fraction')
	assert v == 0.5
	v = one(coords).overlap(one([1, 1]), 'rates')
	assert v == (0.25, 1.0)
	v = one(coords).overlap(one([[1, 1], [1, 2], [3, 3], [4, 4]]), 'rates')
	assert v == (0.5, 0.5)
Example 12
def test_mask():
	coords = [[1, 1]]
	r = one(coords)
	im = r.mask(fill='red')
	assert im.shape == (1, 1, 3)
	assert allclose(im, [[[1, 0, 0]]])
	im = r.mask(fill=[1, 0, 0])
	assert im.shape == (1, 1, 3)
	assert allclose(im, [[[1, 0, 0]]])
Example 13
  def _get(self, index, block, shape):

      offset = (asarray(index[1]) * asarray(shape))[1:]
      dims = block.shape[1:]
      max_size = prod(dims) / 2 if self.max_size == 'full' else self.max_size

      # reshape to t x spatial dimensions
      data = block.reshape(block.shape[0], -1)

      # build and apply NMF model to block
      model = SKNMF(self.k, max_iter=self.max_iter)
      model.fit(clip(data, 0, inf))

      # reconstruct sources as spatial objects in one array
      components = model.components_.reshape((self.k,) + dims)

      # convert from basis functions into shape
      # by median filtering (optional), applying a percentile threshold,
      # finding connected components and removing small objects
      combined = []
      for component in components:
          tmp = component > percentile(component, self.percentile)
          regions = remove_small_objects(label(tmp), min_size=self.min_size)
          ids = unique(regions)
          ids = ids[ids > 0]
          for ii in ids:
              r = regions == ii
              r = median_filter(r, 2)
              coords = asarray(where(r)).T + offset
              if (size(coords) > 0) and (size(coords) < max_size):
                  combined.append(one(coords))

      # merge overlapping sources
      if self.overlap is not None:

          # iterate over source pairs and find a pair to merge
          def merge(sources):
              for i1, s1 in enumerate(sources):
                  for i2, s2 in enumerate(sources[i1+1:]):
                      if s1.overlap(s2) > self.overlap:
                          return i1, i1 + 1 + i2
              return None

          # merge pairs until none left to merge
          pair = merge(combined)
          testing = True
          while testing:
              if pair is None:
                  testing = False
              else:
                  combined[pair[0]] = combined[pair[0]].merge(combined[pair[1]])
                  del combined[pair[1]]
                  pair = merge(combined)

      return combined
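The merge step above relies only on regional's overlap and merge primitives; here is a minimal standalone sketch of that criterion on two toy sources (the 0.5 threshold is illustrative, not a default of this class):

from regional import one

source_a = one([[0, 0], [0, 1], [1, 0], [1, 1]])
source_b = one([[0, 0], [0, 1], [1, 1]])

# 'fraction' is the intersection over union of the coordinate sets (0.75 here)
if source_a.overlap(source_b, 'fraction') > 0.5:
    merged = source_a.merge(source_b)
    # merged covers the union of the two coordinate sets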
Example 14
def load_regions(dataset_path):
    """
  Load in the ROIs for a dataset.
  Returns a regional.many object
  """
    with open(os.path.join(dataset_path, 'regions/regions.json')) as f:
        data = json.load(f)

    regions = []
    for i in range(len(data)):
        regions.append(regional.one(data[i]['coordinates']))

    return regional.many(regions)
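A sketch of the regions.json layout this loader expects, using made-up coordinates (the path convention and the 'coordinates' field follow the function above):

import json
import os

dataset_path = '/tmp/example_dataset'  # hypothetical location
os.makedirs(os.path.join(dataset_path, 'regions'), exist_ok=True)

# one record per ROI, each carrying its coordinate list
records = [{'coordinates': [[0, 0], [0, 1], [1, 0], [1, 1]]},
           {'coordinates': [[5, 5], [5, 6]]}]
with open(os.path.join(dataset_path, 'regions/regions.json'), 'w') as f:
    json.dump(records, f)

rois = load_regions(dataset_path)
# rois.count -> 2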
Example 15
def mask_outlines(img, mask_arrs=[], colors=[]):
    """Apply each of the given masks (numpy arrays) to the base img with the given colors.

    # Arguments
        img: base image as a (height x width) numpy array.
        mask_arrs: list of masks as (height x width) numpy arrays that should be outlined.
        colors: one color (e.g. 'red' or hex code) for each mask.

    # Returns
        img: The base image with outlines applied.

    """

    assert len(mask_arrs) == len(colors), 'One color per mask.'
    img = img.astype(np.float32)

    # Clip outliers, then scale the img to [0, 1].
    img = np.clip(img, 0, np.percentile(img, 99))
    img = (img - np.min(img)) / (np.max(img) - np.min(img))

    # Convert the img to RGB; fall back to the image itself if it already has channels.
    if len(img.shape) == 2:
        img_rgb = gray2rgb(img)
    else:
        img_rgb = img

    # Build up an image of combined outlines.
    oln_rgb = np.zeros_like(img_rgb)
    for m, c in zip(mask_arrs, colors):
        if np.sum(m) == 0:
            continue
        r = one(list(zip(*np.where(m == 1))))
        o = r.mask(dims=img_rgb.shape[:2],
                   fill=None,
                   stroke=c,
                   background='black')
        yy, xx, cc = np.where(o != 0)
        oln_rgb[yy, xx, cc] = o[yy, xx, cc]

    # Merge the two images.
    # Helpful stackoverflow post: https://stackoverflow.com/questions/40895785
    oln_rgb = np.clip(oln_rgb, 0, 1)
    oln_msk = np.max(oln_rgb, axis=-1)
    img_msk = 1 - oln_msk
    oln_msk = gray2rgb(oln_msk)
    img_msk = gray2rgb(img_msk)
    mrg = (((oln_rgb * oln_msk) + (img_rgb * img_msk)) * 255).astype(np.uint8)

    return mrg
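A hedged usage sketch for mask_outlines on synthetic data (image size, mask placement, and color are made up; the module-level imports the function relies on, such as numpy, gray2rgb from skimage.color, and one from regional, are assumed to be in place):

import numpy as np

img = np.random.rand(32, 32).astype(np.float32)   # synthetic base image
mask = np.zeros((32, 32), dtype=np.uint8)
mask[10:13, 10:13] = 1                            # a single 3x3 square mask

overlay = mask_outlines(img, mask_arrs=[mask], colors=['red'])
# overlay should be a (32, 32, 3) uint8 image with the square outlined in red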
Example 16
def test_mask_colors():
	coords = [[1, 1]]
	r = one(coords)
	im = r.mask(fill='blue', stroke=None, dims=(2, 2))
	assert im.shape == (2, 2, 3)
	assert allclose(im[:, :, 0], [[1, 1], [1, 0]])
	assert allclose(im[:, :, 1], [[1, 1], [1, 0]])
	assert allclose(im[:, :, 2], [[1, 1], [1, 1]])
	im = r.mask(fill='red', stroke='black', dims=(2, 2))
	assert im.shape == (2, 2, 3)
	assert allclose(im[:, :, 0], [[0, 0], [0, 1]])
	assert allclose(im[:, :, 1], [[0, 0], [0, 0]])
	assert allclose(im[:, :, 2], [[0, 0], [0, 0]])
	im = r.mask(fill='red', stroke=None, background='blue', dims=(2, 2))
	assert im.shape == (2, 2, 3)
	assert allclose(im[:, :, 0], [[0, 0], [0, 1]])
	assert allclose(im[:, :, 1], [[0, 0], [0, 0]])
	assert allclose(im[:, :, 2], [[1, 1], [1, 0]])
Example 17
def dataset_to_mp4(s, m, mp4_path):
    """Converts the given series to an mp4 video. If the mask is given, adds an outline around each neuron.

    # Arguments
        s: imaging series as a (time x height x width) numpy array.
        m: neuron masks as a (no. neurons x height x width) numpy array.
        mp4_path: path where the mp4 file should be saved.

    # Returns
        Nothing

    """

    from skvideo.io import vwrite
    logger = logging.getLogger(funcname())
    logger.info('Preparing video %s.' % mp4_path)

    s = s.astype(np.float32)
    s = (s - np.min(s)) / (np.max(s) - np.min(s)) * 255

    # If mask is given make a color video with neurons outlined.
    if m is not None:
        video = np.zeros(s.shape + (3, ), dtype=np.uint8)
        video[:, :, :, 0] = s
        video[:, :, :, 1] = s
        video[:, :, :, 2] = s

        outlines = np.zeros(s.shape[1:] + (3, ), )
        for i in range(m.shape[0]):
            reg = one(list(zip(*np.where(m[i] == 1))))
            outlines += reg.mask(dims=m.shape[1:],
                                 fill=None,
                                 stroke='white',
                                 background='black')

        yy, xx, _ = np.where(outlines != 0)
        video[:, yy, xx, :] = [102, 255, 255]

    else:
        video = s.astype(np.uint8)

    vwrite(mp4_path, video)
    logger.info('Saved video %s.' % mp4_path)
Example 18
def test_dilate():
	v = many([one([1, 1]), one([1, 1])]).dilate(1)
	truth = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
	assert allclose(v.coordinates, [truth, truth])
Example 19
def test_exclude():
	coords = [[0, 0], [0, 1], [1, 0], [1, 1]]
	r = one(coords).exclude(one([[0, 0], [0, 1]]))
	assert allclose(r.coordinates, [[1, 0], [1, 1]])
	r = one(coords).exclude([[0, 0], [0, 1]])
	assert allclose(r.coordinates, [[0, 0], [0, 1], [1, 0]])
Example 20
def test_outline():
	coords = [[1, 1]]
	r = one(coords).outline(0, 1)
	assert allclose(r.coordinates, [[0, 0], [0, 1], [0, 2], 
		[1, 0], [1, 2], [2, 0], [2, 1], [2, 2]])
Example 21
def test_center():
	coords = [[0, 0], [0, 1], [1, 0], [1, 1]]
	r = one(coords)
	assert allclose(r.center, [0.5, 0.5])
Example 22
def convert(array):
    r, c = np.where(array > 0.0)
    # materialize the zip so the coordinates are a concrete sequence under Python 3
    return one(list(zip(r, c)))
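For instance (a quick illustrative check, assuming numpy and regional's one are imported as the snippet requires):

arr = np.array([[0.0, 0.5],
                [0.0, 0.2]])
region = convert(arr)
# region.coordinates -> [[0, 1], [1, 1]]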
Example 23
def test_dilate():
	v = one([1, 1]).dilate(1)
	assert allclose(v.coordinates, [[0, 0], [0, 1], [0, 2], [1, 0], 
		[1, 1], [1, 2], [2, 0], [2, 1], [2, 2]])
	v = one([1, 1]).dilate(0)
	assert allclose(v.coordinates, [[1, 1]])
Example 24
def test_hull():
	coords = [[0, 0], [0, 2], [2, 0], [1, 1], [2, 2]]
	r = one(coords)
	assert allclose(r.hull, [[0, 0], [2, 0], [2, 2], [0, 2]])
Example 25
def test_bbox():
	coords = [[0, 0], [0, 2], [2, 0], [1, 1], [2, 2]]
	r = one(coords)
	assert allclose(r.bbox, [0, 0, 2, 2])
Example 26
    def _construct_region_for_location(self, location, images, template,
                                       mean_image):
        """
    Given a location, build a mask around it. Just do some simple segmenting
    to construct the mask.
    """

        image_dims = images[0].shape
        template_dims = template.shape
        template_area = template_dims[0] * template_dims[1]

        # use integer division so these can serve as slice bounds
        template_half_height = template_dims[0] // 2
        template_half_width = template_dims[1] // 2
        y, x = location
        min_x = max(0, x - template_half_width)
        max_x = min(image_dims[1], x + template_half_width)
        min_y = max(0, y - template_half_height)
        max_y = min(image_dims[0], y + template_half_height)

        neuron = mean_image[min_y:max_y, min_x:max_x]
        neuron_area = neuron.shape[0] * neuron.shape[1]

        f = interpolate.interp1d([np.min(neuron), np.max(neuron)], [-1, 1])
        scaled_neuron = f(neuron)
        seg = segmentation.felzenszwalb(scaled_neuron,
                                        scale=1000,
                                        sigma=1,
                                        min_size=int(.4 * template_area))

        # Is this mask good to go?
        seg_g2g = True

        # if the mask is more than 75 percent of the window, it's probably bad
        if np.sum(seg) >= 0.75 * neuron_area:
            seg_g2g = False

        # did we even find a region?
        if np.count_nonzero(seg) == 0:
            seg_g2g = False

        # did we find too many regions?
        if np.unique(seg).shape[0] != 2:  # we only want 0s and 1s
            seg_g2g = False

        # we didn't find a good segmentation
        if not seg_g2g:
            # Just make an ellipse?
            # Rather than an ellipse, we should just use the template
            cx = neuron.shape[1] / 2
            cy = neuron.shape[0] / 2
            rx = cx * 3 / 4
            ry = cy * 3 / 4
            seg = np.zeros(neuron.shape)
            rr, cc = ellipse(cy, cx, ry, rx)
            seg[rr, cc] = 1

        mask = np.zeros(image_dims)
        mask[min_y:max_y, min_x:max_x] = seg

        coordinates = np.transpose(np.nonzero(mask)).tolist()

        return regional.one(coordinates)
Example 27
	def _get(self, block):
		"""
		Performs Sparse PCA on a block to identify spatial regions

		Arguments
		---------
		block : thunder block
			Block of images

		Returns
		-------
		combined : list
			List of regions
		"""
		dims = block.shape[1:]
		max_size = prod(dims) / 2 if self.max_size == 'full' else self.max_size
		data = block.reshape(block.shape[0], -1)
		model = SparsePCA(self.k, normalize_components=True)
		model.fit(clip(data, 0, inf)) 
		components = model.components_.reshape((self.k, ) + dims)

		combined = []
		for component in components:
			tmp = component > percentile(component, self.percentile)
			labels, num = label(tmp, return_num=True)
			if num == 1:
				counts = bincount(labels.ravel())
				if counts[1] < self.min_size:
					continue
				else:
					regions = labels
			else:
				regions = remove_small_objects(labels, min_size=self.min_size)
		  
			ids = unique(regions)
			ids = ids[ids > 0]
			for ii in ids:
				r = regions == ii
				r = median_filter(r, 2)
				coords = asarray(where(r)).T
				if (size(coords) > 0) and (size(coords) < max_size):
					combined.append(one(coords))

		# merge overlapping sources
		if self.overlap is not None:

			# iterate over source pairs and find a pair to merge
			def merge(sources):
				for i1, s1 in enumerate(sources):
					for i2, s2 in enumerate(sources[i1+1:]):
						if s1.overlap(s2) > self.overlap:
							return i1, i1 + 1 + i2
				return None

			# merge pairs until none left to merge
			pair = merge(combined)
			testing = True
			while testing:
				if pair is None:
					testing = False
				else:
					combined[pair[0]] = combined[pair[0]].merge(combined[pair[1]])
					del combined[pair[1]]
					pair = merge(combined)

		return combined		
Example 28
def test_mask_colormap():
	r = many([one([0, 0]), one([1, 1])])
	im = r.mask(cmap='gray', value=[0, 1], background='red')
	assert allclose(im[:,:,0], [[0, 1], [1, 1]])
	assert allclose(im[:,:,1], [[0, 0], [0, 1]])
	assert allclose(im[:,:,2], [[0, 0], [0, 1]])
Example 29
def test_index():
	coords = [[0, 0], [0, 1], [1, 0], [1, 1]]
	r = many([one(coords), one(coords)])
	assert allclose(r[0].coordinates, coords)
	assert allclose(r[int64(0)].coordinates, coords)
Example 30
def test_extent():
	coords = [[0, 0], [0, 2], [2, 0], [1, 1], [2, 2]]
	r = one(coords)
	assert allclose(r.extent, [3, 3])
Example 31
def test_overlap():
	coords = [[1, 1], [1, 2], [2, 1], [2, 2]]
	v = many([coords, coords]).overlap(one([1, 1]))
	assert v == [0.25, 0.25]
Example 32
def test_area():
	coords = [[0, 0], [0, 2], [2, 0], [2, 2]]
	r = one(coords)
	assert r.area == 4
Example 33
def test_exclude():
	coords = [[0, 0], [0, 1], [1, 0], [1, 1]]
	truth = [[1, 0], [1, 1]]
	r = many([coords, coords]).exclude(one([[0, 0], [0, 1]]))
	assert allclose(r.coordinates, [truth, truth])
Example 34
def test_distance():
	coords = [[0, 0], [0, 2], [2, 0], [2, 2]]
	r = one(coords)
	assert r.distance([1, 1]) == 0
Example 35
    def _get(self, block):
        """
      Perform NMF on a block to identify spatial regions.
      """
        dims = block.shape[1:]
        max_size = prod(dims) / 2 if self.max_size == 'full' else self.max_size

        # reshape to t x spatial dimensions
        data = block.reshape(block.shape[0], -1)

        # build and apply NMF model to block
        model = SKNMF(self.k, max_iter=self.max_iter)
        model.fit(clip(data, 0, inf))

        # reconstruct sources as spatial objects in one array
        components = model.components_.reshape((self.k, ) + dims)

        # convert from basis functions into shape
        # by median filtering (optional), applying a percentile threshold,
        # finding connected components and removing small objects
        combined = []
        for component in components:
            tmp = component > percentile(component, self.percentile)
            labels, num = label(tmp, return_num=True)
            if num == 1:
                counts = bincount(labels.ravel())
                if counts[1] < self.min_size:
                    continue
                else:
                    regions = labels
            else:
                regions = remove_small_objects(labels, min_size=self.min_size)
            ids = unique(regions)
            ids = ids[ids > 0]
            for ii in ids:
                r = regions == ii
                r = median_filter(r, 2)
                coords = asarray(where(r)).T
                if (size(coords) > 0) and (size(coords) < max_size):
                    combined.append(one(coords))

        # merge overlapping sources
        if self.overlap is not None:

            # iterate over source pairs and find a pair to merge
            def merge(sources):
                for i1, s1 in enumerate(sources):
                    for i2, s2 in enumerate(sources[i1 + 1:]):
                        if s1.overlap(s2) > self.overlap:
                            return i1, i1 + 1 + i2
                return None

            # merge pairs until none left to merge
            pair = merge(combined)
            testing = True
            while testing:
                if pair is None:
                    testing = False
                else:
                    combined[pair[0]] = combined[pair[0]].merge(
                        combined[pair[1]])
                    del combined[pair[1]]
                    pair = merge(combined)

        return combined
Example 36
def test_merge_nonunique():
	coords = [[0, 0], [0, 2], [2, 0], [2, 2]]
	r = one(coords).merge([[1, 1], [0, 0]])
	assert equal_sets(r.coordinates.tolist(), coords + [[1,1]])
	r = one(coords).merge(one([[1, 1], [0, 0]]))
	assert equal_sets(r.coordinates.tolist(), coords + [[1,1]])
Example 37
def test_crop():
	coords = [[0, 0], [0, 2], [2, 0], [2, 2]]
	r = one(coords).crop([0, 0], [1, 1])
	assert allclose(r.coordinates, [[0, 0]])
Example 38
def test_merge():
	coords = [[0, 0], [0, 2], [2, 0], [2, 2]]
	r = one(coords).merge([1, 1])
	assert allclose(r.coordinates, coords + [[1,1]])
	r = one(coords).merge(one([1, 1]))
	assert allclose(r.coordinates, coords + [[1,1]])