Example no. 1
    def test_fast(self):
        """Test the fast conversion"""
        data = numpy.ones((251, 200), dtype="uint16")
        data[:, :] = numpy.arange(200)
        data[2, :] = 56
        data[200, 2] = 3

        data_nc = data.swapaxes(
            0, 1)  # non-contiguous cannot be treated by fast conversion

        # convert to RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        tstart = time.time()
        for i in range(10):
            rgb = img.DataArray2RGB(data, irange)
        fast_dur = time.time() - tstart

        hist_nc, edges_nc = img.histogram(data_nc)
        irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
        tstart = time.time()
        for i in range(10):
            rgb_nc = img.DataArray2RGB(data_nc, irange_nc)
        std_dur = time.time() - tstart
        rgb_nc_back = rgb_nc.swapaxes(0, 1)

        print("Time fast conversion = %g s, standard = %g s" %
              (fast_dur, std_dur))
        self.assertLess(fast_dur, std_dur)
        # ±1, to accommodate the value shifts introduced by the standard converter to support floats
        numpy.testing.assert_almost_equal(rgb, rgb_nc_back, decimal=0)
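Most of the examples on this page follow the same histogram-based pipeline around img.DataArray2RGB. A minimal, self-contained sketch of that pattern is shown below; the frame is synthetic and the import path is assumed, so treat it as an illustration rather than a verbatim excerpt from Odemis:

import numpy
from odemis.util import img  # assumed import path for the img module used in these examples

# hypothetical 12-bit greyscale frame
frame = numpy.random.randint(0, 4096, (512, 512)).astype(numpy.uint16)
hist, edges = img.histogram(frame)                   # intensity histogram and value range
irange = img.findOptimalRange(hist, edges, 1 / 256)  # treat ~1/256 of the pixels as outliers
rgb = img.DataArray2RGB(frame, irange)               # (512, 512, 3) uint8, ready for display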
Example no. 2
    def test_auto_vs_manual(self):
        """
        Checks that conversion with auto BC is the same as optimal BC + manual
        conversion.
        """
        size = (1024, 512)
        depth = 2**12
        img12 = numpy.zeros(size, dtype="uint16") + depth // 2
        img12[0, 0] = depth - 1 - 240

        # automatic
        img_auto = img.DataArray2RGB(img12)

        # manual
        hist, edges = img.histogram(img12, (0, depth - 1))
        self.assertEqual(edges, (0, depth - 1))
        irange = img.findOptimalRange(hist, edges)
        img_manu = img.DataArray2RGB(img12, irange)

        numpy.testing.assert_equal(img_auto, img_manu)

        # second try
        img12 = numpy.zeros(size, dtype="uint16") + 4000
        img12[0, 0] = depth - 1 - 40
        img12[12, 12] = 50

        # automatic
        img_auto = img.DataArray2RGB(img12)

        # manual
        hist, edges = img.histogram(img12, (0, depth - 1))
        irange = img.findOptimalRange(hist, edges)
        img_manu = img.DataArray2RGB(img12, irange)

        numpy.testing.assert_equal(img_auto, img_manu)
Example no. 3
    def test_uint8(self):
        # uint8 is special because it's so close to the output format that
        # bytescale normally does nothing
        irange = (25, 135)
        shape = (1024, 836)
        tint = (0, 73, 255)
        data = numpy.random.randint(irange[0], irange[1] + 1,
                                    shape).astype(numpy.uint8)
        # to be really sure there is at least one of the min and max values
        data[0, 0] = irange[0]
        data[0, 1] = irange[1]

        out = img.DataArray2RGB(data, irange, tint=tint)

        pixel1 = out[0, 1]
        numpy.testing.assert_array_equal(pixel1, list(tint))

        self.assertTrue(numpy.all(out[..., 0] == 0))

        self.assertEqual(out[..., 2].min(), 0)
        self.assertEqual(out[..., 2].max(), 255)

        # Same data, but now mapped between 0->255 => no scaling to do (just duplicate)
        irange = (0, 255)
        out = img.DataArray2RGB(data, irange, tint=tint)
        self.assertTrue(numpy.all(out[..., 0] == 0))
        numpy.testing.assert_array_equal(data, out[:, :, 2])
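The assertions in test_uint8 (red channel always 0, blue spanning 0..255, the brightest pixel equal to the tint) are consistent with a per-channel scaling of the rescaled greyscale value by tint/255. The helper below is only a hedged guess at that behaviour, not the actual DataArray2RGB implementation:

import numpy

def apply_tint(grey8, tint):
    """Hedged sketch: scale an (H, W) uint8 greyscale image by an (R, G, B) tint."""
    factors = numpy.asarray(tint, dtype=numpy.float64) / 255
    return (grey8[..., numpy.newaxis] * factors).astype(numpy.uint8)

# With tint=(0, 73, 255): grey8=255 maps to exactly (0, 73, 255), and the red
# channel is 0 everywhere, matching the assertions above.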
Example no. 4
    def test_direct_mapping(self):
        """test with irange fitting the whole depth"""
        # first 8 bit => no change (and test the short-cut)
        size = (1024, 1024)
        depth = 256
        grey_img = numpy.zeros(size, dtype="uint8") + depth // 2
        grey_img[0, 0] = 10
        grey_img[0, 1] = depth - 10

        # should keep the grey
        out = img.DataArray2RGB(grey_img, irange=(0, depth - 1))
        self.assertEqual(out.shape, size + (3,))
        self.assertEqual(self.CountValues(out), 3)
        pixel = out[2, 2]
        numpy.testing.assert_equal(pixel, [128, 128, 128])

        # 16 bits
        depth = 4096
        grey_img = numpy.zeros(size, dtype="uint16") + depth // 2
        grey_img[0, 0] = 100
        grey_img[0, 1] = depth - 100

        # should keep the grey
        out = img.DataArray2RGB(grey_img, irange=(0, depth - 1))
        self.assertEqual(out.shape, size + (3,))
        self.assertEqual(self.CountValues(out), 3)
        pixel = out[2, 2]
        numpy.testing.assert_equal(pixel, [128, 128, 128])
Example no. 5
    def test_simple(self):
        # test with everything auto
        size = (1024, 512)
        grey_img = numpy.zeros(size, dtype="uint16") + 1500

        # one colour
        out = img.DataArray2RGB(grey_img)
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 1)

        # add black
        grey_img[0, 0] = 0
        out = img.DataArray2RGB(grey_img)
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 2)

        # add white
        grey_img[0, 1] = 4095
        out = img.DataArray2RGB(grey_img)
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 3)
        pixel0 = out[0, 0]
        pixel1 = out[0, 1]
        pixelg = out[0, 2]
        numpy.testing.assert_array_less(pixel0, pixel1)
        numpy.testing.assert_array_less(pixel0, pixelg)
        numpy.testing.assert_array_less(pixelg, pixel1)
Example no. 6
    def test_direct_mapping(self):
        """test with irange fitting the whole depth"""
        # first 8 bit => no change (and test the short-cut)
        size = (1024, 1024)
        depth = 256
        grey_img = numpy.zeros(size, dtype="uint8") + depth // 2  # 128
        grey_img[0, 0] = 10
        grey_img[0, 1] = depth - 10

        # should keep the grey
        out = img.DataArray2RGB(grey_img, irange=(0, depth - 1))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 3)
        pixel = out[2, 2]
        numpy.testing.assert_equal(out[:, :, 0], out[:, :, 1])
        numpy.testing.assert_equal(out[:, :, 0], out[:, :, 2])
        numpy.testing.assert_equal(pixel, [128, 128, 128])

        # 16 bits
        depth = 4096
        grey_img = numpy.zeros(size, dtype="uint16") + depth // 2
        grey_img[0, 0] = 100
        grey_img[0, 1] = depth - 100
        grey_img[1, 0] = 0
        grey_img[1, 1] = depth - 1

        # should keep the grey
        out = img.DataArray2RGB(grey_img, irange=(0, depth - 1))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 5)
        pixel = out[2, 2]
        numpy.testing.assert_equal(out[:, :, 0], out[:, :, 1])
        numpy.testing.assert_equal(out[:, :, 0], out[:, :, 2])
        # In theory, depth//2 should map to 128, but because the function also
        # supports float ranges it cannot guarantee this, so 127 is accepted as well.
        assert (numpy.array_equal(pixel, [127, 127, 127])
                or numpy.array_equal(pixel, [128, 128, 128]))
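        # e.g. with depth=4096 and irange=(0, 4095): 2048 * 255 / 4095 ≈ 127.5,
        # so the mid-grey legitimately rounds to either 127 or 128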
        numpy.testing.assert_equal(out[1, 0], [0, 0, 0])
        numpy.testing.assert_equal(out[1, 1], [255, 255, 255])

        # 32 bits
        depth = 2**32
        grey_img = numpy.zeros(size, dtype="uint32") + depth // 2
        grey_img[0, 0] = depth // 50
        grey_img[0, 1] = depth - depth // 50
        grey_img[1, 0] = 0
        grey_img[1, 1] = depth - 1

        # should keep the grey
        out = img.DataArray2RGB(grey_img, irange=(0, depth - 1))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 5)
        pixel = out[2, 2]
        numpy.testing.assert_equal(out[:, :, 0], out[:, :, 1])
        numpy.testing.assert_equal(out[:, :, 0], out[:, :, 2])
        assert (numpy.array_equal(pixel, [127, 127, 127])
                or numpy.array_equal(pixel, [128, 128, 128]))
        numpy.testing.assert_equal(out[1, 0], [0, 0, 0])
        numpy.testing.assert_equal(out[1, 1], [255, 255, 255])
Example no. 7
    def _updateImage(self, tint=(255, 255, 255)):
        """ Recomputes the image with all the raw data available

        tint ((int, int, int)): colouration of the image, in RGB. Only used by
            FluoStream to avoid code duplication
        """
        # avoid running it if a previous conversion is still running
        if self._running_upd_img:
            logging.debug(("Dropping image conversion to RGB, as the previous "
                           "one is still running"))
            return
        if not self.raw:
            return

        try:
            self._running_upd_img = True
            data = self.raw[0]
            irange = self._getDisplayIRange()
            rgbim = img.DataArray2RGB(data, irange, tint)
            rgbim.flags.writeable = False
            # # Commented to prevent log flooding
            # if model.MD_ACQ_DATE in data.metadata:
            #     logging.debug("Computed RGB projection %g s after acquisition",
            #                    time.time() - data.metadata[model.MD_ACQ_DATE])
            md = self._find_metadata(data.metadata)
            md[model.MD_DIMS] = "YXC"  # RGB format
            self.image.value = model.DataArray(rgbim, md)
        except Exception:
            logging.exception("Updating %s image", self.__class__.__name__)
        finally:
            self._running_upd_img = False
Example no. 8
    def _updateImage(self):
        """ Recomputes the image with all the raw data available for the current
        selected point.
        """
        if not self.raw:
            return

        pos = self.point.value
        try:
            if pos == (None, None):
                self.image.value = None
            else:
                polard = self._getPolarProjection(pos)
                # update the histogram
                # TODO: cache the histogram per image
                # FIXME: histogram should not include the black pixels outside
                # of the circle. => use a masked array?
                # reset the drange to ensure that it doesn't depend on older data
                self._drange = None
                self._updateDRange(polard)
                self._updateHistogram(polard)
                irange = self._getDisplayIRange()

                # Convert to RGB
                rgbim = img.DataArray2RGB(polard, irange)
                rgbim.flags.writeable = False
                # For polar view, no PIXEL_SIZE nor POS
                self.image.value = model.DataArray(rgbim)
        except Exception:
            logging.exception("Updating %s image", self.__class__.__name__)
Example no. 9
File: png.py Project: lanery/odemis
def _saveAsPNG(filename, data):

    # TODO: store metadata

    # Already RGB 8 bit?
    if (data.metadata.get(model.MD_DIMS) == 'YXC'
            and data.dtype in (numpy.uint8, numpy.int8)
            and data.shape[2] in (3, 4)):
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        #     tempdriver = gdal.GetDriverByName('MEM')
        #     tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        #     tiledriver = gdal.GetDriverByName("png")
        #     tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        #     tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    im = Image.fromarray(rgb8)
    im.save(filename, "PNG")
Example no. 10
    def test_irange(self):
        """test with specific corner values of irange"""
        size = (1024, 1024)
        depth = 4096
        grey_img = numpy.zeros(size, dtype="uint16") + depth // 2
        grey_img[0, 0] = 100
        grey_img[0, 1] = depth - 100

        # slightly smaller range than everything => still 3 colours
        out = img.DataArray2RGB(grey_img, irange=(50, depth - 51))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 3)
        pixel0 = out[0, 0]
        pixel1 = out[0, 1]
        pixelg = out[0, 2]
        numpy.testing.assert_array_less(pixel0, pixel1)
        numpy.testing.assert_array_less(pixel0, pixelg)
        numpy.testing.assert_array_less(pixelg, pixel1)

        # irange at the lowest value => all white (but the blacks)
        out = img.DataArray2RGB(grey_img, irange=(0, 1))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 1)
        pixel = out[2, 2]
        numpy.testing.assert_equal(pixel, [255, 255, 255])

        # irange at the highest value => all blacks (but the whites)
        out = img.DataArray2RGB(grey_img, irange=(depth - 2, depth - 1))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 1)
        pixel = out[2, 2]
        numpy.testing.assert_equal(pixel, [0, 0, 0])

        # irange at the middle value => black/white/grey (max)
        out = img.DataArray2RGB(grey_img,
                                irange=(depth // 2 - 1, depth // 2 + 1))
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out), 3)
        hist, edges = img.histogram(out[:, :, 0])  # just use one RGB channel
        self.assertGreater(hist[0], 0)
        self.assertEqual(hist[1], 0)
        self.assertGreater(hist[-1], 0)
        self.assertEqual(hist[-2], 0)
Example no. 11
    def test_fast(self):
        """Test the fast conversion"""
        data = numpy.ones((251, 200), dtype="uint16")
        data[:, :] = range(200)
        data[2, :] = 56
        data[200, 2] = 3

        data_nc = data.swapaxes(
            0, 1)  # non-contiguous cannot be treated by fast conversion

        # convert to RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb = img.DataArray2RGB(data, irange)

        hist_nc, edges_nc = img.histogram(data_nc)
        irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
        rgb_nc = img.DataArray2RGB(data_nc, irange_nc)
        rgb_nc_back = rgb_nc.swapaxes(0, 1)

        numpy.testing.assert_equal(rgb, rgb_nc_back)
Example no. 12
def preprocess(im, invert, flip, crop, gaussian_sigma, eqhis):
    '''
    Typical preprocessing steps needed before performing keypoint matching
    im (DataArray): Input image
    invert (bool): Invert the brightness levels of the image
    flip (tuple(bool, bool)): Determine if the image should be flipped on the X and Y axis
    crop (tuple(t, b, l, r)): Crop values in pixels
    gaussian_sigma (int): Blur intensity
    eqhis (bool): If True, a histogram equalisation is performed (and the data
      type is set to uint8)
    return (DataArray of same shape): Processed image
    '''
    try:
        metadata = im.metadata
    except AttributeError:
        metadata = {}

    flip_x, flip_y = flip
    # flip on X axis
    if flip_x:
        im = im[:, ::-1]

    # flip on Y axis
    if flip_y:
        im = im[::-1, :]

    crop_top, crop_bottom, crop_left, crop_right = crop
    # remove the bar
    im = im[crop_top:im.shape[0] - crop_bottom,
            crop_left:im.shape[1] - crop_right]

    # Invert the image brightness
    if invert:
        # mn = im.min()
        mx = im.max()
        im = mx - im

    # equalize histogram
    if eqhis:
        if im.dtype != numpy.uint8:
            # OpenCV histogram equalisation only works on uint8 data
            rgb_im = img.DataArray2RGB(im)
            im = rgb_im[:, :, 0]
        im = cv2.equalizeHist(im)

    # blur the image using a gaussian filter
    if gaussian_sigma:
        im = ndimage.gaussian_filter(im, sigma=gaussian_sigma)

    # return a new DataArray with the metadata of the original image
    return model.DataArray(im, metadata)
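For reference, a hypothetical call to preprocess() as documented above; the parameter values are purely illustrative, and a plain numpy array is enough since the function falls back to empty metadata:

import numpy

frame = numpy.random.randint(0, 4096, (1024, 1024)).astype(numpy.uint16)
prepared = preprocess(frame,
                      invert=False,
                      flip=(False, True),   # flip vertically (Y) only
                      crop=(0, 60, 0, 0),   # e.g. drop a 60 px info bar at the bottom
                      gaussian_sigma=2,     # mild blur before keypoint detection
                      eqhis=True)           # histogram equalisation, output becomes uint8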
Example no. 13
    def new_image(self, data):
        """
        Update the window with the new image (the window is resized to show the image
        at a 1:1 ratio)
        data (numpy.ndarray): a 2D array containing the image (can be 3D if in RGB)
        """
        if data.ndim == 3 and 3 in data.shape: # RGB
            rgb = img.ensureYXC(data)
        else: # Greyscale (hopefully)
            mn, mx, mnp, mxp = ndimage.extrema(data)
            logging.info("Image data from %s to %s", mn, mx)
            rgb = img.DataArray2RGB(data) # auto brightness/contrast

        self.app.img = NDImage2wxImage(rgb)
        wx.CallAfter(self.app.update_view)
Example no. 14
    def test_float(self):
        irange = (0.3, 468.4)
        shape = (102, 965)
        tint = (0, 73, 255)
        grey_img = numpy.zeros(shape, dtype="float") + 15.05
        grey_img[0, 0] = -15.6
        grey_img[0, 1] = 500.6

        out = img.DataArray2RGB(grey_img, irange, tint=tint)
        self.assertTrue(numpy.all(out[..., 0] == 0))
        self.assertEqual(out[..., 2].min(), 0)
        self.assertEqual(out[..., 2].max(), 255)

        # irange at the lowest value => all white (but the blacks)
        out = img.DataArray2RGB(grey_img, irange=(-100, -50))
        self.assertEqual(out.shape, shape + (3, ))
        self.assertEqual(self.CountValues(out), 1)
        pixel = out[2, 2]
        numpy.testing.assert_equal(pixel, [255, 255, 255])

        # irange at the highest value => all blacks (but the whites)
        out = img.DataArray2RGB(grey_img, irange=(5000, 5000.1))
        self.assertEqual(out.shape, shape + (3, ))
        self.assertEqual(self.CountValues(out), 1)
        pixel = out[2, 2]
        numpy.testing.assert_equal(pixel, [0, 0, 0])

        # irange at the middle => B&W only
        out = img.DataArray2RGB(grey_img, irange=(10, 10.1))
        self.assertEqual(out.shape, shape + (3, ))
        self.assertEqual(self.CountValues(out), 2)
        hist, edges = img.histogram(out[:, :, 0])  # just use one RGB channel
        self.assertGreater(hist[0], 0)
        self.assertEqual(hist[1], 0)
        self.assertGreater(hist[-1], 0)
        self.assertEqual(hist[-2], 0)
Example no. 15
    def _updateImage(self):
        raw = self.stream.raw[0]
        metadata = self.stream._find_metadata(raw.metadata)
        raw = img.ensure2DImage(raw)  # Remove extra dimensions (of length 1)
        grayscale_im = preprocess(raw, self._invert, self._flip, self._crop,
                                  self._gaussian_sigma, self._eqhis)
        rgb_im = img.DataArray2RGB(grayscale_im)
        if self._kp:
            rgb_im = cv2.drawKeypoints(rgb_im, self._kp, None, color=(30, 30, 255), flags=0)
        if self._mkp:
            rgb_im = cv2.drawKeypoints(rgb_im, self._mkp, None, color=(0, 255, 0), flags=0)

        rgb_im = model.DataArray(rgb_im, metadata)
        rgb_im.flags.writeable = False
        self.image.value = rgb_im
Example no. 16
 def _projectXY2RGB(self, data, tint=(255, 255, 255)):
     """
     Project a 2D spatial DataArray into a RGB representation
     data (DataArray): 2D DataArray
     tint ((int, int, int)): colouration of the image, in RGB.
     return (DataArray): 3D DataArray
     """
     irange = self._getDisplayIRange()
     rgbim = img.DataArray2RGB(data, irange, tint)
     rgbim.flags.writeable = False
     # Commented to prevent log flooding
     # if model.MD_ACQ_DATE in data.metadata:
     #     logging.debug("Computed RGB projection %g s after acquisition",
     #                    time.time() - data.metadata[model.MD_ACQ_DATE])
     md = self._find_metadata(data.metadata)
     md[model.MD_DIMS] = "YXC" # RGB format
     return model.DataArray(rgbim, md)
Example no. 17
    def test_uint32_small(self):
        """
        Test uint32, but with values very close to each other => the histogram
        will look like just one non-null column. But we still want the image
        to be displayed over the full 0->255 RGB range.
        """
        size = (512, 100)
        grey_img = numpy.zeros(size, dtype="uint32") + 3
        grey_img[0, :] = 0
        grey_img[:, 1] = 40
        hist, edges = img.histogram(grey_img)  # , (0, depth - 1))
        irange = img.findOptimalRange(hist, edges, 0)

        rgb = img.DataArray2RGB(grey_img, irange)

        self.assertEqual(rgb[0, 0].tolist(), [0, 0, 0])
        self.assertEqual(rgb[5, 1].tolist(), [255, 255, 255])
        self.assertTrue(0 < rgb[50, 50, 0] < 255)
Example no. 18
 def new_image(self, data):
     """
     Update the window with the new image (the window is resized to show the image
     at a 1:1 ratio)
     data (numpy.ndarray): a 2D array containing the image (can be 3D if in RGB)
     """
     if data.ndim == 3 and 3 in data.shape:  # RGB
         rgb = img.ensureYXC(data)
     elif numpy.prod(data.shape) == data.shape[-1]:  # 1D image => bar plot
         # TODO: add "(plot)" to the window title
         # Create a simple bar plot of X x 400 px
         lenx = data.shape[-1]
         if lenx > MAX_WIDTH:
             binning = lenx // MAX_WIDTH
             data = data[..., 0::binning]
             logging.debug("Compressed data from %d to %d elements", lenx,
                           data.shape[-1])
             lenx = data.shape[-1]
         leny = 400
         miny = min(0, data.min())
         maxy = data.max()
         diffy = maxy - miny
         if diffy == 0:
             diffy = 1
         logging.info("Plot data from %s to %s", miny, maxy)
         rgb = numpy.zeros((leny, lenx, 3), dtype=numpy.uint8)
         for i, v in numpy.ndenumerate(data):
             # TODO: have the base at 0, instead of miny, so that negative values are columns going down
             h = leny - int(((v - miny) * leny) / diffy)
             rgb[h:-1, i[-1], :] = 255
     else:  # Greyscale (hopefully)
         mn, mx, mnp, mxp = ndimage.extrema(data)
         logging.info("Image data from %s to %s", mn, mx)
         rgb = img.DataArray2RGB(data)  # auto brightness/contrast
     self.app.spots, self.app.translation, self.app.scaling, self.app.rotation = FindGridSpots(
         data, self.gridsize)
     self.app.img = NDImage2wxImage(rgb)
     wx.CallAfter(self.app.update_view)
Example no. 19
def _saveAsPNG(filename, data):

    # TODO: store metadata

    # TODO: support RGB
    data = img.ensure2DImage(data)

    # TODO: it currently fails with large data, use gdal instead?
    #     tempdriver = gdal.GetDriverByName('MEM')
    #     tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
    #     tiledriver = gdal.GetDriverByName("png")
    #     tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
    #     tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

    # TODO: support greyscale png?
    # TODO: skip if already 8 bits
    # Convert to 8 bit RGB
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    scipy.misc.imsave(filename, rgb8)
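Note that scipy.misc.imsave has been deprecated and removed in recent SciPy releases; a Pillow-based replacement, as used in the png.py variant shown in Example no. 9, would be:

from PIL import Image

Image.fromarray(rgb8).save(filename, "PNG")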
Example no. 20
    def test_tint_int16(self):
        """test with tint, with the slow path"""
        size = (1024, 1024)
        depth = 4096
        grey_img = numpy.zeros(size, dtype="int16") + depth // 2
        grey_img[0, 0] = 0
        grey_img[0, 1] = depth - 1

        # white should become the same as the tint
        tint = (0, 73, 255)
        out = img.DataArray2RGB(grey_img, tint=tint)
        self.assertEqual(out.shape, size + (3, ))
        self.assertEqual(self.CountValues(out[:, :, 0]), 1)  # R
        self.assertEqual(self.CountValues(out[:, :, 1]), 3)  # G
        self.assertEqual(self.CountValues(out[:, :, 2]), 3)  # B

        pixel0 = out[0, 0]
        pixel1 = out[0, 1]
        pixelg = out[0, 2]
        numpy.testing.assert_array_equal(pixel1, list(tint))
        self.assertTrue(numpy.all(pixel0 <= pixel1))
        self.assertTrue(numpy.all(pixel0 <= pixelg))
        self.assertTrue(numpy.all(pixelg <= pixel1))
Example no. 21
    def get_spatial_spectrum(self, data=None, raw=False):
        """
        Project a spectrum cube (CYX) to XY space in RGB, by averaging the
          intensity over all the wavelengths (selected by the user)
        data (DataArray or None): if provided, will use the cube, otherwise,
          will use the whole data from the stream.
        raw (bool): if True, will return the "raw" values (ie, same data type as
          the original data). Otherwise, it will return an RGB image.
        return (DataArray YXC of uint8 or YX of same data type as data): average
          intensity over the selected wavelengths
        """
        if data is None:
            data = self._calibrated
        md = self._find_metadata(data.metadata)

        # pick only the data inside the bandwidth
        spec_range = self._get_bandwidth_in_pixel()
        logging.debug("Spectrum range picked: %s px", spec_range)

        if raw:
            av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data).astype(data.dtype)
            return model.DataArray(av_data, md)
        else:
            irange = self._getDisplayIRange() # will update histogram if not yet present

            if not self.fitToRGB.value:
                # TODO: use better intermediary type if possible?, cf semcomedi
                av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                rgbim = img.DataArray2RGB(av_data, irange)
            else:
                # Note: For now this method uses three independent bands. To give
                # a better sense of continuum, and be closer to reality when using
                # the visible light's band, we should take a weighted average of the
                # whole spectrum for each band. But in practice, that would be less
                # useful.

                # divide the range into 3 sub-ranges (B, G, R) of almost the same length
                len_rng = spec_range[1] - spec_range[0] + 1
                brange = [spec_range[0], int(round(spec_range[0] + len_rng / 3)) - 1]
                grange = [brange[1] + 1, int(round(spec_range[0] + 2 * len_rng / 3)) - 1]
                rrange = [grange[1] + 1, spec_range[1]]
                # ensure each range contains at least one pixel
                brange[1] = max(brange)
                grange[1] = max(grange)
                rrange[1] = max(rrange)

                # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
                av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                rgbim = img.DataArray2RGB(av_data, irange)
                av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                gim = img.DataArray2RGB(av_data, irange)
                rgbim[:, :, 1] = gim[:, :, 0]
                av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                bim = img.DataArray2RGB(av_data, irange)
                rgbim[:, :, 2] = bim[:, :, 0]

            rgbim.flags.writeable = False
            md[model.MD_DIMS] = "YXC" # RGB format

            return model.DataArray(rgbim, md)
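The note in get_spatial_spectrum mentions that a weighted average of the whole spectrum per band would be closer to reality than three independent sub-ranges. A hedged sketch of that idea follows; the helper name and the weights are illustrative, not part of Odemis:

import numpy

def weighted_band_average(data, spec_range, weights):
    """Hedged sketch: weighted average of a CYX spectrum cube over one colour band."""
    cube = data[spec_range[0]:spec_range[1] + 1]         # C x Y x X slice of the selected wavelengths
    return numpy.average(cube, axis=0, weights=weights)  # weights: one value per wavelength in the slice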
Example no. 22
    def get_line_spectrum(self):
        """
        Return the 1D spectrum representing the (average) spectrum
        See get_spectrum_range() to know the wavelength values for each index of
          the spectrum dimension
        return (None or DataArray with 3 dimensions): first axis (Y) is spatial
          (along the line), second axis (X) is spectrum, third axis (RGB) is
          colour (always greyscale).
          MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
          If the selected_line is not valid, it will return None
        """
        if (None, None) in self.selected_line.value:
            return None

        spec2d = self._calibrated[:, 0,
                                  0, :, :]  # same data but remove useless dims
        width = self.width.value

        # Number of points to return: the length of the line
        start, end = self.selected_line.value
        v = (end[0] - start[0], end[1] - start[1])
        l = math.hypot(*v)
        n = 1 + int(l)
        if l < 1:  # a line of just one pixel is considered not valid
            return None

        # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
        # The line is scanned from the end till the start so that the spectra
        # closest to the origin of the line are at the bottom.
        coord = numpy.empty((3, width, n, spec2d.shape[0]))
        coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
        coord_spc = coord.swapaxes(
            2, 3)  # just a view to have (line) space as last dim
        coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
        coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

        # Spread over the width
        # perpendicular unit vector
        pv = (-v[1] / l, v[0] / l)
        width_coord = numpy.empty((2, width))
        spread = (width - 1) / 2
        width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread,
                                         width)  # X axis
        width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread,
                                         width)  # Y axis

        coord_cw = coord[1:].swapaxes(0, 2).swapaxes(
            1, 3)  # view with coordinates and width as last dims
        coord_cw += width_coord

        # Interpolate the values based on the data
        if width == 1:
            # simple version for the most usual case
            spec1d = ndimage.map_coordinates(spec2d,
                                             coord[:, 0, :, :],
                                             order=2)
        else:
            # force the intermediate values to float, as mean() still needs to run
            # builtin float: numpy.float was removed in recent NumPy releases
            spec1d_w = ndimage.map_coordinates(spec2d, coord,
                                               output=float, order=2)
            spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
        assert spec1d.shape == (n, spec2d.shape[0])

        # Scale and convert to RGB image
        hist, edges = img.histogram(spec1d)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(spec1d, irange)

        # Use metadata to indicate the spatial distance between pixels
        pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
        pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
        md = {
            MD_PIXEL_SIZE: (None, pxs)
        }  # for the spectrum, use get_spectrum_range()
        return model.DataArray(rgb8, md)
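The coordinate bookkeeping in get_line_spectrum ultimately samples the spectrum cube along a segment with scipy.ndimage.map_coordinates. A minimal, width-1 version of that technique on a plain 2D array (a standalone sketch, not the stream code):

import numpy
from scipy import ndimage

def sample_along_line(arr2d, start, end, n, order=2):
    """Hedged sketch: interpolate n values of arr2d along the segment from start to end, given as (x, y) points."""
    xs = numpy.linspace(start[0], end[0], n)
    ys = numpy.linspace(start[1], end[1], n)
    # map_coordinates expects coordinates in array-axis order, i.e. (row, column) = (y, x)
    return ndimage.map_coordinates(arr2d, numpy.vstack([ys, xs]), order=order)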
Example no. 23
    def _updateImageAverage(self, data):
        if self.auto_bc.value:
            # The histogram might be slightly old, but not too much
            irange = img.findOptimalRange(self.histogram._full_hist,
                                          self.histogram._edges,
                                          self.auto_bc_outliers.value / 100)

            # Also update the intensityRanges if auto BC
            edges = self.histogram._edges
            rrange = [(v - edges[0]) / (edges[1] - edges[0]) for v in irange]
            self.intensityRange.value = tuple(rrange)
        else:
            # just convert from the user-defined (as ratio) to actual values
            rrange = sorted(self.intensityRange.value)
            edges = self.histogram._edges
            irange = [edges[0] + (edges[1] - edges[0]) * v for v in rrange]

        # pick only the data inside the bandwidth
        spec_range = self._get_bandwidth_in_pixel()
        logging.debug("Spectrum range picked: %s px", spec_range)

        if not self.fitToRGB.value:
            # TODO: use better intermediary type if possible?, cf semcomedi
            av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
        else:
            # Note: For now this method uses three independent bands. To give
            # a better sense of continuum, and be closer to reality when using
            # the visible light's band, we should take a weighted average of the
            # whole spectrum for each band.

            # divide the range into 3 sub-ranges of almost the same length
            len_rng = spec_range[1] - spec_range[0] + 1
            rrange = [
                spec_range[0],
                int(round(spec_range[0] + len_rng / 3)) - 1
            ]
            grange = [
                rrange[1] + 1,
                int(round(spec_range[0] + 2 * len_rng / 3)) - 1
            ]
            brange = [grange[1] + 1, spec_range[1]]
            # ensure each range contains at least one pixel
            rrange[1] = max(rrange)
            grange[1] = max(grange)
            brange[1] = max(brange)

            # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
            av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
            av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            gim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 1] = gim[:, :, 0]
            av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            bim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 2] = bim[:, :, 0]

        rgbim.flags.writeable = False
        self.image.value = model.DataArray(rgbim,
                                           self._find_metadata(data.metadata))
Example no. 24
    def test_spec_1d(self):
        """Test StaticSpectrumStream 1D"""
        spec = self._create_spec_data()
        specs = stream.StaticSpectrumStream("test", spec)

        # Check 1d spectrum on corner-case: parallel to the X axis
        specs.selected_line.value = [(3, 7), (3, 65)]
        sp1d = specs.get_line_spectrum()
        wl1d = specs.get_spectrum_range()
        self.assertEqual(sp1d.ndim, 3)
        self.assertEqual(sp1d.shape, (65 - 7 + 1, spec.shape[0], 3))
        self.assertEqual(sp1d.dtype, numpy.uint8)
        self.assertEqual(wl1d.shape, (spec.shape[0], ))
        self.assertEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                         spec.metadata[model.MD_PIXEL_SIZE][0])

        # compare to doing it manually, by cutting the band at 3
        sp1d_raw_ex = spec[:, 0, 0, 65:6:-1, 3]
        # make it contiguous to be sure to get the fast conversion, because
        # there are (still) some minor differences with the slow conversion
        sp1d_raw_ex = numpy.ascontiguousarray(sp1d_raw_ex.swapaxes(0, 1))

        # Need to convert to RGB to compare
        hist, edges = img.histogram(sp1d_raw_ex)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        sp1d_rgb_ex = img.DataArray2RGB(sp1d_raw_ex, irange)
        numpy.testing.assert_equal(sp1d, sp1d_rgb_ex)

        # Check 1d spectrum in diagonal
        specs.selected_line.value = [(30, 65), (1, 1)]
        sp1d = specs.get_line_spectrum()
        wl1d = specs.get_spectrum_range()
        self.assertEqual(sp1d.ndim, 3)
        # There are not too many expectations on the size of the spatial axis
        self.assertTrue(29 <= sp1d.shape[0] <= (64 * 1.41))
        self.assertEqual(sp1d.shape[1], spec.shape[0])
        self.assertEqual(sp1d.shape[2], 3)
        self.assertEqual(sp1d.dtype, numpy.uint8)
        self.assertEqual(wl1d.shape, (spec.shape[0], ))
        self.assertGreaterEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                                spec.metadata[model.MD_PIXEL_SIZE][0])

        # Check 1d with larger width
        specs.selected_line.value = [(30, 65), (5, 1)]
        specs.width.value = 12
        sp1d = specs.get_line_spectrum()
        wl1d = specs.get_spectrum_range()
        self.assertEqual(sp1d.ndim, 3)
        # There are not too many expectations on the size of the spatial axis
        self.assertTrue(29 <= sp1d.shape[0] <= (64 * 1.41))
        self.assertEqual(sp1d.shape[1], spec.shape[0])
        self.assertEqual(sp1d.shape[2], 3)
        self.assertEqual(sp1d.dtype, numpy.uint8)
        self.assertEqual(wl1d.shape, (spec.shape[0], ))

        specs.selected_line.value = [(30, 65), (5, 12)]
        specs.width.value = 13  # brings bad luck?
        sp1d = specs.get_line_spectrum()
        wl1d = specs.get_spectrum_range()
        self.assertEqual(sp1d.ndim, 3)
        # There are not too many expectations on the size of the spatial axis
        self.assertTrue(29 <= sp1d.shape[0] <= (53 * 1.41))
        self.assertEqual(sp1d.shape[1], spec.shape[0])
        self.assertEqual(sp1d.shape[2], 3)
        self.assertEqual(sp1d.dtype, numpy.uint8)
        self.assertEqual(wl1d.shape, (spec.shape[0], ))
Example no. 25
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """

    # arguments handling
    parser = argparse.ArgumentParser(
        description="Automated AR acquisition at multiple spot locations")

    parser.add_argument(
        "--repetitions_x",
        "-x",
        dest="repetitions_x",
        required=True,
        help=
        "repetitions defines the number of CL spots in the grid (x dimension)")
    parser.add_argument(
        "--repetitions_y",
        "-y",
        dest="repetitions_y",
        required=True,
        help=
        "repetitions defines the number of CL spots in the grid (y dimension)")
    parser.add_argument(
        "--dwell_time",
        "-t",
        dest="dwell_time",
        required=True,
        help="dwell_time indicates the time to scan each spot (unit: s)")
    parser.add_argument(
        "--max_allowed_diff",
        "-d",
        dest="max_allowed_diff",
        required=True,
        help=
        "max_allowed_diff indicates the maximum allowed difference in electron coordinates (unit: m)"
    )

    options = parser.parse_args(args[1:])
    repetitions = (int(options.repetitions_x), int(options.repetitions_y))
    dwell_time = float(options.dwell_time)
    max_allowed_diff = float(options.max_allowed_diff)

    try:
        escan = None
        detector = None
        ccd = None
        # find components by their role
        for c in model.getComponents():
            if c.role == "e-beam":
                escan = c
            elif c.role == "se-detector":
                detector = c
            elif c.role == "ccd":
                ccd = c
        if not all([escan, detector, ccd]):
            logging.error("Failed to find all the components")
            raise KeyError("Not all components found")

        # ccd.data.get()
        gscanner = GridScanner(repetitions, dwell_time, escan, ccd, detector)

        # Wait for ScanGrid to finish
        optical_image, electron_coordinates, electron_scale = gscanner.DoAcquisition(
        )
        hdf5.export("scanned_image.h5", optical_image)
        logging.debug("electron coord = %s", electron_coordinates)

        ############## TO BE REMOVED ON TESTING##############
        #        grid_data = hdf5.read_data("scanned_image.h5")
        #        C, T, Z, Y, X = grid_data[0].shape
        #        grid_data[0].shape = Y, X
        #        optical_image = grid_data[0]
        #####################################################

        logging.debug("Isolating spots...")
        opxs = optical_image.metadata[model.MD_PIXEL_SIZE]
        optical_dist = escan.pixelSize.value[0] * electron_scale[0] / opxs[0]
        subimages, subimage_coordinates = coordinates.DivideInNeighborhoods(
            optical_image, repetitions, optical_dist)
        logging.debug("Number of spots found: %d", len(subimages))

        hdf5.export("spot_found.h5", subimages, thumbnail=None)
        logging.debug("Finding spot centers...")
        spot_coordinates = spot.FindCenterCoordinates(subimages)
        logging.debug("center coord = %s", spot_coordinates)
        optical_coordinates = coordinates.ReconstructCoordinates(
            subimage_coordinates, spot_coordinates)
        logging.debug(optical_coordinates)
        rgb_optical = img.DataArray2RGB(optical_image)

        for ta in optical_coordinates:
            # slice indices must be ints, and in-place float multiplication is not
            # allowed on a uint8 array, so round the coordinates and halve with //=
            x, y = int(round(ta[0])), int(round(ta[1]))
            rgb_optical[y - 1:y + 1, x - 1:x + 1, 0] = 255
            rgb_optical[y - 1:y + 1, x - 1:x + 1, 1] //= 2
            rgb_optical[y - 1:y + 1, x - 1:x + 1, 2] //= 2

        misc.imsave('spots_image.png', rgb_optical)

        # TODO: Make function for scale calculation
        sorted_coordinates = sorted(optical_coordinates,
                                    key=lambda tup: tup[1])
        tab = tuple(
            map(operator.sub, sorted_coordinates[0], sorted_coordinates[1]))
        optical_scale = math.hypot(tab[0], tab[1])
        scale = electron_scale[0] / optical_scale
        print(scale)

        # max_allowed_diff in pixels
        max_allowed_diff_px = max_allowed_diff / escan.pixelSize.value[0]

        logging.debug("Matching coordinates...")
        known_electron_coordinates, known_optical_coordinates, max_diff = coordinates.MatchCoordinates(
            optical_coordinates, electron_coordinates, scale,
            max_allowed_diff_px)

        logging.debug("Calculating transformation...")
        (calc_translation_x, calc_translation_y), (
            calc_scaling_x,
            calc_scaling_y), calc_rotation = transform.CalculateTransform(
                known_electron_coordinates, known_optical_coordinates)
        logging.debug("Electron->Optical: ")
        print(calc_translation_x, calc_translation_y, calc_scaling_x,
              calc_scaling_y, calc_rotation)
        final_electron = coordinates._TransformCoordinates(
            known_optical_coordinates,
            (calc_translation_x, calc_translation_y), calc_rotation,
            (calc_scaling_x, calc_scaling_y))

        logging.debug("Overlay done.")

        # Calculate distance between the expected and found electron coordinates
        coord_diff = []
        for ta, tb in zip(final_electron, known_electron_coordinates):
            tab = tuple(map(operator.sub, ta, tb))
            coord_diff.append(math.hypot(tab[0], tab[1]))

        mean_difference = numpy.mean(coord_diff) * escan.pixelSize.value[0]

        variance_sum = 0
        for i in range(0, len(coord_diff)):
            variance_sum += (mean_difference - coord_diff[i])**2
        variance = (variance_sum / len(coord_diff)) * escan.pixelSize.value[0]

        not_found_spots = len(electron_coordinates) - len(final_electron)

        # Generate overlay image
        logging.debug("Generating images...")
        (calc_translation_x, calc_translation_y), (
            calc_scaling_x,
            calc_scaling_y), calc_rotation = transform.CalculateTransform(
                known_optical_coordinates, known_electron_coordinates)
        logging.debug("Optical->Electron: ")
        print(calc_translation_x, calc_translation_y, calc_scaling_x,
              calc_scaling_y, calc_rotation)
        overlay_coordinates = coordinates._TransformCoordinates(
            known_electron_coordinates,
            (calc_translation_y, calc_translation_x), -calc_rotation,
            (calc_scaling_x, calc_scaling_y))

        for ta in overlay_coordinates:
            y, x = int(round(ta[0])), int(round(ta[1]))  # coordinates may be floats
            rgb_optical[y - 1:y + 1, x - 1:x + 1, 1] = 255

        misc.imsave('overlay_image.png', rgb_optical)
        misc.imsave('optical_image.png', optical_image)
        logging.debug(
            "Done. Check electron_image.png, optical_image.png and overlay_image.png."
        )

    except:
        logging.exception("Unexpected error while performing action.")
        return 127

    logging.info(
        "\n**Overlay precision stats (Resulted to expected electron coordinates comparison)**\n Mean distance: %f (unit: m)\n Variance: %f (unit: m)\n Not found spots: %d",
        mean_difference, variance, not_found_spots)
    return 0
Example no. 26
    def new_image(self, data):
        """
        Update the window with the new image (the window is resized to show the image
        at a 1:1 ratio)
        data (numpy.ndarray): a 2D array containing the image (can be 3D if in RGB)
        """
        if data.ndim == 3 and 3 in data.shape:  # RGB
            rgb = img.ensureYXC(data)
        elif numpy.prod(data.shape) == 1:  # single point => show text
            text = "%g" % (data.flat[0], )
            logging.info("Data value is: %s", text)
            # Create a big enough white space (30x200 px) of BGRA format
            rgb = numpy.empty((30, 200, 4), dtype=numpy.uint8)
            rgb.fill(255)
            # Get a Cairo context for that image
            surface = cairo.ImageSurface.create_for_data(
                rgb, cairo.FORMAT_ARGB32, rgb.shape[1], rgb.shape[0])
            ctx = cairo.Context(surface)
            # Draw a black text of 20 px high
            ctx.set_source_rgb(0, 0, 0)
            ctx.select_font_face("Sans", cairo.FONT_SLANT_NORMAL)
            ctx.set_font_size(20)
            ctx.move_to(5, 20)
            ctx.show_text(text)
            del ctx  # ensure the context is flushed
        elif numpy.prod(data.shape) == data.shape[-1]:  # 1D image => bar plot
            # TODO: add "(plot)" to the window title
            # Create a simple bar plot of X x 400 px
            lenx = data.shape[-1]
            if lenx > MAX_WIDTH:
                binning = lenx // MAX_WIDTH
                data = data[..., 0::binning]
                logging.debug("Compressed data from %d to %d elements", lenx,
                              data.shape[-1])
                lenx = data.shape[-1]
            leny = 400
            miny = min(0, data.min())
            maxy = data.max()
            diffy = maxy - miny
            if diffy == 0:
                diffy = 1
            logging.info("Plot data from %s to %s", miny, maxy)
            rgb = numpy.zeros((leny, lenx, 3), dtype=numpy.uint8)
            for i, v in numpy.ndenumerate(data):
                # TODO: have the base at 0, instead of miny, so that negative values are columns going down
                h = leny - int(((v - miny) * leny) / diffy)
                rgb[h:-1, i[-1], :] = 255
        else:  # Greyscale (hopefully)
            mn, mx, mnp, mxp = ndimage.extrema(data)
            logging.info("Image data from %s to %s", mn, mx)
            rgb = img.DataArray2RGB(data)  # auto brightness/contrast

        self.app.img = NDImage2wxImage(rgb)
        disp_size = wx.Display(0).GetGeometry().GetSize()
        if disp_size[0] < rgb.shape[1] or disp_size[1] < rgb.shape[0]:
            asp_ratio = rgb.shape[1] / rgb.shape[0]
            if disp_size[0] / disp_size[1] < asp_ratio:
                self.app.magn = disp_size[0] / rgb.shape[1]
            else:
                self.app.magn = disp_size[1] / rgb.shape[0]
            self.app.img.Rescale(rgb.shape[1] * self.app.magn,
                                 rgb.shape[0] * self.app.magn, rgb.shape[2])
        else:
            self.app.magn = 1

        wx.CallAfter(self.app.update_view)
Example no. 27
    def get_line_spectrum(self, raw=False):
        """ Return the 1D spectrum representing the (average) spectrum

        Call get_spectrum_range() to know the wavelength values for each index
          of the spectrum dimension.
        raw (bool): if True, will return the "raw" values (ie, same data type as
          the original data). Otherwise, it will return an RGB image.
        return (None or DataArray with 3 dimensions): first axis (Y) is spatial
          (along the line), second axis (X) is spectrum. If not raw, third axis
          is colour (RGB, but actually always greyscale). Note: when not raw,
          the beginning of the line (Y) is at the "bottom".
          MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
          If the selected_line is not valid, it will return None
        """

        if (None, None) in self.selected_line.value:
            return None

        spec2d = self._calibrated[:, 0, 0, :, :] # same data but remove useless dims
        width = self.selectionWidth.value

        # Number of points to return: the length of the line
        start, end = self.selected_line.value
        v = (end[0] - start[0], end[1] - start[1])
        l = math.hypot(*v)
        n = 1 + int(l)
        if l < 1: # a line of just one pixel is considered not valid
            return None

        # FIXME: if the data has a width of 1 (ie, just a line), and the
        # requested width is an even number, the output is empty (because all
        # the interpolated points are outside of the data).

        # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
        # The line is scanned from the end till the start so that the spectra
        # closest to the origin of the line are at the bottom.
        coord = numpy.empty((3, width, n, spec2d.shape[0]))
        coord[0] = numpy.arange(spec2d.shape[0]) # spectra = all
        coord_spc = coord.swapaxes(2, 3) # just a view to have (line) space as last dim
        coord_spc[-1] = numpy.linspace(end[0], start[0], n) # X axis
        coord_spc[-2] = numpy.linspace(end[1], start[1], n) # Y axis

        # Spread over the width
        # perpendicular unit vector
        pv = (-v[1] / l, v[0] / l)
        width_coord = numpy.empty((2, width))
        spread = (width - 1) / 2
        width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width) # X axis
        width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width) # Y axis

        coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3) # view with coordinates and width as last dims
        coord_cw += width_coord

        # Interpolate the values based on the data
        if width == 1:
            # simple version for the most usual case
            spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
        else:
            # FIXME: the mean should be dependent on how many pixels inside the
            # original data were pick on each line. Currently if some pixels fall
            # out of the original data, the outside pixels count as 0.
            # force the intermediate values to float, as mean() still needs to run
            spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=1)  # builtin float: numpy.float is removed in recent NumPy
            spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
        assert spec1d.shape == (n, spec2d.shape[0])

        # Use metadata to indicate the spatial distance between pixels
        pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
        pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
        md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()

        if raw:
            return model.DataArray(spec1d[::-1, :], md)
        else:
            # Scale and convert to RGB image
            if self.auto_bc.value:
                hist, edges = img.histogram(spec1d)
                irange = img.findOptimalRange(hist, edges,
                                              self.auto_bc_outliers.value / 100)
            else:
                # use the values requested by the user
                irange = sorted(self.intensityRange.value)
            rgb8 = img.DataArray2RGB(spec1d, irange)

            return model.DataArray(rgb8, md)