Code example #1
    def set_image(self, arr, idx=0):
        arr = asarray(grayspace(arr))

        self.plotdata.set_data('imagedata{:03d}'.format(idx), arr)
        # invoke_in_main_thread(self.container.invalidate_and_redraw)
        #         self.container.invalidate_and_redraw()
        self.container.request_redraw()
Code example #2
    def _preprocess(self, frame, stretch_intensity=True, blur=1, denoise=0):
        """
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        """
        if len(frame.shape) != 2:
            frm = grayspace(frame) * 255
        else:
            frm = frame / self.pixel_depth * 255

        frm = frm.astype('uint8')

        # self.preprocessed_frame = frame
        # if denoise:
        #     frm = self._denoise(frm, weight=denoise)
        # print 'gray', frm.shape
        if blur:
            frm = gaussian(frm, blur) * 255
            frm = frm.astype('uint8')

            # frm1 = gaussian(self.preprocessed_frame, blur,
            #                 multichannel=True) * 255
            # self.preprocessed_frame = frm1.astype('uint8')

        if stretch_intensity:
            frm = rescale_intensity(frm)
            # frm = self._contrast_equalization(frm)
            # self.preprocessed_frame = self._contrast_equalization(self.preprocessed_frame)

        return frm
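
The docstring above describes a three-step pipeline: grayscale conversion, optional Gaussian blur, and contrast stretching. Below is a minimal standalone sketch of the same steps using plain scikit-image calls, with `rgb2gray` standing in for pychron's `grayspace` helper (an assumption; the real helper may scale its output differently):

import numpy as np
from skimage.color import rgb2gray
from skimage.filters import gaussian
from skimage.exposure import rescale_intensity


def preprocess(frame, stretch_intensity=True, blur=1):
    # 1. convert to 8-bit grayscale; rgb2gray returns floats in [0, 1]
    if frame.ndim != 2:
        frm = rgb2gray(frame) * 255
    else:
        frm = np.asarray(frame, dtype=float)  # assumes an 8-bit grayscale input
    frm = frm.astype('uint8')

    # 2. optional Gaussian blur; gaussian() also returns floats in [0, 1]
    if blur:
        frm = (gaussian(frm, blur) * 255).astype('uint8')

    # 3. stretch the intensities to cover the full 0-255 range
    if stretch_intensity:
        frm = rescale_intensity(frm)
    return frm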
Code example #3
    def _preprocess(self, frame, contrast=True, blur=1, denoise=0):
        """
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        """

        frm = grayspace(frame) * 255
        frm = frm.astype('uint8')

        self.preprocessed_frame = frame
        # if denoise:
        #     frm = self._denoise(frm, weight=denoise)
        # print 'gray', frm.shape
        if blur:
            frm = gaussian_filter(frm, blur) * 255
            frm = frm.astype('uint8')

            frm1 = gaussian_filter(
                self.preprocessed_frame, blur, multichannel=True) * 255
            self.preprocessed_frame = frm1.astype('uint8')

        if contrast:
            frm = self._contrast_equalization(frm)
            self.preprocessed_frame = self._contrast_equalization(
                self.preprocessed_frame)

        return frm
Code example #4
File: zoom_calibration.py  Project: sgallet/pychron
    def _calculate_spacing(self, im):
        h, w, d = im.shape
        #         cw, ch = 600, 600
        cw, ch = 300, 300
        cx = (w - cw) / 2
        cy = (h - ch) / 2
        im = crop(im, cx, cy, cw, ch)
        #         d = self.test_image.plotdata.get_data('imagedata000')
        d = grayspace(im)
        #         d /= 255.
        #         edges = filter.canny(d, sigma=3,
        # #                              low_threshold=0,
        # #                              high_threshold=
        #                              )
        #         edges = edges.astype('uint8')
        #         edges = vsobel(d)
        edges = sobel(d)

        nim = zeros_like(edges)
        nim[edges > 0.1] = 255
        edges = nim
        self.test_image.set_image(edges)

        hspace, angles, dists = hough_line(edges)

        self.test_image.set_image(hspace, 1)

        _hspace, angles, dists = hough_peaks(
            hspace,
            angles,
            dists,
        )
        nim = zeros_like(edges)
        h, w, d = im.shape
        xs = []

        for ti, di in zip(angles, dists):
            ai = math.degrees(ti) + 180
            di = abs(int(round(di)))
            aa = abs(ai - 90) < 1
            bb = abs(ai - 270) < 1
            if aa or bb:
                adi = abs(di)
                coords = line(0, adi, h - 1, adi)
                nim[coords] = 200
                xs.append(di)

        self.test_image.set_image(nim, 2)
        xs.sort()
        # compute difference between each pair
        dxs = diff(xs)
        print(dxs)
        dd = sorted(dxs)[1:]
        print(dd)
        while len(dd):
            if std(dd) < 3:
                print(dd)
                return mean(dd) * 4  # each bar =0.25mm
            else:
                dd = dd[:-1]
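
The tail of `_calculate_spacing` reduces the detected line positions to a single number: sort the x positions, take pairwise differences, drop the smallest gap, then discard the largest gaps until the remainder is consistent (std < 3 px); the `* 4` appears to convert the mean gap per bar into pixels per millimetre, given the `each bar = 0.25 mm` comment. A minimal standalone sketch of just that reduction (the function name and the toy input are illustrative only):

import numpy as np


def spacing_from_line_positions(xs, std_tol=3, mm_per_bar=0.25):
    """Estimate pixels-per-millimetre from detected vertical-line x positions."""
    xs = np.sort(np.asarray(xs))
    dxs = np.diff(xs)          # gap between neighbouring lines, in pixels
    dd = np.sort(dxs)[1:]      # drop the smallest gap (e.g. a double detection)
    while len(dd):
        if np.std(dd) < std_tol:
            # consistent set of gaps: mean pixels per bar / bar width in mm
            return np.mean(dd) / mm_per_bar
        dd = dd[:-1]           # otherwise discard the largest remaining gap
    return None


# lines roughly 12 px apart plus one outlier -> about 50 px per mm
print(spacing_from_line_positions([10, 22, 34, 47, 99]))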
Code example #5
File: mv_image.py  Project: NMGRL/pychron
    def set_image(self, arr, idx=0):
        arr = asarray(grayspace(arr))

        self.plotdata.set_data('imagedata{:03d}'.format(idx), arr)
        # invoke_in_main_thread(self.container.invalidate_and_redraw)
#         self.container.invalidate_and_redraw()
        self.container.request_redraw()
Code example #6
File: locator.py  Project: kenlchen/pychron
    def _preprocess(self, frame,
                    contrast=True, blur=1, denoise=0):
        """
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        """

        frm = grayspace(frame) * 255
        frm = frm.astype('uint8')

        self.preprocessed_frame = frame
        # if denoise:
        #     frm = self._denoise(frm, weight=denoise)
        # print 'gray', frm.shape
        if blur:
            frm = gaussian_filter(frm, blur) * 255
            frm = frm.astype('uint8')

            frm1 = gaussian_filter(self.preprocessed_frame, blur,
                                   multichannel=True) * 255
            self.preprocessed_frame = frm1.astype('uint8')

        if contrast:
            frm = self._contrast_equalization(frm)
            self.preprocessed_frame = self._contrast_equalization(
                    self.preprocessed_frame)

        return frm
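
Examples #3 and #6 call a `self._contrast_equalization` helper that is not shown on this page. A plausible minimal sketch, assuming simple histogram equalization from scikit-image (pychron's actual helper may use a different method):

from skimage.exposure import equalize_hist


def contrast_equalization(frm):
    # equalize_hist returns floats in [0, 1]; convert back to 8-bit
    return (equalize_hist(frm) * 255).astype('uint8')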
Code example #7
File: locator.py  Project: NMGRL/pychron
    def _preprocess(self, frame, stretch_intensity=True, blur=1, denoise=0):
        """
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        """
        if len(frame.shape) != 2:
            frm = grayspace(frame)
        else:
            frm = frame / self.pixel_depth * 255

        frm = frm.astype('uint8')

        # self.preprocessed_frame = frame
        # if denoise:
        #     frm = self._denoise(frm, weight=denoise)
        # print 'gray', frm.shape
        if blur:
            frm = gaussian(frm, blur) * 255
            frm = frm.astype('uint8')

            # frm1 = gaussian(self.preprocessed_frame, blur,
            #                 multichannel=True) * 255
            # self.preprocessed_frame = frm1.astype('uint8')

        if stretch_intensity:
            frm = rescale_intensity(frm)
            # frm = self._contrast_equalization(frm)
            # self.preprocessed_frame = self._contrast_equalization(self.preprocessed_frame)

        return frm
Code example #8
    def _calculate_spacing(self, im):
        h, w, d = im.shape
#         cw, ch = 600, 600
        cw, ch = 300, 300
        cx = (w - cw) / 2
        cy = (h - ch) / 2
        im = crop(im, cx, cy, cw, ch)
#         d = self.test_image.plotdata.get_data('imagedata000')
        d = grayspace(im)
#         d /= 255.
#         edges = filter.canny(d, sigma=3,
# #                              low_threshold=0,
# #                              high_threshold=
#                              )
#         edges = edges.astype('uint8')
#         edges = vsobel(d)
        edges = sobel(d)

        nim = zeros_like(edges)
        nim[edges > 0.1] = 255
        edges = nim
        self.test_image.set_image(edges)


        hspace, angles, dists = hough_line(edges)

        self.test_image.set_image(hspace, 1)

        _hspace, angles, dists = hough_peaks(hspace, angles, dists)
        nim = zeros_like(edges)
        h, w, d = im.shape
        xs = []

        for ti, di in zip(angles, dists):
            ai = math.degrees(ti) + 180
            di = abs(int(round(di)))
            aa = abs(ai - 90) < 1
            bb = abs(ai - 270) < 1
            if aa or bb:
                adi = abs(di)
                coords = line(0, adi, h - 1, adi)
                nim[coords] = 200
                xs.append(di)

        self.test_image.set_image(nim, 2)
        xs.sort()
        # compute difference between each pair
        dxs = diff(xs)
        print(dxs)
        dd = sorted(dxs)[1:]
        print(dd)
        while len(dd):
            if std(dd) < 3:
                print(dd)
                return mean(dd) * 4  # each bar =0.25mm
            else:
                dd = dd[:-1]
Code example #9
File: locator.py  Project: sgallet/pychron
    def _find_targets(self,
                      image,
                      frame,
                      dim,
                      n=20,
                      w=10,
                      start=None,
                      step=1,
                      preprocess=False,
                      filter_targets=True,
                      depth=0,
                      set_image=True):
        '''
            use a segmentor to segment the image
        '''

        if preprocess:
            src = self._preprocess(frame)
        else:
            src = grayspace(frame)

#         src = array(src)
#         self.test_image.setup_images(1, (640, 480))
#         self.test_image.set_image(src)
        seg = RegionSegmenter(use_adaptive_threshold=False)

        if start is None:
            start = int(array(src).mean()) - 3 * w

        fa = self._get_filter_target_area(dim)

        for i in range(n):
            seg.threshold_low = max((0, start + i * step - w))
            seg.threshold_high = max((1, min((255, start + i * step + w))))

            seg.block_size += 5
            nsrc = seg.segment(src)

            nf = colorspace(nsrc)
            #             nf = array(colorspace(nsrc))

            # draw contours

            targets = self._find_polygon_targets(nsrc, frame=nf)
            if targets:
                if set_image:
                    image.set_frame(nf)
                # filter targets
                if filter_targets:
                    targets = self._filter_targets(image, frame, dim, targets,
                                                   fa)

            if targets:
                return targets
Code example #10
File: locator.py  Project: UManPychron/pychron
    def _find_targets(self, image, frame, dim, n=20, w=10, start=None, step=1,
                      preprocess=False,
                      filter_targets=True,
                      depth=0,
                      set_image=True):
        '''
            use a segmentor to segment the image
        '''

        if preprocess:
            src = self._preprocess(frame)
        else:
            src = grayspace(frame)

#         src = array(src)
#         self.test_image.setup_images(1, (640, 480))
#         self.test_image.set_image(src)
        seg = RegionSegmenter(use_adaptive_threshold=False)

        if start is None:
            start = int(array(src).mean()) - 3 * w

        fa = self._get_filter_target_area(dim)

        for i in range(n):
            seg.threshold_low = max((0, start + i * step - w))
            seg.threshold_high = max((1, min((255, start + i * step + w))))

            seg.block_size += 5
            nsrc = seg.segment(src)

            nf = colorspace(nsrc)
#             nf = array(colorspace(nsrc))

            # draw contours
            targets = self._find_polygon_targets(nsrc, frame=nf)
            if targets:
                if set_image:
                    image.set_frame(nf)
                # filter targets
                if filter_targets:
                    targets = self._filter_targets(image, frame, dim, targets, fa)

            if targets:
                return targets
Code example #11
File: zoom_calibration.py  Project: sgallet/pychron
    def _test2(self):
        self.test_image.setup_images(
            3,
            #                                     (475, 613)
            (640, 480))

        root = '/Users/ross/Pychrondata_demo/data/snapshots/scan6'
        p = os.path.join(root, '008.jpg')
        im = load_image(p)
        im = grayspace(im)
        self.test_image.set_image(im)

        nim = zeros_like(im)
        nim[((im > 100) & (im < 200))] = 255
        self.test_image.set_image(nim, 1)
Code example #12
    def _test2(self):
        self.test_image.setup_images(3,
                                     # (475, 613)
                                     (640, 480))

        root = '/Users/ross/Pychrondata_demo/data/snapshots/scan6'
        p = os.path.join(root, '008.jpg')
        im = load_image(p)
        im = grayspace(im)
        self.test_image.set_image(im)

        nim = zeros_like(im)
        nim[((im > 100) & (im < 200))] = 255
        self.test_image.set_image(nim, 1)
Code example #13
File: locator.py  Project: sgallet/pychron
    def _preprocess(self, frame, contrast=True, denoise=0):
        '''
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        '''

        frm = grayspace(frame) * 255
        frm = frm.astype('uint8')
        # preprocess
        if denoise:
            frm = self._denoise(frm, weight=denoise)
# #        contrast = False

        if contrast:
            frm = self._contrast_equalization(frm)

        return frm
Code example #14
File: locator.py  Project: UManPychron/pychron
    def _preprocess(self, frame,
                    contrast=True, denoise=0):
        '''
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        '''

        frm = grayspace(frame) * 255
        frm = frm.astype('uint8')
        # preprocess
        if denoise:
            frm = self._denoise(frm, weight=denoise)
# #        contrast = False

        if contrast:
            frm = self._contrast_equalization(frm)

        return frm
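
Examples #13 and #14 also call a `self._denoise(frm, weight=denoise)` helper that is not reproduced here. A plausible minimal sketch, assuming total-variation denoising from scikit-image, where a larger weight removes more noise (the real pychron helper may differ):

from skimage.restoration import denoise_tv_chambolle


def denoise(frm, weight=1):
    # denoise_tv_chambolle expects floats in [0, 1]; higher weight = smoother
    img = denoise_tv_chambolle(frm.astype(float) / 255, weight=weight)
    return (img * 255).astype('uint8')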
Code example #15
    def _calculate_focus_measure(self, src, operator, roi):
        '''
            see
            IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
            FOR DIGITAL STILL CAMERA
            DOI 10.1109/30.468047
            and
            http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
        '''

        # need to resize to 640,480. this is the space the roi is in
        #        s = resize(grayspace(src), 640, 480)
        src = grayspace(src)
        v = crop(src, *roi)

        di = dict(var=lambda x: variance(x),
                  laplace=lambda x: get_focus_measure(x, 'laplace'),
                  sobel=lambda x: ndsum(
                      generic_gradient_magnitude(x, sobel, mode='nearest')))

        func = di[operator]
        return func(v)
Code example #16
File: autofocus_manager.py  Project: NMGRL/pychron
    def _calculate_focus_measure(self, src, operator, roi):
        '''
            see
            IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
            FOR DIGITAL STILL CAMERA
            DOI 10.1109/30.468047
            and
            http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
        '''

        # need to resize to 640,480. this is the space the roi is in
#        s = resize(grayspace(src), 640, 480)
        src = grayspace(src)
        v = crop(src, *roi)

        di = dict(var=lambda x: variance(x),
                  laplace=lambda x: get_focus_measure(x, 'laplace'),
                  sobel=lambda x: ndsum(
                      generic_gradient_magnitude(x, sobel, mode='nearest')))

        func = di[operator]
        return func(v)
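
Both focus-measure variants above score a cropped grayscale ROI with one of three sharpness metrics: intensity variance, a Laplace-based measure via pychron's `get_focus_measure`, or the summed Sobel gradient magnitude. A minimal usage-style sketch of the variance and Sobel metrics with plain NumPy/SciPy; `focus_measure` and the toy images are illustrative, and the Laplace branch is omitted because it depends on the pychron helper:

import numpy as np
from scipy.ndimage import generic_gradient_magnitude, sobel


def focus_measure(gray, operator='var'):
    """Scalar sharpness score for a grayscale ROI; larger means better focus."""
    if operator == 'var':
        return gray.var()                  # intensity variance
    if operator == 'sobel':
        # sum of gradient magnitudes, as in the examples above
        return generic_gradient_magnitude(gray.astype(float), sobel,
                                           mode='nearest').sum()
    raise ValueError(operator)


# a flat patch scores lower than a structured one under both metrics
flat = np.full((64, 64), 128.0)
busy = np.random.default_rng(0).integers(0, 255, (64, 64)).astype(float)
print(focus_measure(flat) < focus_measure(busy))
print(focus_measure(flat, 'sobel') < focus_measure(busy, 'sobel'))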
Code example #17
    def _gray_image(self, src):
        return grayspace(src)
Code example #18
    def _find_targets(self,
                      image,
                      frame,
                      dim,
                      shape='circle',
                      search=None,
                      preprocess=True,
                      filter_targets=True,
                      convexity_filter=False,
                      mask=False,
                      set_image=True,
                      inverted=False):
        """
            use a segmentor to segment the image
        """

        if search is None:
            search = {}

        if preprocess:
            if not isinstance(preprocess, dict):
                preprocess = {}
            src = self._preprocess(frame, **preprocess)
        else:
            src = grayspace(frame)

        if src is None:
            print('Locator: src is None')
            return

        if mask:
            self._mask(src, mask)

        if inverted:
            src = invert(src)

        start = search.get('start')
        if start is None:
            w = search.get('width', 10)
            start = int(mean(
                src[src > 0])) - search.get('start_offset_scalar', 3) * w

        step = search.get('step', 2)
        n = search.get('n', 20)

        blocksize_step = search.get('blocksize_step', 5)
        seg = RegionSegmenter(use_adaptive_threshold=search.get(
            'use_adaptive_threshold', False),
                              blocksize=search.get('blocksize', 20))
        fa = self._get_filter_target_area(shape, dim)
        phigh, plow = None, None

        for j in range(n):
            ww = w * (j + 1)
            self.debug('start intensity={}, width={}'.format(start, ww))
            for i in range(n):

                low = max((0, start + i * step - ww))
                high = max((1, min((255, start + i * step + ww))))
                if inverted:
                    low = 255 - low
                    high = 255 - high

                seg.threshold_low = low
                seg.threshold_high = high

                if seg.threshold_low == plow and seg.threshold_high == phigh:
                    break

                plow = seg.threshold_low
                phigh = seg.threshold_high

                nsrc = seg.segment(src)
                seg.blocksize += blocksize_step

                nf = colorspace(nsrc)

                # draw contours
                targets = self._find_polygon_targets(nsrc, frame=nf)
                if set_image and image is not None:
                    image.set_frame(nf)

                if targets:

                    # filter targets
                    if filter_targets:
                        targets = self._filter_targets(image, frame, dim,
                                                       targets, fa)
                    elif convexity_filter:
                        # for t in targets:
                        #     print t.convexity, t.area, t.min_enclose_area, t.perimeter_convexity
                        targets = [
                            t for t in targets
                            if t.perimeter_convexity > convexity_filter
                        ]

                if targets:
                    return sorted(targets,
                                  key=attrgetter('area'),
                                  reverse=True)
Code example #19
File: locator.py  Project: NMGRL/pychron
    def _find_targets(self, image, frame, dim,
                      search=None, preprocess=True,
                      filter_targets=True,
                      convexity_filter=False,
                      mask=False,
                      set_image=True, inverted=False):
        """
            use a segmentor to segment the image
        """

        if search is None:
            search = {}

        if preprocess:
            if not isinstance(preprocess, dict):
                preprocess = {}
            src = self._preprocess(frame, **preprocess)
        else:
            src = grayspace(frame)

        if src is None:
            print('Locator: src is None')
            return

        if mask:
            self._mask(src, mask)

        if inverted:
            src = invert(src)

        start = search.get('start')
        if start is None:
            # n=20, w=10, start=None, step=2
            w = search.get('width', 10)
            start = int(median(src)) - search.get('start_offset_scalar', 3) * w
            # start = 2*w
            # start = 20

        step = search.get('step', 2)
        n = search.get('n', 20)

        blocksize_step = search.get('blocksize_step', 5)
        seg = RegionSegmenter(use_adaptive_threshold=search.get('use_adaptive_threshold', False),
                              blocksize=search.get('blocksize', 20))
        fa = self._get_filter_target_area(dim)
        phigh, plow = None, None

        for j in range(n):
            ww = w * (j + 1)

            for i in range(n):
                seg.threshold_low = max((0, start + i * step - ww))
                seg.threshold_high = max((1, min((255, start + i * step + ww))))
                if seg.threshold_low == plow and seg.threshold_high == phigh:
                    break

                plow = seg.threshold_low
                phigh = seg.threshold_high

                nsrc = seg.segment(src)
                seg.blocksize += blocksize_step

                nf = colorspace(nsrc)
                # print(i, seg.threshold_high, seg.threshold_low)
                # draw contours
                targets = self._find_polygon_targets(nsrc, frame=nf)
                # print('tasfdas', targets)
                if set_image and image is not None:
                    image.set_frame(nf)

                if targets:

                    # filter targets
                    if filter_targets:
                        targets = self._filter_targets(image, frame, dim, targets, fa)
                    elif convexity_filter:
                        # for t in targets:
                        #     print t.convexity, t.area, t.min_enclose_area, t.perimeter_convexity
                        targets = [t for t in targets if t.perimeter_convexity > convexity_filter]

                if targets:
                    return sorted(targets, key=attrgetter('area'), reverse=True)
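
The `_find_targets` variants in examples #18 and #19 search for a workable binary threshold by sliding and widening an intensity window around a start value derived from the image median, stopping as soon as the segmented frame yields acceptable targets. A minimal standalone sketch of that search loop, with a plain NumPy band threshold in place of pychron's `RegionSegmenter` and a hypothetical `find_targets` callback in place of the polygon/target filtering:

import numpy as np


def search_threshold_window(src, find_targets, n=20, w=10, step=2, start=None):
    """Sweep widening intensity windows over an 8-bit grayscale image until
    find_targets() returns something for the binarized frame."""
    if start is None:
        start = int(np.median(src)) - 3 * w   # default heuristic as in example #19

    prev = None
    for j in range(n):
        ww = w * (j + 1)                      # widen the window each outer pass
        for i in range(n):
            low = max(0, start + i * step - ww)
            high = max(1, min(255, start + i * step + ww))
            if (low, high) == prev:           # window stopped moving; try wider
                break
            prev = (low, high)

            binary = ((src >= low) & (src <= high)).astype('uint8') * 255
            targets = find_targets(binary)
            if targets:
                return targets


# toy usage: treat any modest-sized lit region of the binary frame as a target
def toy_targets(binary):
    coords = np.argwhere(binary > 0)
    return coords.tolist() if 0 < len(coords) < 1000 else None


img = np.full((100, 100), 100, dtype='uint8')
img[40:60, 40:60] = 60                        # a darker "hole" on a bright field
print(len(search_threshold_window(img, toy_targets)))   # -> 400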