Example #1
    def main(self):

        if self.cfg.lfpimg:
            # hot pixel correction
            obj = CfaHotPixels(bay_img=self._lfp_img,
                               cfg=self.cfg,
                               sta=self.sta)
            obj.rectify_candidates_bayer(n=9, sig_lev=2.5)
            self._lfp_img = obj.bay_img
            del obj

        if self.cfg.params[self.cfg.opt_vign] and self._wht_img is not None:
            # apply de-vignetting
            obj = LfpDevignetter(lfp_img=self._lfp_img,
                                 wht_img=self._wht_img,
                                 cfg=self.cfg,
                                 sta=self.sta,
                                 noise_lev=0)
            obj.main()
            self._lfp_img = obj.lfp_img
            self._wht_img = obj.wht_img
            del obj

        if self.cfg.lfpimg and len(self._lfp_img.shape) == 2 and not self.sta.interrupt:
            # perform color filter array management and obtain rgb image
            cfa_obj = CfaProcessor(bay_img=self._lfp_img,
                                   wht_img=self._wht_img,
                                   cfg=self.cfg,
                                   sta=self.sta)
            cfa_obj.main()
            self._lfp_img = cfa_obj.rgb_img
            del cfa_obj

        if self.cfg.params[self.cfg.opt_rota] and \
                self._lfp_img is not None and not self.sta.interrupt:
            # de-rotate centroids
            obj = LfpRotator(self._lfp_img,
                             self.cfg.calibs[self.cfg.mic_list],
                             rad=None,
                             cfg=self.cfg,
                             sta=self.sta)
            obj.main()
            self._lfp_img = obj.lfp_img
            self.cfg.calibs[self.cfg.mic_list] = obj.centroids
            del obj

        if not self.sta.interrupt:
            # interpolate each micro image with its MIC as the center with consistent micro image size
            obj = LfpResampler(lfp_img=self._lfp_img,
                               cfg=self.cfg,
                               sta=self.sta,
                               method='linear')
            obj.main()
            self._lfp_img = obj.lfp_out
            del obj

        return True
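
Each stage above follows the same hand-off pattern: instantiate the stage with the current light-field image, run main(), copy its output back into self._lfp_img, then delete the object. A minimal, self-contained sketch of that pattern (with a hypothetical pass-through Stage class, not part of plenopticam):

import numpy as np

class Stage:
    """Hypothetical placeholder for a processing step such as devignetting."""
    def __init__(self, lfp_img):
        self.lfp_img = lfp_img

    def main(self):
        # a real stage would transform the image here; this sketch passes it through
        self.lfp_out = self.lfp_img

lfp_img = np.zeros((16, 16))        # dummy stand-in for a decoded light-field image
for stage_cls in [Stage, Stage]:    # the real pipeline chains several distinct stages
    obj = stage_cls(lfp_img)
    obj.main()
    lfp_img = obj.lfp_out           # hand the result to the next stage
    del obj                         # release intermediate buffers, as above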
Example #2
    def auto_find(self):

        if self.wht_img is None:
            # find calibration file automatically
            obj = lfp_calibrator.CaliFinder(self.cfg, self.sta)
            obj.main()
            self.wht_img = obj._wht_bay
            del obj

        # white image demosaicing (when light field image is given as RGB)
        if self.wht_img is not None and len(self.lfp_img.shape) == 3:
            from plenopticam.lfp_aligner.cfa_processor import CfaProcessor
            cfa_obj = CfaProcessor(bay_img=self.wht_img,
                                   cfg=self.cfg,
                                   sta=self.sta)
            cfa_obj.bay2rgb()
            self.wht_img = cfa_obj.rgb_img
            del cfa_obj

    def _raw2img(self):
        ''' decode raw data to obtain bayer image and settings data '''

        # skip if calibrated json file already exists, otherwise perform centroid calibration
        if self._raw_data:

            # decode raw data
            obj = LfpDecoder(self._raw_data, self.cfg, self.sta)
            obj.decode_raw()
            self._wht_bay = obj.bay_img
            del obj

            # balance Bayer channels in white image
            try:
                wht_json = json.loads(self._wht_json.read())
                frame_arr = safe_get(wht_json, 'master', 'picture', 'frameArray')[0]
                self.cfg.lfpimg['ccm_wht'] = safe_get(
                    frame_arr, 'frame', 'metadata', 'image', 'color', 'ccmRgbToSrgbArray')
                awb = safe_get(frame_arr, 'frame', 'metadata', 'devices',
                               'sensor', 'normalizedResponses')[0]
                gains = [1. / awb['b'], 1. / awb['r'], 1. / awb['gr'], 1. / awb['gb']]
                self.cfg.lfpimg['awb_wht'] = gains
            except ValueError:
                # fall back to default gains when the white image metadata cannot be parsed
                gains = [1 / 0.74476742744445801, 1 / 0.76306647062301636, 1, 1]

            # apply white balance gains to calibration file
            cfa_obj = CfaProcessor(bay_img=self._wht_bay,
                                   cfg=self.cfg,
                                   sta=self.sta)
            cfa_obj.set_gains(gains)
            self._wht_bay = cfa_obj.apply_awb()
            del cfa_obj

        return True
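
The white-balance block in _raw2img inverts the normalized per-channel sensor responses to obtain gains for the four Bayer channels (b, r, gr, gb). A short sketch of that arithmetic, applying the gains to a dummy mosaic with an RGGB layout assumed purely for illustration (plenopticam's CfaProcessor derives the actual layout from the metadata):

import numpy as np

# normalized channel responses as they might appear in the white-image metadata
awb = {'b': 0.745, 'r': 0.763, 'gr': 1.0, 'gb': 1.0}
gains = [1. / awb['b'], 1. / awb['r'], 1. / awb['gr'], 1. / awb['gb']]

# dummy Bayer mosaic; the RGGB layout is an assumption made only for this sketch
bay_img = np.full((4, 4), 0.5)
bay_img[1::2, 1::2] *= gains[0]   # blue sites
bay_img[0::2, 0::2] *= gains[1]   # red sites
bay_img[0::2, 1::2] *= gains[2]   # green sites in red rows
bay_img[1::2, 0::2] *= gains[3]   # green sites in blue rows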
Example #4
    def apply_ccm(self):

        # color matrix correction
        if 'ccm' in self.cfg.lfpimg.keys():

            # ccm mat selection
            if 'ccm_wht' in self.cfg.lfpimg:
                ccm_arr = self.cfg.lfpimg['ccm_wht']
            elif 'ccm' in self.cfg.lfpimg:
                #ccm_arr = self.cfg.lfpimg['ccm']
                ccm_arr = np.array([
                    2.4827811717987061, -1.1018080711364746,
                    -0.38097298145294189, -0.36761483550071716,
                    1.6667767763137817, -0.29916191101074219,
                    -0.18722048401832581, -0.73317205905914307,
                    1.9203925132751465
                ])
            else:
                ccm_arr = np.diag(np.ones(3))

            # normalize
            self.vp_img_arr /= self.vp_img_arr.max()

            if 'exp' in self.cfg.lfpimg:
                sat_lev = 2**(-self.cfg.lfpimg['exp'])
            else:
                sat_lev = 1
            self.vp_img_arr *= sat_lev

            # reshape flat CCM to a 3x3 matrix and transpose it for RGB order
            ccm_mat = np.reshape(ccm_arr, (3, 3)).T
            self._vp_img_arr = CfaProcessor().correct_color(
                self._vp_img_arr.copy(), ccm_mat=ccm_mat)

            # clip out-of-range values to [0, sat_lev]
            self._vp_img_arr[self._vp_img_arr < 0] = 0
            self._vp_img_arr[self._vp_img_arr > sat_lev] = sat_lev
            #self._vp_img_arr /= sat_lev
            self._vp_img_arr /= self._vp_img_arr.max()

        return True
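
The reshape/transpose above turns the flat nine-element CCM into a 3x3 matrix before CfaProcessor.correct_color applies it. As a rough sketch, and assuming the correction amounts to a per-pixel matrix multiplication (a simplification of the actual library call), the step looks like this:

import numpy as np

ccm_arr = np.array([2.48278117, -1.10180807, -0.38097298,
                    -0.36761484, 1.66677678, -0.29916191,
                    -0.18722048, -0.73317206, 1.92039251])
ccm_mat = np.reshape(ccm_arr, (3, 3)).T     # 3x3 matrix, transposed for RGB order

img = np.random.rand(8, 8, 3)               # dummy viewpoint image in [0, 1]
out = img @ ccm_mat                         # map every RGB vector through the CCM

sat_lev = 1
out = np.clip(out, 0, sat_lev)              # clip out-of-range values, as above
out /= out.max()                            # renormalize to [0, 1]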
Example #5
class LfpColorEqualizer(LfpViewpoints):
    def __init__(self, *args, **kwargs):
        super(LfpColorEqualizer, self).__init__(*args, **kwargs)

        self._ref_img = kwargs['ref_img'] if 'ref_img' in kwargs else self.central_view
        self.prop_type = kwargs['prop_type'] if 'prop_type' in kwargs else 'central'
        self._method = 'mvgd'

    def main(self):

        if self.vp_img_arr is not None and not self.sta.interrupt:
            self.apply_ccm()
            self._ref_img = self.central_view

        if self.prop_type == 'central':
            self.proc_vp_arr(fun=self.color_eq_img,
                             ref=self._ref_img,
                             method=self._method,
                             msg='Color equalization')
        elif self.prop_type == 'axial':
            self.proc_ax_propagate_2d(fun=self.color_eq_img,
                                      method=self._method,
                                      msg='Color equalization')

        # zero-out sub-apertures suffering from cross-talk (e.g. to exclude them in refocusing)
        self._exclude_crosstalk_views()

    @staticmethod
    def color_eq_img(src, ref, method=None):

        # instantiate color matcher
        match = ColorMatcher(src, ref, method=method).main()

        return match

    def apply_ccm(self):

        # color matrix correction
        if 'ccm' in self.cfg.lfpimg.keys():

            # ccm mat selection
            if 'ccm_wht' in self.cfg.lfpimg:
                ccm_arr = self.cfg.lfpimg['ccm_wht']
            elif 'ccm' in self.cfg.lfpimg:
                #ccm_arr = self.cfg.lfpimg['ccm']
                ccm_arr = np.array([
                    2.4827811717987061, -1.1018080711364746,
                    -0.38097298145294189, -0.36761483550071716,
                    1.6667767763137817, -0.29916191101074219,
                    -0.18722048401832581, -0.73317205905914307,
                    1.9203925132751465
                ])
            else:
                ccm_arr = np.diag(np.ones(3))

            # normalize
            self.vp_img_arr /= self.vp_img_arr.max()

            if 'exp' in self.cfg.lfpimg:
                sat_lev = 2**(-self.cfg.lfpimg['exp'])
            else:
                sat_lev = 1
            self.vp_img_arr *= sat_lev

            # reshape flat CCM to a 3x3 matrix and transpose it for RGB order
            ccm_mat = np.reshape(ccm_arr, (3, 3)).T
            self._vp_img_arr = CfaProcessor().correct_color(
                self._vp_img_arr.copy(), ccm_mat=ccm_mat)

            # clip out-of-range values to [0, sat_lev]
            self._vp_img_arr[self._vp_img_arr < 0] = 0
            self._vp_img_arr[self._vp_img_arr > sat_lev] = sat_lev
            #self._vp_img_arr /= sat_lev
            self._vp_img_arr /= self._vp_img_arr.max()

        return True

    def _exclude_crosstalk_views(self):

        ratio = self.vp_img_arr.shape[3] / self.vp_img_arr.shape[2]
        r = self._M // 2
        mask = np.zeros([2 * r + 1, 2 * r + 1])

        # determine mask for affected views
        for x in range(-r, r + 1):
            for y in range(-r, r + 1):
                if int(np.round(np.sqrt(x**2 + y**2 * ratio))) > r + 2:
                    mask[r + y][r + x] = 1

        # extract coordinates from mask
        coords_table = [(y, x) for y in range(len(mask))
                        for x in range(len(mask)) if mask[y][x]]

        # zero-out selected views
        for coords in coords_table:
            self.vp_img_arr[coords[0], coords[1], ...] = np.zeros(self.vp_img_arr.shape[2:])

        return True
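
The nested loop in _exclude_crosstalk_views builds an elliptical exclusion mask over the grid of viewpoints; entries set to 1 mark sub-apertures that get zeroed out. A standalone reproduction of that mask for assumed values M = 15 and ratio = 1.5 (both chosen only for illustration; the real values come from vp_img_arr.shape):

import numpy as np

M, ratio = 15, 1.5        # example values standing in for self._M and the aspect ratio
r = M // 2
mask = np.zeros((2 * r + 1, 2 * r + 1), dtype=int)

for x in range(-r, r + 1):
    for y in range(-r, r + 1):
        if int(np.round(np.sqrt(x ** 2 + y ** 2 * ratio))) > r + 2:
            mask[r + y, r + x] = 1

print(mask)               # 1-entries are the corner views excluded from refocusing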
Example #6
    obj = lfp_calibrator.CaliFinder(cfg)
    obj.main()
    wht_img = obj.wht_bay
    del obj

    if cal_opt:
        # perform centroid calibration
        cal_obj = lfp_calibrator.LfpCalibrator(wht_img, cfg)
        cal_obj.main()
        cfg = cal_obj.cfg
        del cal_obj
    else:
        # convert Bayer to RGB representation
        if len(wht_img.shape) == 2 and 'bay' in cfg.lfpimg:
            # perform color filter array management and obtain rgb image
            cfa_obj = CfaProcessor(bay_img=wht_img, cfg=cfg)
            cfa_obj.bay2rgb()
            wht_img = cfa_obj.rgb_img
            del cfa_obj

    # ensure white image is monochromatic
    #wht_img = misc.rgb2gray(wht_img) if len(wht_img.shape) is 3 else wht_img

    # load calibration data
    cfg.load_cal_data()

    if cfg.params[cfg.opt_rota]:
        # de-rotate centroids
        obj = LfpRotator(wht_img, cfg.calibs[cfg.mic_list], rad=None, cfg=cfg)
        obj.main()
        wht_rot, centroids_rot = obj.lfp_img, obj.centroids
Example #7
    def main(self):

        if self._wht_img is None:
            self.sta.status_msg(msg='White image file not present',
                                opt=self.cfg.params[self.cfg.opt_prnt])
            self.sta.error = True

        # convert Bayer to RGB representation
        if len(self._wht_img.shape) == 2 and 'bay' in self.cfg.lfpimg:
            # perform color filter array management and obtain rgb image
            cfa_obj = CfaProcessor(bay_img=self._wht_img,
                                   cfg=self.cfg,
                                   sta=self.sta)
            cfa_obj.bay2rgb()
            self._wht_img = cfa_obj.rgb_img
            del cfa_obj

        # ensure white image is monochromatic
        if len(self._wht_img.shape) == 3:
            self._wht_img = rgb2gry(self._wht_img)[..., 0] \
                if self._wht_img.shape[-1] == 3 else self._wht_img

        # estimate micro image diameter
        obj = PitchEstimator(self._wht_img, self.cfg, self.sta)
        obj.main()
        self._M = obj.M if not self._M else self._M
        del obj

        # compute all centroids of micro images
        obj = CentroidExtractor(self._wht_img,
                                self.cfg,
                                self.sta,
                                self._M,
                                method='area')
        obj.main()
        centroids = obj.centroids
        del obj

        # write micro image center image to hard drive if debug option is set
        if self.cfg.params[self.cfg.opt_dbug]:
            draw_obj = CentroidDrawer(self._wht_img, centroids, self.cfg,
                                      self.sta)
            draw_obj.write_centroids_img(fn='wht_img+mics_unsorted.png')
            del draw_obj

        # reorder MICs and assign indices based on the detected MLA pattern
        obj = CentroidSorter(centroids, self.cfg, self.sta)
        obj.main()
        mic_list, pattern, pitch = obj.mic_list, obj.pattern, obj.pitch
        del obj

        # fit grid of MICs using least-squares method to obtain accurate MICs from line intersections
        # (note: this branch is currently disabled, as the trailing "and None" makes the condition always False)
        if not self.sta.interrupt and None:
            from plenopticam.lfp_calibrator import GridFitter
            self.cfg.calibs[self.cfg.pat_type] = pattern
            obj = GridFitter(coords_list=mic_list, cfg=self.cfg, sta=self.sta)
            obj.main()
            mic_list = obj.grid_fit
            del obj

        # save calibration metadata
        self.sta.status_msg('Save calibration data',
                            opt=self.cfg.params[self.cfg.opt_prnt])
        self.sta.progress(None, opt=self.cfg.params[self.cfg.opt_prnt])
        try:
            self.cfg.save_cal_data(mic_list=mic_list,
                                   pat_type=pattern,
                                   ptc_mean=pitch)
            self.sta.progress(100, opt=self.cfg.params[self.cfg.opt_prnt])
        except PermissionError:
            self.sta.status_msg('Could not save calibration data',
                                opt=self.cfg.params[self.cfg.opt_prnt])

        # write image to hard drive (only if debug option is set)
        if self.cfg.params[self.cfg.opt_dbug]:
            draw_obj = CentroidDrawer(self._wht_img, mic_list, self.cfg,
                                      self.sta)
            draw_obj.write_centroids_img(fn='wht_img+mics_sorted.png')
            del draw_obj

        return True
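
The debug branches above write centroid overlays to disk via CentroidDrawer. As a quick, hypothetical way to eyeball a MIC list outside of plenopticam (not how CentroidDrawer is implemented), one could scatter the coordinates over the white image with matplotlib, assuming each list entry starts with a (y, x) pair:

import numpy as np
import matplotlib.pyplot as plt

wht_img = np.ones((64, 64))                           # dummy white image
mic_list = [(16, 16), (16, 32), (32, 16), (32, 32)]   # dummy micro image centers (y, x)

ys, xs = zip(*[(c[0], c[1]) for c in mic_list])
plt.imshow(wht_img, cmap='gray', vmin=0, vmax=1)
plt.scatter(xs, ys, s=20, c='r', marker='+')
plt.savefig('wht_img+mics_check.png')                 # hypothetical output filename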
Example #8
class LfpColorEqualizer(LfpViewpoints):
    def __init__(self, *args, **kwargs):
        super(LfpColorEqualizer, self).__init__(*args, **kwargs)

        self._ref_img = kwargs['ref_img'] if 'ref_img' in kwargs else self.central_view
        self.prop_type = kwargs['prop_type'] if 'prop_type' in kwargs else 'central'
        self._method = 'mvgd'

    def main(self):

        # check interrupt status
        if self.sta.interrupt:
            return False

        # apply color correction
        if self.vp_img_arr is not None:
            self.apply_ccm()
            self._ref_img = self.central_view

        # equalize light field colors
        if self.prop_type == 'central':
            self.proc_vp_arr(fun=self.color_eq_img,
                             ref=self._ref_img,
                             method=self._method,
                             msg='Color equalization')
        elif self.prop_type == 'axial':
            self.proc_ax_propagate_2d(fun=self.color_eq_img,
                                      method=self._method,
                                      msg='Color equalization')

        # zero-out marginal sub-apertures (e.g. suffering from cross-talk)
        self._exclude_crosstalk_views()

    @staticmethod
    def color_eq_img(src, ref, method=None):

        # instantiate color matcher
        match = ColorMatcher(src, ref, method=method).main()

        return match

    def apply_ccm(self):

        # color matrix correction
        if 'ccm' in self.cfg.lfpimg.keys():

            # ccm mat selection
            if 'ccm_wht' in self.cfg.lfpimg:
                ccm_arr = self.cfg.lfpimg['ccm_wht']
            elif 'ccm' in self.cfg.lfpimg:
                #ccm_arr = self.cfg.lfpimg['ccm']
                ccm_arr = np.array([
                    2.4827811717987061, -1.1018080711364746,
                    -0.38097298145294189, -0.36761483550071716,
                    1.6667767763137817, -0.29916191101074219,
                    -0.18722048401832581, -0.73317205905914307,
                    1.9203925132751465
                ])
            else:
                ccm_arr = np.diag(np.ones(3))

            # normalize
            self.vp_img_arr /= self.vp_img_arr.max()

            if 'exp' in self.cfg.lfpimg:
                sat_lev = 2**(-self.cfg.lfpimg['exp'])
            else:
                sat_lev = 1
            self.vp_img_arr *= sat_lev

            # reshape flat CCM to a 3x3 matrix and transpose it for RGB order
            ccm_mat = np.reshape(ccm_arr, (3, 3)).T
            self._vp_img_arr = CfaProcessor().correct_color(
                self._vp_img_arr.copy(), ccm_mat=ccm_mat)

            # clip out-of-range values to [0, sat_lev]
            self._vp_img_arr[self._vp_img_arr < 0] = 0
            self._vp_img_arr[self._vp_img_arr > sat_lev] = sat_lev
            #self._vp_img_arr /= sat_lev
            self._vp_img_arr /= self._vp_img_arr.max()

        return True

    def _exclude_crosstalk_views(self):
        ''' function wrapper to exclude Lytro Illum views that suffer from cross-talk '''

        self.circular_view_aperture(offset=2, ellipse=True)