Code example #1
def demosaic_channel(img, channel=None):
    if channel is None:  # no channel given: demosaic the channel-summed mosaic into a colour image
        return colour_demosaicing.demosaicing_CFA_Bayer_bilinear(
            np.sum(img, axis=2), 'BGGR').astype(img.dtype)
    else:
        return colour_demosaicing.demosaicing_CFA_Bayer_bilinear(
            img[:, :, channel], 'BGGR').astype(img.dtype)
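
A minimal usage sketch for demosaic_channel above; the four-channel raw array, its value range, and the idea that summing the channels reproduces the BGGR mosaic are illustrative assumptions, not taken from the original project:

import numpy as np
import colour_demosaicing

# Hypothetical multi-channel raw capture whose channel sum forms the BGGR mosaic.
raw = np.random.randint(0, 1024, size=(64, 64, 4)).astype(np.uint16)

rgb_full = demosaic_channel(raw)             # demosaic the channel-summed mosaic
rgb_ch0 = demosaic_channel(raw, channel=0)   # demosaic a single channel plane
print(rgb_full.shape, rgb_ch0.shape)         # (64, 64, 3) (64, 64, 3)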
Code example #2
File: acquire_images.py  Project: idiap/hesm_distrib
def display_stack_serie(stack, interval=50):
    N = stack.shape[2]
    run = True

    def press(event):
        nonlocal run  # let the key handler stop the outer display loop
        print('pressed ', event.key)
        sys.stdout.flush()
        if event.key == 'q':
            print('Exiting function')
            plt.ioff()
            plt.close('all')
            run = False

    if (run):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        fig.canvas.mpl_connect('key_press_event', press)
        plt.ion()
        plt.show()
        im = demosaicing_CFA_Bayer_bilinear(stack[..., 0])
        img = ax.imshow(im, interpolation='nearest', cmap='Greys_r')
    while run:
        for i in range(N):
            if (run):
                plt.title("{0}".format(i))
                im = demosaicing_CFA_Bayer_bilinear(stack[..., i])
                img.set_data(im)
                plt.pause(interval / 1000.)
Code example #3
File: demosaic.py  Project: themathgeek13/plenpy
def get_demosaiced(img: ndarray,
                   pattern: str = 'GRBG',
                   method: str = 'bilinear') -> ndarray:
    """Get a demosaiced RGB image from a raw image.

    This function is a wrapper of the demosaicing functions supplied by the
    ``colour_demosaicing`` package.

    Args:
        img: Input image, greyscale, of shape (x,y).

        pattern: Bayer filter pattern that the input image is modulated with.
            Patterns are: 'RGGB', 'BGGR', 'GRBG', 'GBRG'.

            Default: 'GRBG'

        method: Algorithm used to calculate the demosaiced image.\n
            * 'bilinear': Simple bilinear interpolation of color values
            * 'malvar2004': Algorithm introduced by Malvar et al. [R3]_
            * 'menon2007': Algorithm introduced by Menon et al. [R4]_


    Returns:
        Demosaiced RGB-color image of shape (x,y,3) of
        dtype :class:`numpy.float64`.

    References:
        .. [R3]  H.S. Malvar,  Li-wei He, and  R. Cutler (2004).
           High-quality linear interpolation for demosaicing of
           Bayer-patterned color images.
           IEEE International Conference on Acoustics, Speech, and Signal
           Processing, Proceedings. (ICASSP '04).
           DOI: 10.1109/ICASSP.2004.1326587

        .. [R4]  D. Menon, S. Andriani, G. Calvagno (2007).
           Demosaicing With Directional Filtering and a posteriori Decision.
           IEEE Transactions on Image Processing (Volume: 16, Issue: 1)
           DOI: 10.1109/TIP.2006.884928

    """

    param_list = ["bilinear", "malvar2004", "menon2007"]

    # Do demosaicing with specified method
    if method not in param_list:
        raise ValueError(
            f"The specified method {method} is none of the supported "
            f"methods: {param_list}.")

    elif method == "bilinear":
        return demosaicing_CFA_Bayer_bilinear(img.astype(np.float64),
                                              pattern=pattern)

    elif method == "malvar2004":
        return demosaicing_CFA_Bayer_Malvar2004(img.astype(np.float64),
                                                pattern=pattern)

    elif method == "menon2007":
        return demosaicing_CFA_Bayer_Menon2007(img.astype(np.float64),
                                               pattern=pattern)
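
A short round-trip sketch for get_demosaiced, fabricating a GRBG raw image with mosaicing_CFA_Bayer from the same colour_demosaicing package; the array size and values are arbitrary:

import numpy as np
from colour_demosaicing import mosaicing_CFA_Bayer

# Synthetic RGB test image in [0, 1], re-mosaiced with a GRBG Bayer pattern.
rgb = np.random.rand(32, 32, 3)
cfa = mosaicing_CFA_Bayer(rgb, pattern='GRBG')

demosaiced = get_demosaiced(cfa, pattern='GRBG', method='malvar2004')
print(demosaiced.shape, demosaiced.dtype)  # (32, 32, 3) float64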
Code example #4
    def _debayer(self, img: Image) -> Image:
        assert img.data.dtype == np.float32 and img.data.min(
        ) >= 0.0 and img.data.max(
        ) <= 1.0, f"{img.data.dtype} {img.data.max()} {img.data.min()}"

        if not img.bayer_pattern:
            logging.warning(f"no bayer pattern detected for {img.key}")
            return img

        with Timer(
                f"debayering image for {img.key} with pattern {img.bayer_pattern}"
        ):
            data = demosaicing_CFA_Bayer_bilinear(img.data,
                                                  pattern=img.bayer_pattern)

            # fix the image shape
            img.data = np.array([data[:, :, 0], data[:, :, 1], data[:, :, 2]])
            img.data = np.interp(img.data, (0.0, img.data.max()),
                                 (0.0, 1.0)).astype(np.float32)

            # poor man's SCNR
            # img.data[1] *= 0.8

        assert img.data.dtype == np.float32 and img.data.min(
        ) >= 0.0 and img.data.max(
        ) <= 1.0, f"{img.data.dtype} {img.data.max()} {img.data.min()}"
        return img
Code example #5
    def bay2rgb(self, method=2):

        # print status
        self.sta.status_msg('Debayering', self.cfg.params[self.cfg.opt_prnt])
        self.sta.progress(None, self.cfg.params[self.cfg.opt_prnt])

        # Bayer to RGB conversion
        if method == 0:
            self._rgb_img = demosaicing_CFA_Bayer_bilinear(
                self._bay_img, self.cfg.lfpimg['bay'])
        elif method == 1:
            self._rgb_img = demosaicing_CFA_Bayer_Malvar2004(
                self._bay_img, self.cfg.lfpimg['bay'])
        else:
            self._rgb_img = demosaicing_CFA_Bayer_Menon2007(
                self._bay_img, self.cfg.lfpimg['bay'])

        # normalize image
        val_min = np.percentile(self._rgb_img, 0.05)
        val_max = np.max(self._rgb_img)
        self._rgb_img = misc.Normalizer(self._rgb_img, min=val_min,
                                        max=val_max).type_norm()

        # update status message
        self.sta.progress(100, self.cfg.params[self.cfg.opt_prnt])

        return True
Code example #6
File: time_gen.py  Project: panoptes/panoptes-timegen
def debayer_image_array(data, algorithm='bilinear', pattern='GRBG'):
    """ Returns the RGB data after bilinear interpolation on the given array.
        ----------
        parameters  
        ----------
        data : The input array containing the image data. Array like of shape (rows,columns)
        algorithm : The algorithm to use for the debayering operation. 
        {'bilinear','malvar2004','menon2007'}
        ----------
        returns
        ----------
        numpy array of shape (rows,columns,3)
    """
    # Check to see if data is two dimensional
    try:
        assert len(data.shape) == 2, 'Shape is not 2 dimensional'
    except AssertionError:
        log_error_exit('Image data input to debayer is not 2 dimensional')

    if algorithm == 'bilinear':
        rgb_data = demosaicing_CFA_Bayer_bilinear(data, pattern)
    elif algorithm == 'malvar2004':
        rgb_data = demosaicing_CFA_Bayer_Malvar2004(data, pattern)
    elif algorithm == 'menon2007':
        rgb_data = demosaicing_CFA_Bayer_Menon2007(data, pattern)
    else:
        log_error_exit('Unknown debayer algorithm: {}'.format(algorithm))
    return rgb_data.astype(np.uint16)
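
A hedged usage sketch for debayer_image_array; the synthetic 16-bit frame below stands in for real camera data:

import numpy as np

# Synthetic 16-bit GRBG frame (contents are illustrative only).
frame = np.random.randint(0, 65535, size=(120, 160), dtype=np.uint16)

rgb = debayer_image_array(frame, algorithm='malvar2004', pattern='GRBG')
print(rgb.shape, rgb.dtype)  # (120, 160, 3) uint16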
Code example #7
File: cfa_processor.py  Project: pvjosue/plenopticam
    def bay2rgb(self, method=2):

        # print status
        self.sta.status_msg('Debayering', self.cfg.params[self.cfg.opt_prnt])
        self.sta.progress(None, self.cfg.params[self.cfg.opt_prnt])

        # Bayer to RGB conversion
        if method == 0:
            self._rgb_img = demosaicing_CFA_Bayer_bilinear(
                self._bay_img.astype(np.float32), self.cfg.lfpimg['bay'])
        elif method == 1:
            self._rgb_img = demosaicing_CFA_Bayer_Malvar2004(
                self._bay_img.astype(np.float32), self.cfg.lfpimg['bay'])
        else:
            self._rgb_img = demosaicing_CFA_Bayer_Menon2007(
                self._bay_img.astype(np.float32), self.cfg.lfpimg['bay'])

        # clip intensities above and below previous limits (removing dead and hot outliers yields much better contrast)
        self._rgb_img[
            self._rgb_img < self._bay_img.min()] = self._bay_img.min()
        self._rgb_img[
            self._rgb_img > self._bay_img.max()] = self._bay_img.max()

        # print "Progress: Done!"
        self.sta.progress(100, self.cfg.params[self.cfg.opt_prnt])

        return True
Code example #8
File: multi.py  Project: qiuzhongwei-USTB/ice_vision
def converter(names,num = 1):
	for epoch in tqdm(range(int(len(names)/num))):

		pnms = np.zeros([2048,2448*num])
		for index,name in enumerate(names[epoch*num:(epoch+1)*num]):
			pnm_file = name
			pnm = cv2.imread('/dataset/training/'+pnm_file,0)
			eq = cv2.equalizeHist(pnm)
			#print(pnm.shape)
			pnms[:,index*2448:(index+1)*2448] = eq
		#print('read over')

		#print('eq over')
		pnms_3 = demosaicing_CFA_Bayer_bilinear(pnms)
		#print('demosaicing_CFA_Bayer_bilinear over')
		#print('write')
		for index,name in enumerate(names[epoch*num:(epoch+1)*num]):
			#print(pnms_3[:,index*2448:index*2448+2448,:].shape)
			image = pnms_3[:,index*2448:index*2448+2448,:] 
			#print(image.shape)
			if not os.path.exists(save_folder+'/'+'/'.join(name.split('/')[:-1])):
				os.makedirs(save_folder+'/'+'/'.join(name.split('/')[:-1]), exist_ok=True)
			cv2.imwrite(save_folder +'/'+'.'.join(name.split('.')[:-1])+'.jpg',image)
			#print(index/num)
	pnms = np.zeros([2048,2448*num])
	for index,name in enumerate(names[(epoch+1)*num:]):
		pnm_file = name
		pnm = cv2.imread('/dataset/training/'+pnm_file,0)
		eq = cv2.equalizeHist(pnm)
		#print(pnm.shape)
		pnms[:,index*2448:(index+1)*2448] = eq
	print('read over')

	print('eq over')
	pnms_3 = demosaicing_CFA_Bayer_bilinear(pnms)
	#print('demosaicing_CFA_Bayer_bilinear over')
	#print('write')
	for index,name in enumerate(names[(epoch+1)*num:]):
		image = pnms_3[:,index*2448:(index+1)*2448,:] 
		if not os.path.exists(save_folder+'/'+'/'.join(name.split('/')[:-1])):
			os.makedirs(save_folder+'/'+'/'.join(name.split('/')[:-1]), exist_ok=True)
		cv2.imwrite(save_folder +'/'+'.'.join(name.split('.')[:-1])+'.jpg',image)
Code example #9
    def getFrame(self, index, decode=True):
        #get frame image (I) and timestamp (ts) at which frame was recorded
        nch = self.header['imageBitDepth'] // 8
        if self.ext in [
                'raw', 'brgb8'
        ]:  #read in an uncompressed image (assume imageBitDepthReal==8)
            shape = (self.header['height'], self.header['width'])
            self.file.seek(1024 + index * self.header['trueImageSize'], 0)
            I = fread(self.file, self.header['imageSizeBytes'], 'B')

            if decode:
                if nch == 1:
                    I = np.reshape(I, shape)
                else:
                    I = np.reshape(I, shape + (nch,))
                if nch == 3:
                    t = I[:, :, 2]
                    I[:, :, 2] = I[:, :, 0]
                    I[:, :, 1] = t
                if self.ext == 'brgb8':
                    I = colour_demosaicing.demosaicing_CFA_Bayer_bilinear(
                        I, 'BGGR')

        elif self.ext in ['jpg', 'jbrgb']:
            if decode:
                self.file.seek(self.seek_table[index], 0)
                nBytes = fread(self.file, 1, 'I')
                data = fread(self.file, nBytes - 4, 'B')
                I = PIL.Image.open(io.BytesIO(data))
                if self.ext == 'jbrgb':
                    I = colour_demosaicing.demosaicing_CFA_Bayer_bilinear(
                        I, 'BGGR')

        elif self.ext == 'png':
            self.file.seek(self.seek_table[index], 0)
            nBytes = fread(self.file, 1, 'I')
            I = fread(self.file, nBytes - 4, 'B')
            if decode:
                I = np.array(I).transpose(range(I.ndim - 1, -1, -1))
        else:
            assert (False)
        return np.array(I)
Code example #10
File: multi.py  Project: qiuzhongwei-USTB/ice_vision
def converter1(names,hash_tabel=hash_tabel):
    #print(len(names))
    print('start process...')
    for name in tqdm(names):
        pnm_file = name
        if pnm_file in hash_tabel:
            #print(pnm_file,'already in')
            continue
        #print(pnm_file)
        pnm = cv2.imread('/dataset/training/'+pnm_file,0)
        eq = cv2.equalizeHist(pnm)
        #print(name)
        #print(eq.shape)
        pnms_3 = demosaicing_CFA_Bayer_bilinear(eq)
        if not cv2.imwrite(save_folder +'/'+'.'.join(name.split('.')[:-1])+'.jpg',pnms_3):
            w = cv2.imwrite(save_folder +'/'+'.'.join(name.split('.')[:-1])+'.jpg',pnms_3)
Code example #11
def demosaicking(image: np.ndarray,
                 method: str = "bilinear",
                 pattern: str = "RGGB") -> np.ndarray:
    """Returns the demosaicked image given a method.

    Parameters
    -------------------
    image: np.ndarray,
        The image to be demosaicked.
    method: str,
        Demosaicking method to be applied.
    pattern: str,
        Arrangement of the colour filters on the pixel array.
        Possible patterns are: {RGGB, BGGR, GRBG, GBRG}.

    Raises
    ------------------
    ValueError,
        If given method does not exist.

    Returns
    -------------------
    Returns the demosaicked image.
    """
    image_rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    image_cfa = mosaicing_CFA_Bayer(image_rgb, pattern=pattern) / 255

    if method == 'bilinear':
        image_demo = ((cctf_encoding(
            demosaicing_CFA_Bayer_bilinear(image_cfa, pattern=pattern))) *
                      255).astype(np.uint8)
    elif method == 'malvar':
        image_demo = ((cctf_encoding(
            demosaicing_CFA_Bayer_Malvar2004(image_cfa, pattern=pattern))) *
                      255).astype(np.uint8)
    elif method == 'menon':
        image_demo = ((cctf_encoding(
            demosaicing_CFA_Bayer_Menon2007(image_cfa, pattern=pattern))) *
                      255).astype(np.uint8)
    else:
        raise ValueError(
            'Given method \'{}\' does not belong to possible methods. '
            'Valid methods are: \'bilinear\', \'malvar\' and \'menon\'.'.
            format(method))

    return cv2.cvtColor(image_demo, cv2.COLOR_RGB2GRAY)
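
demosaicking above expects an 8-bit greyscale input, mosaics it, demosaics it, and returns a greyscale result; a minimal call might look like the following sketch (the input array is fabricated):

import numpy as np

# Fabricated 8-bit greyscale image.
grey = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)

out = demosaicking(grey, method='menon', pattern='RGGB')
print(out.shape, out.dtype)  # (64, 64) uint8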
Code example #12
    def readCurrentImage(self):
        img = self.__images[self.__current_image]

        if img['pds_data'] is None:
            # read the PDS label
            img['pds_data'] = pds4_tools.read('%s/%s' %(img['path'], img['filename']))

            # extract the image data
            img['raw_data'] = np.asanyarray(img['pds_data'][0].data)
            print(img['raw_data'].shape, img['raw_data'].ndim)
            img['raw_data'] = img['raw_data'] / 1023      # normalise the 10-bit image data

            # de-bayer
            rgb_data = colour.cctf_encoding(colour_demosaicing.demosaicing_CFA_Bayer_bilinear(img['raw_data'], 'RGGB')) 
            print(rgb_data.shape, rgb_data.ndim)
            img['rgb_data'] = rgb_data

            ## histogram stretching
            #lower, upper = np.percentile(rgb_data, (0.2,99.8))
            #print(lower, upper)
            #scale_data = exposure.rescale_intensity(rgb_data, in_range=(lower, upper)) 

        return img
Code example #13
#print(names)
num = 10
t1 = time.time()
for epoch in tqdm(range(int(len(names)/num))):

	pnms = np.zeros([2048,2448*num])
	for index,name in enumerate(names[epoch*num:(epoch+1)*num]):
		pnm_file = name
		pnm = cv2.imread('/dataset/training/'+pnm_file,0)
		eq = cv2.equalizeHist(pnm)
		#print(pnm.shape)
		pnms[:,index*2448:(index+1)*2448] = eq
	#print('read over')

	#print('eq over')
	pnms_3 = demosaicing_CFA_Bayer_bilinear(pnms)
	#print('demosaicing_CFA_Bayer_bilinear over')
	#print('write')
	for index,name in enumerate(names[epoch*num:(epoch+1)*num]):
		#print(pnms_3[:,index*2448:index*2448+2448,:].shape)
		image = pnms_3[:,index*2448:index*2448+2448,:] 
		#print(image.shape)
		if not os.path.exists(save_folder+'/'.join(name.split('/')[:-1])):
			os.mkdir(save_folder+'/'.join(name.split('/')[:-2]))
			os.mkdir(save_folder+'/'.join(name.split('/')[:-1]))
		cv2.imwrite(save_folder +'.'.join(name.split('.')[:-1])+'.jpg',image)
		#print(index/num)
pnms = np.zeros([2048,2448*num])
for index,name in enumerate(names[(epoch+1)*num:]):
	pnm_file = name
	pnm = cv2.imread('/dataset/training/'+pnm_file,0)
Code example #14
def imread_merge_demosaic(files, metadata, align, pattern='RGGB'):
    """
	Merge RAW images before demosaicing. This function merges in an online
	way and can handle a large number of inputs with little memory.

	:files: filenames containing the inpt images
	:metadata: internally generated metadata dict
	:align: perform homography based alignment before merging
	:pattern: bayer pattern used in RAW images
	:return: Merged FP32 HDR image
	"""

    if align:
        raise NotImplementedError
    # Some sanity checks and logs related to colour-space and white-balance
    logger.info('Merging before demosaicing.')
    if metadata['color_space'] != 'raw':
        logger.warning('Switching to RAW color-space since it is the only one ' \
         'supported in the current mode.')
    raw = rawpy.imread(files[0])
    if metadata['wb'] == 'auto':
        logger.warning(
            'Auto white-balance not supported. Using daylight whitebalance')
        wb = np.array(raw.daylight_whitebalance)
    elif metadata['wb'] == 'camera':
        logger.warning('Make sure that white-balance was not set to "auto" while ' \
         'capturing the stack. Using white-balance of first image.')
        wb = np.array(raw.camera_whitebalance)
    else:
        raise NotImplementedError

    if pattern == 'RGGB':
        assert wb[1] == wb[3] or wb[3] == 0
        wb = wb[:3] / wb.max()
    else:
        raise NotImplementedError

    # Check for saturation in shortest exposure
    shortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] *
                                  metadata['aperture'])
    logger.info(f'Shortest exposure is {shortest_exposure}')

    num_saturated = 0
    num, denom = np.zeros((2, metadata['h'], metadata['w']))
    black_frame = np.tile(metadata['black_level'].reshape(2, 2),
                          (metadata['h'] // 2, metadata['w'] // 2))
    for i, f in enumerate(tqdm.tqdm(files)):
        raw = rawpy.imread(f)
        img = raw.raw_image_visible.astype(np.float32)

        # Ignore saturated pixels in all but shortest exposure
        if i == shortest_exposure:
            unsaturated = np.ones_like(img, dtype=bool)
            num_sat = np.count_nonzero(
                np.logical_not(
                    get_unsaturated(raw, metadata['saturation_point'])))
        else:
            unsaturated = get_unsaturated(raw, metadata['saturation_point'])

        # Subtract black level for linearity
        img -= black_frame

        X_times_t = img / metadata['gain'][i] / metadata['aperture'][i]
        denom[unsaturated] += metadata['exp'][i]
        num[unsaturated] += X_times_t[unsaturated]

    HDR_bayer = (num / denom)

    # Libraw does not support 32-bit values. Use colour-demosaicing instead:
    # https://colour-demosaicing.readthedocs.io/en/latest/manual.html
    logger.info('Running bilinear demosaicing')
    import colour_demosaicing as cd
    HDR = cd.demosaicing_CFA_Bayer_bilinear(HDR_bayer, pattern=pattern)

    # White-balance
    HDR = (HDR * wb[np.newaxis, np.newaxis, :]).astype(np.float32)
    if num_sat > 0:
        logger.warning(
            f"{100 * num_sat / (metadata['h'] * metadata['w']):.3f}% of pixels "
            f"(n={num_sat}) are saturated in the shortest exposure. "
            "The values for those pixels will be inaccurate.")

    return HDR
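
The metadata dict consumed by imread_merge_demosaic is produced elsewhere in the original project; the sketch below shows one possible call, where every key, value, and file name is an illustrative assumption rather than something confirmed by the source:

import numpy as np

# All fields are guesses at the expected metadata layout.
metadata = {
    'color_space': 'raw', 'wb': 'camera',
    'exp': np.array([1/100, 1/25, 1/6]),          # exposure times (s)
    'gain': np.array([1.0, 1.0, 1.0]),            # ISO gains
    'aperture': np.array([1.0, 1.0, 1.0]),
    'h': 4000, 'w': 6000,
    'black_level': np.array([512, 512, 512, 512]),
    'saturation_point': 16383,
}
hdr = imread_merge_demosaic(['low.dng', 'mid.dng', 'high.dng'],
                            metadata, align=False, pattern='RGGB')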
Code example #15
File: evaluate.py  Project: apocalypsetank/DeepMosaic
# ==================== Evaluation ====================

desc_list = ["Bilinear", "Malvar (2004)", "DDFAPD", "Learned"]

# Indexing:  Algorithm - Test Image - Row - Column - RGB channel
results = np.zeros((len(desc_list), dataset.shape[0], dataset.shape[1],
                    dataset.shape[2], dataset.shape[3]))

model.compile(optimizer='adam', loss='mean_squared_error')
model.load_weights("checkpoints/weights.100-0.00.hdf5")
model.summary()
results_nn = model.predict(dataset)

for i in range(dataset.shape[0]):
    results[0][i] = demosaicing_CFA_Bayer_bilinear(dataset_mosaiced[i],
                                                   pattern="RGGB")
    results[1][i] = demosaicing_CFA_Bayer_Malvar2004(dataset_mosaiced[i],
                                                     pattern="RGGB")
    results[2][i] = demosaicing_CFA_Bayer_DDFAPD(dataset_mosaiced[i],
                                                 pattern="RGGB")
    results[3][i] = results_nn[i]

psnrs = np.zeros((dataset.shape[0], len(desc_list)))

for alg_idx in range(len(desc_list)):
    for img_idx in range(dataset.shape[0]):
        psnrs[img_idx][alg_idx] = calc_cpsnr(dataset[img_idx],
                                             results[alg_idx][img_idx])

for img_idx in range(dataset.shape[0]):
    print(files_grabbed[img_idx] + ":")
Code example #16
def main():
    args = parse_args()
    conn = serial.Serial(args.port, timeout=1)

    while (1):
        fig = plt.figure(figsize=(15, 6))
        plt.subplots_adjust(left=0.05, right=0.99)
        # image left
        im1 = fig.add_subplot(1, 2, 1)
        conn.write("dcmi\r\n".encode())
        buf = bytes()
        print("Placing board in acquisition mode... ", end="")
        while not buf.decode().endswith("Done !\r\n"):
            buf = buf + conn.read(1)
        print("done")

        # Then read the whole sample out
        buf = bytearray()
        pbar = progressbar.ProgressBar(maxval=TOTAL_SAMPLES_TO_READ).start()
        while len(buf) < 4 * TOTAL_SAMPLES_TO_READ:
            pbar.update(len(buf) / 4)
            buf += conn.read(100)
        pbar.finish()

        print(len(buf))

        image = np.frombuffer(buf, dtype='uint16')
        image = image.reshape((HEIGHT, WIDTH))

        image_rgb = demosaicing_CFA_Bayer_bilinear(image, 'GRBG')

        image_rgb = image_rgb / np.amax(image_rgb)

        plt.imshow(image_rgb)

        # image right
        im2 = fig.add_subplot(1, 2, 2)
        conn.write("dcmi\r\n".encode())
        buf = bytes()
        print("Placing board in acquisition mode... ", end="")
        while not buf.decode().endswith("Done !\r\n"):
            buf = buf + conn.read(1)
        print("done")

        # Then read the whole sample out
        buf = bytearray()
        pbar = progressbar.ProgressBar(maxval=TOTAL_SAMPLES_TO_READ).start()
        while len(buf) < 4 * TOTAL_SAMPLES_TO_READ:
            pbar.update(len(buf) / 4)
            buf += conn.read(100)
        pbar.finish()

        print(len(buf))

        image = np.frombuffer(buf, dtype='uint16')
        image = image.reshape((HEIGHT, WIDTH))

        image_rgb = demosaicing_CFA_Bayer_bilinear(image, 'GRBG')

        image_rgb = image_rgb / np.amax(image_rgb)

        plt.imshow(image_rgb)

        plt.pause(0.033)
        plt.close()
Code example #17
def main():

    configfile = 'processing.cfg'
    parser = argparse.ArgumentParser(
        description='Process raw RGB FITS files for specific star')
    parser.add_argument('--target',
                        dest='target',
                        default='ALPLYR',
                        help='ALPLYR|GAMCYG')
    parser.add_argument('--configfile',
                        dest='configfile',
                        default=configfile,
                        help='name for config file')
    parser.add_argument('--dispersion',
                        dest='dispersion',
                        action='store_true',
                        default=False,
                        help='find dispersion relation by marking lines')

    args = parser.parse_args()

    print(args.dispersion)
    plt.style.use(astropy_mpl_style)

    config = RawConfigParser()
    config.read(configfile)

    section = args.target
    fitsdirs = glob(config.get(section, 'datapath'))
    master = config.get(section, 'master')
    numbers = eval(config.get(section, 'numbers'))
    rawfile = config.get(section, 'raw_rgb_file')
    mincol = int(config.get('MAIN', 'ccdcols_min'))
    maxcol = int(config.get('MAIN', 'ccdcols_max'))
    minrow = int(config.get('MAIN', 'ccdrows_min'))
    maxrow = int(config.get('MAIN', 'ccdrows_max'))

    filenames = []
    for number in numbers:
        filenames.append("%s%03d%s" % (master, number, '.fit'))

    ic = ImageFileCollection(fitsdirs[0], keywords='*', filenames=filenames)

    rawfile = config.get(section, 'raw_rgb_file')
    if os.path.exists(rawfile):
        with open(rawfile, 'rb') as f:
            data = pickle.load(f)
            raw_r = data['raw_r']
            raw_g = data['raw_g']
            raw_b = data['raw_b']
    else:

        raw_r = np.zeros(maxcol)
        raw_g = np.zeros(maxcol)
        raw_b = np.zeros(maxcol)

        for data, fname in ic.data(return_fname=True):
            minrow = int(config.get('MAIN', 'ccdrows_min'))
            maxrow = int(config.get('MAIN', 'ccdrows_max'))

            rgb = demosaicing_CFA_Bayer_bilinear(data)

            trace = rgb[minrow:maxrow, mincol:maxcol, 1].sum(axis=1)
            max_i = max(trace)
            ix_line = np.where(trace > (max_i * 0.1))
            minrow = np.min(ix_line)
            maxrow = np.max(ix_line)

            print(fname, minrow, maxrow, maxrow - minrow, max_i)
            raw_r = raw_r + rgb[minrow:maxrow, mincol:maxcol, 0].sum(axis=0)
            raw_g = raw_g + rgb[minrow:maxrow, mincol:maxcol, 1].sum(axis=0)
            raw_b = raw_b + rgb[minrow:maxrow, mincol:maxcol, 2].sum(axis=0)
        data = {
            'raw_r': raw_r,
            'raw_g': raw_g,
            'raw_b': raw_b,
        }
        with open(rawfile, 'wb') as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)

    if args.dispersion == False and config.has_option(section, 'coeff'):
        print("Using existing dispersion relation")
        coeff = eval(config.get(section, 'coeff'))
        einsen = np.linspace(1, len(raw_r), num=len(raw_r))
        wave = einsen * einsen * coeff[0] + einsen * coeff[1] + coeff[2]
        wave = wave[0:len(einsen)]

        fig = plt.figure(figsize=(14, 6))
        plt.plot(wave, raw_r[::-1], color='r')
        plt.plot(wave, raw_g[::-1], color='g')
        plt.plot(wave, raw_b[::-1], color='b')

        for wave0 in eval(config.get(section, 'lines')):
            plt.plot([wave0, wave0], [0.1e7, 1.0e7])
            plt.text(wave0,
                     1.0e7,
                     str(wave0),
                     rotation=90,
                     rotation_mode='anchor',
                     fontsize=10)
            #plt.Text(wave0,1.0e7,text=str(wave0))
        plt.xlim(3500, 8000)
        plt.show()

        fig = plt.figure(figsize=(14, 6))
        plt.plot(wave, raw_r[::-1] + 2 * raw_g[::-1] + raw_b[::-1])
        plt.show()
    else:
        print("Mark spectrum lines for dispersion solution")
        print("Hint: typical lines:")
        print(
            "Balmer Series: H-alpha 6563 H-beta 4861 H-gamma 4340 H-delta 4102 H-epsilon 3970 H-zeta 3889 H-eta 3835 "
        )
        print("Telluric lines: 6863, 7594")
        fig = plt.figure(figsize=(14, 6))
        plt.axis([1000, 3500, 0, 2.0e7])
        n = len(raw_r[::-1])
        pixels = np.linspace(1, n - 1, num=n - 1)

        pixels = np.arange(1, n + 1, 1)
        line, = plt.plot(pixels, raw_r[::-1], color='r')
        dc = DispersionDataCursor(plt.gca())
        fig.canvas.mpl_connect('pick_event', dc)
        line.set_picker(5)  # Tolerance in points
        plt.show()

        print("detected:")
        print(dc.getPositions())
        print(dc.getWavelengths())
        sortedPositions = np.sort(dc.getPositions())
        sortedWavelengths = np.sort(dc.getWavelengths())
        print(sortedPositions)
        print(sortedWavelengths)

        query = input('accept (Y/N)? ')
        if query == 'Y':
            config.set(section, 'positions', value=str(sortedPositions))
            config.set(section, 'wavelengths', value=str(sortedWavelengths))
            fig = plt.figure(figsize=(14, 6))
            plt.plot(sortedPositions, sortedWavelengths)
            plt.show()
            with open(configfile, 'w') as cf:
                config.write(cf)
Code example #18
'''from PIL import Image
import numpy as np
img = (np.fromfile('tempmio.out', dtype=np.uint16).astype(np.float32) / 0x3FFF * 0xFF).astype(np.uint8).reshape((-1,5280))#3456  #5280
print(img)
img=Image.fromarray(img)
img.save('gray.png')

#img.save('gray.png')
#!/usr/bin/env python3
'''
import argparse

import colour_demosaicing
import numpy as np
from PIL import Image

data = (np.fromfile('tempmio.out', dtype=np.dtype('u2')).reshape(
    (3456, -1)).astype(np.float64) / 0x3FFF)
data = colour_demosaicing.demosaicing_CFA_Bayer_bilinear(data)
data = (data * 0xFF).astype(np.uint8)
img = Image.fromarray(data)
img.show()
Code example #19
File: watchedcamera.py  Project: MartinCooke/jocular
    def process_sub(self, s, path):
        ''' Apply user-specified colour space conversion and binning. Also
            check if the image has 3 separate RGB planes and generate 3
            separate subs.
        '''

        bn = os.path.basename(path)
        im = s.get_image()
        imshape = str(im.shape)
        imstats = 'mean {:.4f}. range {:.4f}-{:.4f}'.format(
            np.mean(im), np.min(im), np.max(im))

        # check if image has 3 separate (RGB) planes
        if im.ndim == 3:
            logger.debug('sub RGB {:} {:}'.format(imshape, imstats))
            if im.shape[0] == 3:  # 3 x X x Y
                for i, chan in enumerate(['R', 'G', 'B']):
                    self.save_mono(s,
                                   self.bin(im[i, :, :]),
                                   bn,
                                   filt=chan,
                                   sub_type='light')
            elif im.shape[2] == 3:  # X x Y x 3
                for i, chan in enumerate(['R', 'G', 'B']):
                    self.save_mono(s,
                                   self.bin(im[:, :, i]),
                                   bn,
                                   filt=chan,
                                   sub_type='light')
            else:
                # shouldn't happen as im nims/shape is checked in Image
                logger.error('cannot process 3D image')

        # don't debayer if mono
        elif self.colour_space == 'mono':
            logger.debug('sub mono {:} {:}'.format(imshape, imstats))
            self.save_mono(s, self.bin(im), bn)

        # don't debayer if master calibration frame
        elif s.is_master:
            logger.debug('master mono {:} {:}'.format(imshape, imstats))
            self.save_mono(s, self.bin(im), bn)

        # debayer
        else:
            logger.debug('sub OSC {:} {:}'.format(imshape, imstats))
            from colour_demosaicing import demosaicing_CFA_Bayer_bilinear
            rgb = demosaicing_CFA_Bayer_bilinear(im, pattern=self.colour_space)
            # rescale to original intensity range
            cfa_min, cfa_max = np.min(im), np.max(im)
            rgb_min, rgb_max = np.min(rgb), np.max(rgb)
            rgb = cfa_min + (cfa_max - cfa_min) * (rgb - rgb_min) / (rgb_max -
                                                                     rgb_min)
            for i, chan in enumerate(['R', 'G', 'B']):
                self.save_mono(s,
                               self.bin(rgb[:, :, i]),
                               bn,
                               filt=chan,
                               sub_type='light')

        if self.save_originals:
            Component.get('ObjectIO').save_original(path)
        else:
            Component.get('ObjectIO').delete_file(path)
Code example #20
File: dataset.py  Project: lilong10/dmcnn-vd
    def _bilin(self, cfa):
        bilin = demosaicing_CFA_Bayer_bilinear(cfa[:, :, 0])
        return bilin
Code example #21
def flip(raw_img, flip):
    if flip == 3:
        raw_img = np.rot90(raw_img, k=2)
    elif flip == 5:
        raw_img = np.rot90(raw_img, k=1)
    elif flip == 6:
        raw_img = np.rot90(raw_img, k=3)
    else:
        pass
    return raw_img


for path in dng_path:
    print("Start Processing %s" % os.path.basename(path))
    raw = rawpy.imread(path)
    file_name = path.split('/')[-1].split('.')[0]
    im = raw.postprocess(use_camera_wb=True, no_auto_bright=True)
    flip_val = raw.sizes.flip
    cwb = raw.camera_whitebalance
    raw_img = raw.raw_image_visible
    if camera_name == 'Canon EOD 5D':
        raw_img = np.maximum(raw_img - 127.0, 0)
    de_raw = colour_demosaicing.demosaicing_CFA_Bayer_bilinear(
        raw_img, Bayer_Pattern)
    de_raw = flip(de_raw, flip_val)
    rgb_img = PILImage.fromarray(im).save(rgb_target_path + file_name + '.jpg',
                                          quality=JPEG_Quality,
                                          subsampling=1)
    np.savez(raw_input_path + file_name + '.npz', raw=de_raw, wb=cwb)
Code example #22
def generateDemosaicWithGaussianPoissonNoiseSTD(img, std):
    gausspois_out = generateGaussianRandomVarNoiseChannelSpecificSTD(img, std)
    tmp_mosaic = mosaicing_CFA_Bayer(gausspois_out)
    tmp_demosaic = demosaicing_CFA_Bayer_bilinear(tmp_mosaic)
    return np.clip(tmp_demosaic, 0, 1)
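
A usage sketch for the helper above; generateGaussianRandomVarNoiseChannelSpecificSTD comes from the original project, and the [0, 1] float RGB input is an assumption based on the surrounding code:

import numpy as np

# Assumed float RGB image in [0, 1].
img = np.random.rand(48, 48, 3)

noisy = generateDemosaicWithGaussianPoissonNoiseSTD(img, std=0.02)
print(noisy.shape, noisy.min() >= 0, noisy.max() <= 1)  # (48, 48, 3) True True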
Code example #23
        data = pickle.load(f)
        raw_r = data['raw_r']
        raw_g = data['raw_g']
        raw_b = data['raw_b']
else:
        
    raw_r = np.zeros(maxcol)
    raw_g = np.zeros(maxcol)
    raw_b = np.zeros(maxcol)


    for data, fname in ic.data( return_fname=True):
        minrow = int(config.get('MAIN','ccdrows_min'))
        maxrow = int(config.get('MAIN','ccdrows_max'))

        rgb = demosaicing_CFA_Bayer_bilinear(data)
        
        trace = rgb[minrow:maxrow,mincol:maxcol,1].sum(axis=1)
        max_i = max(trace)
        ix_line = np.where(trace > (max_i*0.1))
        minrow = np.min(ix_line)
        maxrow = np.max(ix_line)
        
        print (fname, minrow, maxrow, maxrow-minrow, max_i)
        raw_r = raw_r + rgb[minrow:maxrow,mincol:maxcol,0].sum(axis=0)
        raw_g = raw_g + rgb[minrow:maxrow,mincol:maxcol,1].sum(axis=0)
        raw_b = raw_b + rgb[minrow:maxrow,mincol:maxcol,2].sum(axis=0)
    data = {
        'raw_r': raw_r,
        'raw_g': raw_g,
        'raw_b': raw_b,
Code example #24
def crop(redmiraw_file, redmiraw_img, huaweirgb_img, 
    redmiraw_gray, huaweirgb_cropped, output_root, tag):
    img1_raw = redmiraw_img
    img1 = redmiraw_gray
    img2 = huaweirgb_cropped

    img_name = os.path.split(redmiraw_file)[-1]
    img_name = os.path.splitext(img_name)[0]

    h, w = img1.shape
    stride=448 #patch
    pp_dir = os.path.join(output_root, tag+'_pairedpatches')
    if not os.path.exists(pp_dir):
        os.makedirs(pp_dir)

    # patches_dir
    patches_dir = os.path.join(output_root, tag+'_patches') 
    raw_patches_dir = os.path.join(patches_dir, tag.split('_')[1])
    rgb_patches_dir = os.path.join(patches_dir, tag.split('_')[-1])
    #print('\t@raw_patches_dir: {}'.format(raw_patches_dir))
    #print('\t@rgb_patches_dir: {}'.format(rgb_patches_dir))

    if not os.path.exists(patches_dir):
        os.makedirs(patches_dir)

    if not os.path.exists(raw_patches_dir):
        os.makedirs(raw_patches_dir)

    if not os.path.exists(rgb_patches_dir):
        os.makedirs(rgb_patches_dir)

    idx = 0
    for hc in range (0, h, stride):
        for wc in range(0, w, stride):
            if hc + 448 < h and wc + 448 < w:
                cropped1 = img1[0+hc:448+hc, 0+wc:448+wc]
                cropped2 = img2[0+hc:448+hc, 0+wc:448+wc]
                cropp1 = cropped1.reshape(cropped1.size, order='C')  # 
                cropp2 = cropped2.reshape(cropped2.size, order='C')
                #print("FileName: "+str(hc+448)+'x'+str(wc+448)+"  Score: "+str(np.corrcoef(cropp1, cropp2)[0, 1]))
                #threshold
                ssssss = np.corrcoef(cropp1, cropp2)[0, 1] 
                if ssssss > 0.9:
                    idx += 1
                    #print('\t@cropped1: ', np.shape(cropped1))
                    #print('\t@cropped2: ', np.shape(cropped1))
                    cropped_compare = np.hstack((cropped1, cropped2))
                    pp_file = os.path.join(pp_dir, '%s_%d_%.02f.jpg'%(img_name, idx, ssssss))
                    cv2.imwrite(pp_file, cropped_compare) 

                    # Save raw_patch & rgb_patch
                    cropped1_raw = img1_raw[0+hc:448+hc, 0+wc:448+wc]
                    rgb_patch = huaweirgb_img[0+hc:448+hc, 0+wc:448+wc]

                    h0 = np.shape(cropped1_raw)[0]
                    w0 = np.shape(cropped1_raw)[1]
                    h1 = np.shape(rgb_patch)[0] 
                    w1 = np.shape(rgb_patch)[1] 
 
                    if h0==448 and h1 == 448 and w0 == 448 and w1 == 448:
                        cropped1_raw = cropped1_raw.astype(np.uint16)

                        raw_patch_file = os.path.join(raw_patches_dir, '%s_%d.png'%(img_name, idx))
                        imageio.imwrite(raw_patch_file, cropped1_raw)
     
                        rgb_patch_file = os.path.join(rgb_patches_dir, '%s_%d.jpg'%(img_name, idx))
                        cv2.imwrite(rgb_patch_file, rgb_patch)
 
                        # Check patch: raw_rgb
                        #     Read redmi8 raw patch
                        raw_image = np.asarray(imageio.imread(raw_patch_file)) 
                        CFA = raw_image.astype(np.float32)

                        # pip install colour-demosaicing
                        if 'redmi' in redmiraw_file:
                            bayyer_pattern = 'BGGR'
                        if 'oppo' in redmiraw_file:
                            bayyer_pattern = 'RGGB'
                        raw_rgb = demosaicing_CFA_Bayer_bilinear(CFA, bayyer_pattern)

                        # Check patch: raw_rgb
                        #     Read huawei mate30pro rgb patch
                        rgb = np.asarray(cv2.imread(rgb_patch_file)) 

                        #print('\t@raw_rgb: ', np.shape(raw_rgb))
                        #print('\t@rgb: ', np.shape(rgb))

                        patch_compare = np.hstack((raw_rgb, rgb))
                        pp_file = os.path.join(pp_dir, '%s_%d_patch.jpg'%(img_name, idx))
                        cv2.imwrite(pp_file, patch_compare)
Code example #25
def process(filename, use_srgb=True, use_gamma=True, brightness='percentile', demosaicing='menon'):
    """
    A simple imaging pipeline implemented from scratch.
    :param filename: input RAW image
    :param use_srgb: set to False to disable camera RGB to sRGB conversion
    :param use_gamma: set to False to disable gamma correction
    :param brightness: global brightness correction method (percentile, shift or None)
    :param demosaicing: demosaicing method (menon, bilinear)
    """

    # Sanity checks
    if brightness not in ['percentile', 'shift', None]:
        raise ValueError('Unsupported brightness correction mode!')
        
    if demosaicing not in ['menon', 'bilinear']:
        raise ValueError('Unsupported demosaicing method!')
    
    with Raw(filename) as raw:
        raw.unpack()
        
        log.debug('Model : {} {}'.format(raw.metadata.make.decode(), raw.metadata.model.decode()))
        log.debug('CFA   : {}'.format(raw.color_description.decode()))    
        
        image_raw = np.array(raw.raw_image(), dtype=np.float32)

        # Normalization and calibration
        black = raw.data.contents.color.black
        saturation = raw.data.contents.color.maximum

        image_raw = image_raw.astype(np.float32)
        image_raw -= black
        
        uint14_max = 1
        image_raw *= uint14_max / (saturation - black)
        image_raw = np.clip(image_raw, 0, uint14_max)
            
        # White balancing
        cam_mul = np.array(raw.data.contents.color.cam_mul, dtype=np.float32)
        cam_mul /= cam_mul[1] # Set the multiplier for G to be 1
        
        cfa_pattern = ''.join([''.join(x) for x in raw.color_filter_array])
        
        if cfa_pattern == 'GBRG':    
            image_raw[1::2, 0::2] *= cam_mul[0]
            image_raw[0::2, 1::2] *= cam_mul[2]
        elif cfa_pattern == 'RGGB':    
            image_raw[0::2, 0::2] *= cam_mul[0]
            image_raw[1::2, 1::2] *= cam_mul[2]
        elif cfa_pattern == 'BGGR':    
            image_raw[1::2, 1::2] *= cam_mul[0]
            image_raw[0::2, 0::2] *= cam_mul[2]        
            
        image_raw = image_raw.clip(0, uint14_max)
        
        # Demosaicing
        if demosaicing == 'menon':
            image_rgb = colour_demosaicing.demosaicing_CFA_Bayer_Menon2007(image_raw, pattern=cfa_pattern)
        elif demosaicing == 'bilinear':
            image_rgb = colour_demosaicing.demosaicing_CFA_Bayer_bilinear(image_raw, pattern=cfa_pattern)
            
        # Color space conversion
        if use_srgb:
            cam2srgb = np.array(raw.data.contents.color.rgb_cam, dtype=np.float64).reshape((3,4))[:, 0:3]
            
            shape = image_rgb.shape
            pixels = image_rgb.reshape(-1, 3).T
            pixels = cam2srgb.dot(pixels)
            
            image_rgb = pixels.T.reshape(shape)
            image_rgb = image_rgb.clip(0, uint14_max)
            
            # Deallocate
            del pixels
        
        # Brightness correction
        if brightness == 'percentile':
            percentile = 0.5
            image_rgb -= np.percentile(image_rgb, percentile)
            image_rgb /= np.percentile(image_rgb, 100 - percentile)
        elif brightness == 'shift':
            mult = 0.25 / np.mean(image_rgb)
            image_rgb *= mult
            
        image_rgb = image_rgb.clip(0, 1)
            
        # Gamma correction
        if use_gamma:
            image_rgb = np.power(image_rgb, 1/2.2)

        # Clip invisible pixels
        image_rgb = image_rgb[0:raw.metadata.height, 0:raw.metadata.width, :]

        # Clip & rotate canvas, if needed
        if raw.metadata.orientation == 5:
            image_rgb = np.rot90(image_rgb)
        elif raw.metadata.orientation == 6:
            image_rgb = np.rot90(image_rgb, 3)
        
    return image_rgb
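
A minimal invocation sketch for the pipeline above; it assumes the rawkit Raw backend used by the function is installed, and the file path is hypothetical:

# Hypothetical RAW file; the pipeline returns a float RGB array clipped to [0, 1].
image_rgb = process('capture.dng', use_srgb=True, use_gamma=True,
                    brightness='percentile', demosaicing='bilinear')
print(image_rgb.shape, image_rgb.min(), image_rgb.max())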
Code example #26
def main():

    configfile = 'processing.cfg'
    parser = argparse.ArgumentParser(
        description='Process raw RGB FITS files for specific star')
    parser.add_argument('--target',
                        dest='target',
                        default='ALPLYR',
                        help='ALPLYR|GAMCYG')
    parser.add_argument('--configfile',
                        dest='configfile',
                        default=configfile,
                        help='name for config file')
    parser.add_argument('--dispersion',
                        dest='dispersion',
                        action='store_true',
                        default=False,
                        help='find dispersion relation by marking lines')

    args = parser.parse_args()

    print(args.dispersion)
    plt.style.use(astropy_mpl_style)

    config = RawConfigParser()
    config.read(configfile)

    section = args.target
    fitsdirs = glob(config.get(section, 'datapath'))
    master = config.get(section, 'master')
    numbers = eval(config.get(section, 'numbers'))
    rawfile = config.get(section, 'raw_rgb_file')
    mincol = int(config.get('MAIN', 'ccdcols_min'))
    maxcol = int(config.get('MAIN', 'ccdcols_max'))
    minrow = int(config.get('MAIN', 'ccdrows_min'))
    maxrow = int(config.get('MAIN', 'ccdrows_max'))

    filenames = []
    for number in numbers:
        filenames.append("%s%03d%s" % (master, number, '.fit'))

    ic = ImageFileCollection(fitsdirs[0], keywords='*', filenames=filenames)

    rawfile = config.get(section, 'raw_rgb_file')

    raw_r = np.zeros(maxcol)
    raw_g = np.zeros(maxcol)
    raw_b = np.zeros(maxcol)

    for data, fname in ic.data(return_fname=True):
        minrow = int(config.get('MAIN', 'ccdrows_min'))
        maxrow = int(config.get('MAIN', 'ccdrows_max'))

        rgb = demosaicing_CFA_Bayer_bilinear(data)

        trace = rgb[minrow:maxrow, mincol:maxcol, 1].sum(axis=1)
        max_i = max(trace)

        fig = plt.figure(figsize=(14, 6))
        #plt.plot(wave_np,flux_r,'r')

        plt.plot(trace / max_i)

        plt.show()
    def synthesize_noise(self,
                         img,
                         max_s=25 / 255,
                         max_c=25 / 255,
                         min_s=0,
                         min_c=0):
        channel = img.shape[2]  # W H C: rgb
        if self.sigma_s is None:
            np.random.seed(seed=None)
            sigma_s = np.random.uniform(min_s, max_s, (1, 1, channel))
        else:
            sigma_s = self.sigma_s
        if self.sigma_c is None:
            np.random.seed(seed=None)
            sigma_c = np.random.uniform(min_c, max_c, (1, 1, channel))
        else:
            sigma_c = self.sigma_c
        if self.crf_index is None:
            np.random.seed(seed=None)
            crf_index = random.randint(0, 200)
        else:
            crf_index = self.crf_index
        if self.pattern is None:
            np.random.seed(seed=None)
            pattern = random.randint(0, 5)
        else:
            pattern = self.pattern

        I = self.data_I_B['I'][crf_index, :].tolist()
        B = self.data_I_B['B'][crf_index, :].tolist()
        invI = self.data_invI_invB['invI'][crf_index, :].tolist()
        invB = self.data_invI_invB['invB'][crf_index, :].tolist()

        # x-->L
        temp_x = self.ICRF_Map(img, invI, invB)

        # adding noise
        noise_s_map = np.tile(sigma_s,
                              (temp_x.shape[0], temp_x.shape[1], 1)) * temp_x
        if self.mode == 'Test':
            np.random.seed(seed=0)  # for reproducibility
            noise_s = np.random.normal(0, 1, temp_x.shape) * noise_s_map
        else:
            np.random.seed(seed=None)
            noise_s = np.random.normal(0, 1, temp_x.shape) * noise_s_map

        noise_c_map = np.tile(sigma_c, (temp_x.shape[0], temp_x.shape[1], 1))
        if self.mode == 'Test':
            np.random.seed(seed=0)  # for reproducibility
            noise_c = np.random.normal(0, 1, temp_x.shape) * noise_c_map
        else:
            np.random.seed(seed=None)
            noise_c = np.random.normal(0, 1, temp_x.shape) * noise_c_map

        temp_n = temp_x + noise_s + noise_c
        noise_map = np.sqrt(noise_s_map + noise_c_map)

        if self.corr:
            # L-->x
            temp_x = self.CRF_Map(temp_x, I, B)

            # apply the Bayer CFA (mosaic)
            if pattern == 0:
                B_b_x = mosaicing_CFA_Bayer(temp_x, 'GBRG')
            elif pattern == 1:
                B_b_x = mosaicing_CFA_Bayer(temp_x, 'GRBG')
            elif pattern == 2:
                B_b_x = mosaicing_CFA_Bayer(temp_x, 'BGGR')
            elif pattern == 3:
                B_b_x = mosaicing_CFA_Bayer(temp_x, 'RGGB')
            else:
                B_b_x = temp_x
            temp_x = B_b_x

            # demosaic
            if pattern == 0:
                lin_rgb_x = demosaicing_CFA_Bayer_bilinear(temp_x, 'GBRG')
            elif pattern == 1:
                lin_rgb_x = demosaicing_CFA_Bayer_bilinear(temp_x, 'GRBG')
            elif pattern == 2:
                lin_rgb_x = demosaicing_CFA_Bayer_bilinear(temp_x, 'BGGR')
            elif pattern == 3:
                lin_rgb_x = demosaicing_CFA_Bayer_bilinear(temp_x, 'RGGB')
            else:
                lin_rgb_x = temp_x
            temp_x = lin_rgb_x

        # L-->x
        temp_n = self.CRF_Map(temp_n, I, B)

        # apply the Bayer CFA (mosaic)
        if pattern == 0:
            B_b_n = mosaicing_CFA_Bayer(temp_n, 'GBRG')
        elif pattern == 1:
            B_b_n = mosaicing_CFA_Bayer(temp_n, 'GRBG')
        elif pattern == 2:
            B_b_n = mosaicing_CFA_Bayer(temp_n, 'BGGR')
        elif pattern == 3:
            B_b_n = mosaicing_CFA_Bayer(temp_n, 'RGGB')
        else:
            B_b_n = temp_n
        temp_n = B_b_n

        # demosaic
        if pattern == 0:
            lin_rgb_n = demosaicing_CFA_Bayer_bilinear(temp_n, 'GBRG')
        elif pattern == 1:
            lin_rgb_n = demosaicing_CFA_Bayer_bilinear(temp_n, 'GRBG')
        elif pattern == 2:
            lin_rgb_n = demosaicing_CFA_Bayer_bilinear(temp_n, 'BGGR')
        elif pattern == 3:
            lin_rgb_n = demosaicing_CFA_Bayer_bilinear(temp_n, 'RGGB')
        else:
            lin_rgb_n = temp_n
        temp_n = lin_rgb_n

        if self.corr:
            y = temp_n - temp_x + img
        else:
            y = temp_n
        return y, noise_map