Code Example #1
    def handle_pushButton_cmos_snr_linearity_clicked(self, checked):
        print("cmos_snr_linearity pushButton clicked", self)
        i = 0
        filePath = QFileDialog.getExistingDirectory(self.ui, "Select storage path")
        for root, dirs, files in os.walk(filePath):
            for f in files:
                i = i + 1  # count how many files the folder contains
        print("i=", i)
        if i < 2:  # fewer than 2 files, linearity cannot be measured
            QMessageBox.warning(
                self.ui,
                'Too few files',
                'The folder must contain more than 1 file')
            return

        filename = [0] * i  # initialize a list with i entries
        Rvalue = [0] * i
        GRvalue = [0] * i
        GBvalue = [0] * i
        Bvalue = [0] * i

        width = int(self.ui.lineEdit_cmos_snr_width.text())
        height = int(self.ui.lineEdit_cmos_snr_height.text())
        w_percent = int(self.ui.lineEdit_cmos_snr_w_percent.text()) / 100
        h_percent = int(self.ui.lineEdit_cmos_snr_h_percent.text()) / 100
        pattern = self.ui.comboBox_cmos_snr_cfa.currentText()
        dataformat = self.ui.comboBox_cmos_snr_dataformat.currentText()
        shift_bits = int(self.ui.comboBox_cmos_snr_bitshift.currentText())
        WidthBorder = round((1 - w_percent) * width / 4)
        HeightBorder = round((1 - h_percent) * height / 4)
        # print("WidthBorder:", WidthBorder, HeightBorder, (width / 2 - WidthBorder), (height / 2 - HeightBorder))
        i = 0
        for root, dirs, files in os.walk(filePath):
            for f in files:
                filename[i] = os.path.join(root, f)  # store the file path in the list
                image = rawimage.read_plained_file(filename[i], "uint16", width, height, dataformat)
                R, GR, GB, B, G = rawimage.bayer_channel_separation(image, pattern)
                R = R[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                GR = GR[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                GB = GB[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                B = B[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                # print("shape:", np.shape(R))
                Rvalue[i] = np.mean(R)
                GRvalue[i] = np.mean(GR)
                GBvalue[i] = np.mean(GB)
                Bvalue[i] = np.mean(B)

                print(filename[i])
                i = i + 1
        print("len = ", len(filename))
        x = np.arange(0, i)
        plt.plot(x, Rvalue, "r", label="R")
        plt.plot(x, GRvalue, "g", label="GR")
        plt.plot(x, GBvalue, "c", label="GB")
        plt.plot(x, Bvalue, "b", label="B")

        plt.title("Linearity")
        plt.legend(loc="lower right")
        plt.show()
        self.ui.close()
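These handlers are excerpts from a Qt-based measurement tool, so the module-level imports they depend on are not shown. A minimal set that lets the snippets in this section resolve, assuming the project's own rawimage helper module (the Qt binding is an assumption; PyQt5 exposes the same classes), might look like this:

import os
import math

import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from scipy import signal
from PySide2.QtWidgets import QFileDialog, QMessageBox  # assumption: the original may use PyQt5 instead

import rawimage  # project-local helpers for reading and splitting plain raw files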
Code Example #2
    def handle_pushButton_prnu_calculate_clicked(self, checked):
        print("cmos_prnu_calculate pushButton clicked", self)
        print("path1:\n", self.filePath1)
        print("path2:\n", self.filePath2)

        width = int(self.ui.lineEdit_prnu_width.text())
        height = int(self.ui.lineEdit_prnu_height.text())
        widthpercent = int(self.ui.lineEdit_prnu_widthpercent.text()) / 100
        heightpercent = int(self.ui.lineEdit_prnu_heightpercent.text()) / 100
        filtersize = int(self.ui.lineEdit_prnu_filter_size.text())

        sensorbit = int(self.ui.comboBox_prnu_sensorbit.currentText())
        pattern = self.ui.comboBox_prnu_cfa.currentText()

        image1 = rawimage.read_plained_file(self.filePath1, 'uint16', width,
                                            height, 'ieee-le')
        image2 = rawimage.read_plained_file(self.filePath2, 'uint16', width,
                                            height, 'ieee-le')
        image = image1 - image2
        R, GR, GB, B, G = rawimage.bayer_channel_separation(image, pattern)
        WidthBorder = round((1 - widthpercent) * width / 4)
        HeightBorder = round((1 - heightpercent) * height / 4)
        G = G[HeightBorder:int(height / 2 - HeightBorder),
              WidthBorder:int(width / 2 - WidthBorder)]
        filter1 = np.ones(
            (2 * filtersize + 1, 2 * filtersize + 1)) / ((2 * filtersize + 1) *
                                                         (2 * filtersize + 1))
        filter_im = signal.convolve(G, filter1)

        std_G = np.std(filter_im)
        mean_G = np.mean(G)
        prnu_value = 100 * std_G / mean_G
        self.ui.lineEdit_prnu_value.setText(str(format(prnu_value, '.2f')))
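For reference, the PRNU figure above is 100 * std / mean of the box-filtered green channel of the difference of two flat-field frames. A minimal, self-contained sketch of the same metric on synthetic data (the function name, array shape and noise level are illustrative assumptions):

import numpy as np
from scipy import signal

def prnu_percent(g_plane, filtersize=2):
    # (2*filtersize+1) x (2*filtersize+1) box filter, as in the handler above
    k = 2 * filtersize + 1
    box = np.ones((k, k)) / (k * k)
    smoothed = signal.convolve(g_plane, box)  # low-pass to suppress temporal noise
    return 100.0 * np.std(smoothed) / np.mean(g_plane)

# hypothetical flat-field difference frame
rng = np.random.default_rng(0)
g = 512.0 + rng.normal(0.0, 4.0, size=(200, 300))
print(prnu_percent(g))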
Code Example #3
    def handle_pushButton_sensitivity_calculate_clicked(self, checked):
        print("cmos_sensitivity_calculate pushButton clicked", self)
        print("path1:\n", self.filePath1)
        print("path2:\n", self.filePath2)

        width = int(self.ui.lineEdit_sensitivity_width.text())
        height = int(self.ui.lineEdit_sensitivity_height.text())
        widthpercent = int(
            self.ui.lineEdit_sensitivity_widthpercent.text()) / 100
        heightpercent = int(
            self.ui.lineEdit_sensitivity_heightpercent.text()) / 100
        bright = int(self.ui.lineEdit_sensitivity_bright.text())
        time_int = int(self.ui.lineEdit_sensitivity_time_int.text())

        sensorbit = int(self.ui.comboBox_sensitivity_sensorbit.currentText())
        pattern = self.ui.comboBox_sensitivity_cfa.currentText()

        image_white = rawimage.read_plained_file(self.filePath1, 'uint16',
                                                 width, height, 'ieee-le')
        image_black = rawimage.read_plained_file(self.filePath2, 'uint16',
                                                 width, height, 'ieee-le')
        image = image_white - image_black
        R, GR, GB, B, G = rawimage.bayer_channel_separation(image, pattern)
        WidthBorder = round((1 - widthpercent) * width / 4)
        HeightBorder = round((1 - heightpercent) * height / 4)
        G = G[HeightBorder:int(height / 2 - HeightBorder),
              WidthBorder:int(width / 2 - WidthBorder)]
        G_mean = np.mean(G)
        fsd = pow(2, sensorbit) - 1
        sensitivity_value = G_mean / (bright * fsd / time_int)
        self.ui.lineEdit_sensitivity_value.setText(
            str(format(sensitivity_value, '.7f')))
Code Example #4
    def handle_pushButton_dsnu_calculate_clicked(self, checked):
        print("cmos_dsnu_calculate pushButton clicked", self)
        print("path1:\n", self.filePath1)
        print("path2:\n", self.filePath2)

        width = int(self.ui.lineEdit_dsnu_width.text())
        height = int(self.ui.lineEdit_dsnu_height.text())
        widthpercent = int(self.ui.lineEdit_dsnu_widthpercent.text()) / 100
        heightpercent = int(self.ui.lineEdit_dsnu_heightpercent.text()) / 100
        time_image1 = int(self.ui.lineEdit_dsnu_time_image1.text())
        time_image2 = int(self.ui.lineEdit_dsnu_time_image2.text())

        sensorbit = int(self.ui.comboBox_dsnu_sensorbit.currentText())
        pattern = self.ui.comboBox_dsnu_cfa.currentText()

        image1 = rawimage.read_plained_file(self.filePath1, 'uint16', width,
                                            height, 'ieee-le')
        image2 = rawimage.read_plained_file(self.filePath2, 'uint16', width,
                                            height, 'ieee-le')
        image = image1 - image2
        R, GR, GB, B, G = rawimage.bayer_channel_separation(image, pattern)
        WidthBorder = round((1 - widthpercent) * width / 4)
        HeightBorder = round((1 - heightpercent) * height / 4)
        G = G[HeightBorder:int(height / 2 - HeightBorder),
              WidthBorder:int(width / 2 - WidthBorder)]
        std_G = np.std(G)
        fsd = pow(2, sensorbit) - 1
        dsnu_value = 1000 * std_G / ((time_image1 - time_image2) * fsd)
        self.ui.lineEdit_dsnu_value.setText(str(format(dsnu_value, '.7f')))
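The DSNU value above is the standard deviation of the dark-frame difference, normalized by the integration-time difference and the full-scale value, then scaled by 1000. A worked sketch with hypothetical numbers (sensor bit depth, exposure times and noise level are assumptions):

import numpy as np

sensorbit = 12
time_image1, time_image2 = 100, 10               # integration times (hypothetical units of ms)
fsd = pow(2, sensorbit) - 1                      # full-scale digital value, 4095 for 12 bit

rng = np.random.default_rng(1)
g_diff = rng.normal(0.0, 3.0, size=(100, 100))   # stands in for the cropped G channel of image1 - image2

dsnu_value = 1000 * np.std(g_diff) / ((time_image1 - time_image2) * fsd)
print(format(dsnu_value, '.7f'))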
Code Example #5
def bayer_average(image, pattern):
    R, GR, GB, B = rawimage.bayer_channel_separation(image, pattern)
    R_a = mono_average(R)
    GR_a = mono_average(GR)
    GB_a = mono_average(GB)
    B_a = mono_average(B)
    return R_a, GR_a, GB_a, B_a
Code Example #6
def bayer_cumuhistogram(image, pattern, max1):
    R, GR, GB, B = rawimage.bayer_channel_separation(image, pattern)
    R_hist = mono_cumuhistogram(R, max1)
    GR_hist = mono_cumuhistogram(GR, max1)
    GB_hist = mono_cumuhistogram(GB, max1)
    B_hist = mono_cumuhistogram(B, max1)
    return R_hist, GR_hist, GB_hist, B_hist
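mono_cumuhistogram (and mono_average in the previous example, presumably just the mean of a plane) is not included in these excerpts. A plausible implementation of the cumulative histogram, offered only as an assumption about what the helper does:

import numpy as np

def mono_cumuhistogram(plane, max1):
    # count pixels at each integer value 0..max1, then take the running sum
    hist, _ = np.histogram(plane, bins=max1 + 1, range=(0, max1 + 1))
    return np.cumsum(hist)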
Code Example #7
File: cj_lsc.py  Project: XiaoZhuXinYu/ISPcode-python
def apply_shading_to_image(img, block_size, shading_R, shading_GR, shading_GB,
                           shading_B, pattern, ratio):
    # use G as luma
    luma_shading = (shading_GR + shading_GB) / 2
    # compute the color shading
    R_color_shading = shading_R / luma_shading
    GR_color_shading = shading_GR / luma_shading
    GB_color_shading = shading_GB / luma_shading
    B_color_shading = shading_B / luma_shading
    # compute the adjusted luma shading
    new_luma_shading = (luma_shading - 1) * ratio + 1
    # merge the two kinds of shading
    new_shading_R = R_color_shading * new_luma_shading
    new_shading_GR = GR_color_shading * new_luma_shading
    new_shading_GB = GB_color_shading * new_luma_shading
    new_shading_B = B_color_shading * new_luma_shading

    R, GR, GB, B = rawimage.bayer_channel_separation(img, pattern)
    HH, HW = R.shape
    size_new = (HW + block_size, HH + block_size)
    # choice of interpolation method
    ex_R_gain_map = cv.resize(new_shading_R,
                              size_new,
                              interpolation=cv.INTER_CUBIC)
    ex_GR_gain_map = cv.resize(new_shading_GR,
                               size_new,
                               interpolation=cv.INTER_CUBIC)
    ex_GB_gain_map = cv.resize(new_shading_GB,
                               size_new,
                               interpolation=cv.INTER_CUBIC)
    ex_B_gain_map = cv.resize(new_shading_B,
                              size_new,
                              interpolation=cv.INTER_CUBIC)
    # crop back to the original channel size
    half_b_size = int(block_size / 2)
    R_gain_map = ex_R_gain_map[half_b_size:half_b_size + HH,
                               half_b_size:half_b_size + HW]
    GR_gain_map = ex_GR_gain_map[half_b_size:half_b_size + HH,
                                 half_b_size:half_b_size + HW]
    GB_gain_map = ex_GB_gain_map[half_b_size:half_b_size + HH,
                                 half_b_size:half_b_size + HW]
    B_gain_map = ex_B_gain_map[half_b_size:half_b_size + HH,
                               half_b_size:half_b_size + HW]

    R_new = R * R_gain_map
    GR_new = GR * GR_gain_map
    GB_new = GB * GB_gain_map
    B_new = B * B_gain_map

    new_image = rawimage.bayer_channel_integration(R_new, GR_new, GB_new,
                                                   B_new, pattern)
    # clip values to the 0..1023 range
    new_image = np.clip(new_image, a_min=0, a_max=1023)
    return new_image
Code Example #8
def raw_white_balance(image, type, sensorbit, pattern):
    if sensorbit == 10:
        smax = 1023
    elif sensorbit == 12:
        smax = 4095
    else:
        smax = 255

    R, GR, GB, B, G = rawimage.bayer_channel_separation(image, pattern)
    if type == "grey_world":
        R_gain, G_gain, B_gain = grey_world(R, G, B)
    elif type == "auto_threshold":
        R_gain, G_gain, B_gain = auto_threshold(R, G, B)
    elif type == "grey_world2":
        R_gain, G_gain, B_gain = grey_edge(R, G, B, njet=0, mink_norm=1, sigma=0, saturation_threshold=smax)
    elif type == "shade_of_grey":
        R_gain, G_gain, B_gain = grey_edge(R, G, B, njet=0, mink_norm=5, sigma=0, saturation_threshold=smax)
    elif type == "max_RGB":
        R_gain, G_gain, B_gain = grey_edge(R, G, B, njet=0, mink_norm=-1, sigma=0, saturation_threshold=smax)
    elif type == "grey_edge":
        R_gain, G_gain, B_gain = grey_edge(R, G, B, njet=1, mink_norm=5, sigma=2, saturation_threshold=smax)

    result_image = apply_raw(pattern, R, GR, GB, B, R_gain, G_gain, B_gain, smax)

    # rawimage.show_planedraw(result_image, width=4032, height=2742, pattern="gray", sensorbit=10, compress_ratio=1)
    # rawimage.show_planedraw(result_image, width=4032, height=2752, pattern="GRBG", sensorbit=10, compress_ratio=1)

    h, w = R.shape
    img = np.zeros(shape=(h, w, 3))
    img2 = np.zeros(shape=(h, w, 3))
    img[:, :, 0] = R
    img[:, :, 1] = G
    img[:, :, 2] = B
    R2, GR2, GB2, B2, G2 = rawimage.bayer_channel_separation(result_image, pattern)
    img2[:, :, 0] = R2
    img2[:, :, 1] = GR2
    img2[:, :, 2] = B2

    rawimage.show_planedraw(img, w, h, pattern="color", sensorbit=10, compress_ratio=1)
    rawimage.show_planedraw(img2, w, h, pattern="color", sensorbit=10, compress_ratio=1)
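A hypothetical call, assuming a raw frame already loaded through the project's rawimage helpers (file name, resolution, bit depth and pattern are illustrative only):

raw = rawimage.read_plained_file("capture.raw", "uint16", 4032, 3024, "ieee-le")
raw_white_balance(raw, "grey_world", sensorbit=10, pattern="GRBG")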
Code Example #9
File: cj_lsc.py  Project: XiaoZhuXinYu/ISPcode-python
def apply_shading_to_image_base(img, block_size, shading_R, shading_GR,
                                shading_GB, shading_B, pattern, ratio):
    # use G as luma
    luma_shading = (shading_GR + shading_GB) / 2
    # compute the color shading
    R_color_shading = shading_R / luma_shading
    GR_color_shading = shading_GR / luma_shading
    GB_color_shading = shading_GB / luma_shading
    B_color_shading = shading_B / luma_shading
    # compute the adjusted luma shading
    new_luma_shading = (luma_shading - 1) * ratio + 1
    # merge the two kinds of shading
    new_shading_R = R_color_shading * new_luma_shading
    print("new_shading_R.shape:", new_shading_R.shape,
          "R_color_shading.shape:", R_color_shading.shape,
          "new_luma_shading.shape:", new_luma_shading.shape)
    new_shading_GR = GR_color_shading * new_luma_shading
    new_shading_GB = GB_color_shading * new_luma_shading
    new_shading_B = B_color_shading * new_luma_shading

    R, GR, GB, B = rawimage.bayer_channel_separation(img, pattern)
    HH, HW = R.shape
    size_new = (HW, HH)  # note: OpenCV sizes are (width, height), the reverse of NumPy shapes
    # choice of interpolation method
    ex_R_gain_map = cv.resize(new_shading_R,
                              size_new,
                              interpolation=cv.INTER_CUBIC)
    ex_GR_gain_map = cv.resize(new_shading_GR,
                               size_new,
                               interpolation=cv.INTER_CUBIC)
    ex_GB_gain_map = cv.resize(new_shading_GB,
                               size_new,
                               interpolation=cv.INTER_CUBIC)
    ex_B_gain_map = cv.resize(new_shading_B,
                              size_new,
                              interpolation=cv.INTER_CUBIC)

    R_new = R * ex_R_gain_map
    print("R_new.shape:", R_new.shape, "R.shape:", R.shape,
          "ex_R_gain_map.shape:", ex_R_gain_map.shape)

    GR_new = GR * ex_GR_gain_map
    GB_new = GB * ex_GB_gain_map
    B_new = B * ex_B_gain_map

    new_image = rawimage.bayer_channel_integration(R_new, GR_new, GB_new,
                                                   B_new, pattern)
    # clip values to the 0..1023 range
    new_image = np.clip(new_image, a_min=0, a_max=1023)
    return new_image
Code Example #10
    def handle_pushButton_temporal_noise_calculate_clicked(self, checked):
        print("cmos_temporal_noise_calculate pushButton clicked", self)
        i = 0
        filePath = QFileDialog.getExistingDirectory(self.ui, "Select storage path")
        for root, dirs, files in os.walk(filePath):
            for f in files:
                i = i + 1  # count how many files the folder contains
        # print("i=", i)
        if i == 0:  # no folder was loaded
            return

        img_num = i
        filename = [0] * img_num  # initialize a list with img_num entries
        width = int(self.ui.lineEdit_temporal_noise_width.text())
        height = int(self.ui.lineEdit_temporal_noise_height.text())
        roi_x = int(self.ui.lineEdit_temporal_noise_start_x.text())
        roi_y = int(self.ui.lineEdit_temporal_noise_start_y.text())
        roi_width = int(self.ui.lineEdit_temporal_noise_roi_width.text())
        roi_height = int(self.ui.lineEdit_temporal_noise_roi_height.text())

        bit_shift = int(self.ui.comboBox_temporal_noise_bitshift.currentText())
        pattern = self.ui.comboBox_temporal_noise_cfa.currentText()
        dataformat = self.ui.comboBox_temporal_noise_dataformat.currentText()
        inputformat = self.ui.comboBox_temporal_noise_inputformat.currentText()
        Rvalue = np.empty((img_num, roi_height, roi_width))
        i = 0
        for root, dirs, files in os.walk(filePath):
            for f in files:
                filename[i] = os.path.join(root, f)  # store the file path in the list
                image = rawimage.read_plained_file(filename[i], inputformat,
                                                   width, height, dataformat)
                R, GR, GB, B, G = rawimage.bayer_channel_separation(
                    image, pattern)
                Rvalue[i] = R[roi_y:(roi_y + roi_height),
                              roi_x:(roi_x + roi_width)]
                i = i + 1
        var_Rvalue = Rvalue.var(axis=0)  # per-pixel variance across the different frames
        mean_std = math.sqrt(
            np.sum(np.sum(var_Rvalue, axis=1), axis=0) /
            (roi_width * roi_height))
        fsd = pow(2, (16 + bit_shift)) - 1
        db = 20 * np.log10(mean_std / fsd)
        db = format(db, '.2f')
        mean_std = format(mean_std, '.2f')
        self.ui.lineEdit_temporal_noise_emva1288.setText(str(mean_std))
        self.ui.lineEdit_temporal_noise_smia.setText(str(db))
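The two numbers reported above are the RMS temporal noise (per-pixel variance across the frame stack, then the root of the mean, written to the emva1288 field) and the same value in dB relative to full scale (the smia field). A self-contained sketch on a synthetic stack (frame count, ROI size and noise level are assumptions):

import math
import numpy as np

rng = np.random.default_rng(3)
stack = 512.0 + rng.normal(0.0, 2.0, size=(20, 64, 64))  # 20 frames of a 64x64 ROI

var_map = stack.var(axis=0)            # variance of each pixel position across the frames
mean_std = math.sqrt(var_map.mean())   # RMS temporal noise over the ROI
fsd = pow(2, 16) - 1                   # full-scale value, assuming plain 16-bit data
db = 20 * np.log10(mean_std / fsd)
print(format(mean_std, '.2f'), format(db, '.2f'))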
Code Example #11
def binning_image(image, height, width, block_size_h, block_size_w, pattern):
    region_h_n = int(height / block_size_h)
    region_w_n = int(width / block_size_w)
    binning_image1 = np.empty((region_h_n * 2, region_w_n * 2),
                              dtype=np.float32)
    x1 = 0
    y1 = 0
    for j in range(region_h_n):
        for i in range(region_w_n):
            region_data = get_region(image, y1, x1, block_size_h, block_size_w)
            R, GR, GB, B = rawimage.bayer_channel_separation(
                region_data, pattern)
            binning_image1[j * 2, i * 2] = np.mean(R)
            binning_image1[j * 2, (i * 2) + 1] = np.mean(GR)
            binning_image1[(j * 2) + 1, i * 2] = np.mean(GB)
            binning_image1[(j * 2) + 1, (i * 2) + 1] = np.mean(B)
            x1 = x1 + block_size_w
        y1 = y1 + block_size_h
        x1 = 0
    return binning_image1
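get_region is not included in these excerpts; the call pattern above suggests it simply crops a block_size_h x block_size_w window starting at (y1, x1). A plausible stand-in (an assumption, not the project's code) plus a hypothetical usage:

import numpy as np

def get_region(image, y, x, block_h, block_w):
    # plausible stand-in: crop one block starting at (y, x)
    return image[y:y + block_h, x:x + block_w]

# hypothetical usage: bin a 1080p RGGB raw into per-channel block averages
raw = np.random.default_rng(2).integers(0, 1023, size=(1080, 1920)).astype(np.float32)
binned = binning_image(raw, 1080, 1920, block_size_h=60, block_size_w=60, pattern="RGGB")
print(binned.shape)  # (36, 64): a 2x2 group of channel means per 60x60 block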
Code Example #12
    def handle_pushButton_cmos_snr_snr_clicked(self, checked):
        print("cmos_snr_snr pushButton clicked", self)
        i = 0
        filePath = QFileDialog.getExistingDirectory(self.ui, "Select storage path")
        for root, dirs, files in os.walk(filePath):
            for f in files:
                i = i + 1  # count how many files the folder contains
        print("i=", i)
        if i == 0:  # no folder was loaded
            return

        filename = [0] * i  # initialize a list with i entries
        Rvalue = [0] * i
        RNoise = [0] * i
        R_SNR = [0] * i
        GRvalue = [0] * i
        GRNoise = [0] * i
        GR_SNR = [0] * i
        GBvalue = [0] * i
        GBNoise = [0] * i
        GB_SNR = [0] * i
        Bvalue = [0] * i
        BNoise = [0] * i
        B_SNR = [0] * i
        width = int(self.ui.lineEdit_cmos_snr_width.text())
        height = int(self.ui.lineEdit_cmos_snr_height.text())
        w_percent = int(self.ui.lineEdit_cmos_snr_w_percent.text()) / 100
        h_percent = int(self.ui.lineEdit_cmos_snr_h_percent.text()) / 100
        pattern = self.ui.comboBox_cmos_snr_cfa.currentText()
        dataformat = self.ui.comboBox_cmos_snr_dataformat.currentText()
        shift_bits = int(self.ui.comboBox_cmos_snr_bitshift.currentText())
        WidthBorder = round((1 - w_percent) * width / 4)
        HeightBorder = round((1 - h_percent) * height / 4)
        # print("WidthBorder:", WidthBorder, HeightBorder, (width / 2 - WidthBorder), (height / 2 - HeightBorder))
        i = 0
        for root, dirs, files in os.walk(filePath):
            for f in files:
                filename[i] = os.path.join(root, f)  # store the file path in the list
                image = rawimage.read_plained_file(filename[i], "uint16", width, height, dataformat)
                R, GR, GB, B, G = rawimage.bayer_channel_separation(image, pattern)
                R = R[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                GR = GR[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                GB = GB[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                B = B[HeightBorder:int(height / 2 - HeightBorder), WidthBorder:int(width / 2 - WidthBorder)]
                # print("shape:", np.shape(R))
                Rvalue[i] = np.mean(R)
                RNoise[i] = np.std(R)
                R_SNR[i] = Rvalue[i] / RNoise[i]
                GRvalue[i] = np.mean(GR)
                GRNoise[i] = np.std(GR)
                GR_SNR[i] = GRvalue[i] / GRNoise[i]
                GBvalue[i] = np.mean(GB)
                GBNoise[i] = np.std(GB)
                GB_SNR[i] = GBvalue[i] / GBNoise[i]
                Bvalue[i] = np.mean(B)
                BNoise[i] = np.std(B)
                B_SNR[i] = Bvalue[i] / BNoise[i]
                print(filename[i])
                i = i + 1
        print("len = ", len(filename))
        if i > 1:
            x = np.arange(0, i)
            plt.plot(x, R_SNR, "r", label="R")
            plt.plot(x, GR_SNR, "g", label="GR")
            plt.plot(x, GB_SNR, "c", label="GB")
            plt.plot(x, B_SNR, "b", label="B")
        else:
            plt.scatter(1, R_SNR[0], color="r", label="R", linewidth=3)
            plt.scatter(2, GR_SNR[0], color="g", label="GR", linewidth=3)
            plt.scatter(3, GB_SNR[0], color="c", label="GB", linewidth=3)
            plt.scatter(4, B_SNR[0], color="b", label="B", linewidth=3)

        plt.title("SNR")
        plt.legend(loc="lower right")
        plt.show()
        self.ui.close()
Code Example #13
    def handle_pushButton_fpn_calculate_clicked(self, checked):
        print("cmos_fpn_calculate pushButton clicked", self)
        i = 0
        filePath = QFileDialog.getExistingDirectory(self.ui, "Select storage path")
        for root, dirs, files in os.walk(filePath):
            for f in files:
                i = i + 1  # count how many files the folder contains
        # print("i=", i)
        if i == 0:  # no folder was loaded
            return

        img_num = i
        filename = [0] * img_num  # initialize a list with img_num entries
        width = int(self.ui.lineEdit_fpn_width.text())
        height = int(self.ui.lineEdit_fpn_height.text())
        roi_x = int(self.ui.lineEdit_fpn_start_x.text())
        roi_y = int(self.ui.lineEdit_fpn_start_y.text())
        roi_width = int(self.ui.lineEdit_fpn_roi_width.text())
        roi_height = int(self.ui.lineEdit_fpn_roi_height.text())

        bit_shift = int(self.ui.comboBox_fpn_bitshift.currentText())
        pattern = self.ui.comboBox_fpn_cfa.currentText()
        dataformat = self.ui.comboBox_fpn_dataformat.currentText()
        inputformat = self.ui.comboBox_fpn_inputformat.currentText()
        Rvalue = np.zeros((roi_height, roi_width), int)
        average_Rvalue = np.zeros((roi_height, roi_width), int)
        i = 0
        for root, dirs, files in os.walk(filePath):
            for f in files:
                filename[i] = os.path.join(root, f)  # store the file path in the list
                image = rawimage.read_plained_file(filename[i], inputformat,
                                                   width, height, dataformat)
                R, GR, GB, B, G = rawimage.bayer_channel_separation(
                    image, pattern)
                Rvalue = R[roi_y:(roi_y + roi_height),
                           roi_x:(roi_x + roi_width)]
                average_Rvalue = average_Rvalue + Rvalue
                # print("Rvalue = \n", Rvalue[0][0])
                # print("average_Rvalue = \n",  average_Rvalue[0][0])
                i = i + 1
        average_Rvalue = average_Rvalue / i
        # print("shape = ", average_Rvalue)
        Noise_tol = np.std(Rvalue)  # standard deviation over all elements
        FPN_total = np.std(average_Rvalue)
        # print("Noise_tol = ", Noise_tol, str(Noise_tol))
        # print("FPN_total = ", FPN_total, str(FPN_total))
        self.ui.lineEdit_fpn_total.setText(str(format(FPN_total, '.7f')))
        self.ui.lineEdit_fpn_temporal_noise.setText(
            str(format(Noise_tol - FPN_total, '.7f')))

        column_array = np.mean(average_Rvalue, axis=0)  # axis=0: mean of each column
        row_array = np.mean(average_Rvalue, axis=1)  # axis=1: mean of each row
        sum_c = 0
        sum_r = 0
        # print("shape:", column_array.shape[0], row_array.shape)
        for i in range(0, column_array.shape[0] - 1):
            sum_c = sum_c + pow((column_array[i] - column_array[i + 1]), 2)
        for i in range(0, row_array.shape[0] - 1):
            sum_r = sum_r + pow((row_array[i] - row_array[i + 1]), 2)

        fsd = pow(2, (16 + bit_shift)) - 1
        fpn_v_level = np.sqrt(sum_c / (column_array.shape[0] - 1)) / fsd
        fpn_h_level = np.sqrt(sum_r / (row_array.shape[0] - 1)) / fsd
        KERNEL = np.array([-1, -1, -1, -1, -1, 10, -1, -1, -1, -1, -1]) / 10
        fpn_v_max = max(signal.convolve(column_array, KERNEL)) / fsd
        fpn_h_max = max(signal.convolve(row_array, KERNEL)) / fsd
        self.ui.lineEdit_fpn_v_level.setText(str(format(fpn_v_level, '.7f')))
        self.ui.lineEdit_fpn_h_level.setText(str(format(fpn_h_level, '.7f')))
        self.ui.lineEdit_fpn_v_max.setText(str(format(fpn_v_max, '.7f')))
        self.ui.lineEdit_fpn_h_max.setText(str(format(fpn_h_max, '.7f')))
Code Example #14
File: cj_lsc.py  Project: XiaoZhuXinYu/ISPcode-python
def create_lsc_data(img, block_size, pattern):
    # split the four color channels
    R, GR, GB, B = rawimage.bayer_channel_separation(img, pattern)
    # print(img.shape, R.shape)

    # height and width of each channel plane
    HH, HW = R.shape

    # number of blocks along each dimension
    Hblocks = int(HH / block_size)
    Wblocks = int(HW / block_size)

    # The image is split into Hblocks * Wblocks blocks; allocate Hblocks x Wblocks matrices to store the per-block data.
    R_LSC_data = np.zeros((Hblocks, Wblocks))  # mean of R within each block
    B_LSC_data = np.zeros((Hblocks, Wblocks))
    GR_LSC_data = np.zeros((Hblocks, Wblocks))
    GB_LSC_data = np.zeros((Hblocks, Wblocks))

    # distance of each block from the optical center (stored squared)
    RA = np.zeros((Hblocks, Wblocks))

    # compute the mean value of every block
    for y in range(0, HH, block_size):
        for x in range(0, HW, block_size):
            block_y_num = int(y / block_size)
            block_x_num = int(x / block_size)
            R_LSC_data[block_y_num,
                       block_x_num] = R[y:y + block_size, x:x +
                                        block_size].mean()  # average over one block of data
            GR_LSC_data[block_y_num,
                        block_x_num] = GR[y:y + block_size,
                                          x:x + block_size].mean()
            GB_LSC_data[block_y_num,
                        block_x_num] = GB[y:y + block_size,
                                          x:x + block_size].mean()
            B_LSC_data[block_y_num, block_x_num] = B[y:y + block_size,
                                                     x:x + block_size].mean()

    # locate the true optical-center block from the maximum of GR
    center_point = np.where(GR_LSC_data == np.max(GR_LSC_data))
    center_y = center_point[0] * block_size + block_size / 2
    center_x = center_point[1] * block_size + block_size / 2

    # compute the (squared) distance of each block from the optical center
    for y in range(0, HH, block_size):
        for x in range(0, HW, block_size):
            xx = x + block_size / 2
            yy = y + block_size / 2
            block_y_num = int(y / block_size)
            block_x_num = int(x / block_size)
            RA[block_y_num, block_x_num] = (yy - center_y) * (
                yy - center_y) + (xx - center_x) * (xx - center_x)

    # flatten the 4 color channels for fitting: RA_flatten acts as x, R_LSC_data_flatten and the other three act as y
    RA_flatten = RA.flatten()
    R_LSC_data_flatten = R_LSC_data.flatten()
    GR_LSC_data_flatten = GR_LSC_data.flatten()
    GB_LSC_data_flatten = GB_LSC_data.flatten()
    B_LSC_data_flatten = B_LSC_data.flatten()

    # value of the brightest block
    Max_R = np.max(R_LSC_data_flatten)
    Max_GR = np.max(GR_LSC_data_flatten)
    Max_GB = np.max(GB_LSC_data_flatten)
    Max_B = np.max(B_LSC_data_flatten)

    # gains, before extrapolation
    G_R_LSC_data = Max_R / R_LSC_data_flatten
    G_GR_LSC_data = Max_GR / GR_LSC_data_flatten
    G_GB_LSC_data = Max_GB / GB_LSC_data_flatten
    G_B_LSC_data = Max_B / B_LSC_data_flatten

    # gain
    plt.scatter(RA_flatten, G_R_LSC_data, color='red')
    plt.scatter(RA_flatten, G_GR_LSC_data, color='green')
    plt.scatter(RA_flatten, G_GB_LSC_data, color='green')
    plt.scatter(RA_flatten, G_B_LSC_data, color='blue')
    plt.show()

    # fit the gain curves
    par_R = np.polyfit(RA_flatten, G_R_LSC_data, 3)
    par_GR = np.polyfit(RA_flatten, G_GR_LSC_data, 3)
    par_GB = np.polyfit(RA_flatten, G_GB_LSC_data, 3)
    par_B = np.polyfit(RA_flatten, G_B_LSC_data, 3)

    # evaluate the fitted curves at all sample points
    ES_R = par_R[0] * (RA_flatten**3) + par_R[1] * (
        RA_flatten**2) + par_R[2] * RA_flatten + par_R[3]
    ES_GR = par_GR[0] * (RA_flatten**3) + par_GR[1] * (
        RA_flatten**2) + par_GR[2] * RA_flatten + par_GR[3]
    ES_GB = par_GB[0] * (RA_flatten**3) + par_GB[1] * (
        RA_flatten**2) + par_GB[2] * RA_flatten + par_GB[3]
    ES_B = par_B[0] * (RA_flatten**3) + par_B[1] * (
        RA_flatten**2) + par_B[2] * RA_flatten + par_B[3]
    # compare the fitted data with the original samples
    plt.scatter(RA_flatten, ES_R, color='red')
    plt.scatter(RA_flatten, ES_GR, color='green')
    plt.scatter(RA_flatten, ES_GB, color='green')
    plt.scatter(RA_flatten, ES_B, color='blue')
    plt.show()

    # the extrapolated gains come from the curve fit; extrapolation is mainly needed because some image resolutions are not evenly divisible by the block size
    EX_RA = np.zeros((Hblocks + 2, Wblocks + 2))
    EX_R = np.zeros((Hblocks + 2, Wblocks + 2))
    EX_GR = np.zeros((Hblocks + 2, Wblocks + 2))
    EX_GB = np.zeros((Hblocks + 2, Wblocks + 2))
    EX_B = np.zeros((Hblocks + 2, Wblocks + 2))
    new_center_y = center_point[0] + 1
    new_center_x = center_point[1] + 1
    for y in range(0, Hblocks + 2):
        for x in range(0, Wblocks + 2):
            EX_RA[y, x] = (y - new_center_y) * block_size * (
                y - new_center_y) * block_size + (
                    x - new_center_x) * block_size * (
                        x - new_center_x) * block_size
            EX_R[y, x] = par_R[0] * (EX_RA[y, x] ** 3) + par_R[1] * (EX_RA[y, x] ** 2) + par_R[2] * (EX_RA[y, x]) + \
                         par_R[3]

            EX_GR[y, x] = par_GR[0] * (EX_RA[y, x] ** 3) + par_GR[1] * (EX_RA[y, x] ** 2) + par_GR[2] * (EX_RA[y, x]) + \
                          par_GR[3]

            EX_GB[y, x] = par_GB[0] * (EX_RA[y, x] ** 3) + par_GB[1] * (EX_RA[y, x] ** 2) + par_GB[2] * (EX_RA[y, x]) + \
                          par_GB[3]

            EX_B[y, x] = par_B[0] * (EX_RA[y, x] ** 3) + par_B[1] * (EX_RA[y, x] ** 2) + par_B[2] * (EX_RA[y, x]) + \
                         par_B[3]

    # use the actually sampled data for the central region
    G_R_LSC_data.shape = (Hblocks, Wblocks)
    G_GR_LSC_data.shape = (Hblocks, Wblocks)
    G_GB_LSC_data.shape = (Hblocks, Wblocks)
    G_B_LSC_data.shape = (Hblocks, Wblocks)
    EX_R[1:1 + Hblocks, 1:1 + Wblocks] = G_R_LSC_data
    EX_GR[1:1 + Hblocks, 1:1 + Wblocks] = G_GR_LSC_data
    EX_GB[1:1 + Hblocks, 1:1 + Wblocks] = G_GB_LSC_data
    EX_B[1:1 + Hblocks, 1:1 + Wblocks] = G_B_LSC_data

    return EX_R, EX_GR, EX_GB, EX_B
Code Example #15
File: cj_lsc.py  Project: XiaoZhuXinYu/ISPcode-python
def create_lsc_data_base(img, block_size, pattern):
    # split the four color channels
    R, GR, GB, B = rawimage.bayer_channel_separation(img, pattern)
    # print(img.shape, R.shape)

    # height and width of each channel plane
    HH, HW = R.shape

    # number of blocks along each dimension
    Hblocks = int(HH / block_size)
    Wblocks = int(HW / block_size)

    # The image is split into Hblocks * Wblocks blocks; allocate Hblocks x Wblocks matrices to store the per-block data.
    R_LSC_data = np.zeros((Hblocks, Wblocks))  # mean of R within each block
    B_LSC_data = np.zeros((Hblocks, Wblocks))
    GR_LSC_data = np.zeros((Hblocks, Wblocks))
    GB_LSC_data = np.zeros((Hblocks, Wblocks))

    # distance of each block from the optical center (stored squared)
    RA = np.zeros((Hblocks, Wblocks))

    # compute the mean value of every block
    for y in range(0, HH, block_size):
        for x in range(0, HW, block_size):
            block_y_num = int(y / block_size)
            block_x_num = int(x / block_size)
            R_LSC_data[block_y_num,
                       block_x_num] = R[y:y + block_size, x:x +
                                        block_size].mean()  # average over one block of data
            GR_LSC_data[block_y_num,
                        block_x_num] = GR[y:y + block_size,
                                          x:x + block_size].mean()
            GB_LSC_data[block_y_num,
                        block_x_num] = GB[y:y + block_size,
                                          x:x + block_size].mean()
            B_LSC_data[block_y_num, block_x_num] = B[y:y + block_size,
                                                     x:x + block_size].mean()

    # flatten the 4 color channels
    R_LSC_data_flatten = R_LSC_data.flatten()
    GR_LSC_data_flatten = GR_LSC_data.flatten()
    GB_LSC_data_flatten = GB_LSC_data.flatten()
    B_LSC_data_flatten = B_LSC_data.flatten()

    # value of the brightest block
    Max_R = np.max(R_LSC_data_flatten)
    Max_GR = np.max(GR_LSC_data_flatten)
    Max_GB = np.max(GB_LSC_data_flatten)
    Max_B = np.max(B_LSC_data_flatten)

    # compute the gains
    G_R_LSC_data = Max_R / R_LSC_data_flatten
    G_GR_LSC_data = Max_GR / GR_LSC_data_flatten
    G_GB_LSC_data = Max_GB / GB_LSC_data_flatten
    G_B_LSC_data = Max_B / B_LSC_data_flatten

    G_R_LSC_data.shape = (Hblocks, Wblocks)
    G_GR_LSC_data.shape = (Hblocks, Wblocks)
    G_GB_LSC_data.shape = (Hblocks, Wblocks)
    G_B_LSC_data.shape = (Hblocks, Wblocks)

    return G_R_LSC_data, G_GR_LSC_data, G_GB_LSC_data, G_B_LSC_data
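Putting the LSC pieces together: create_lsc_data produces gain grids with a one-block extrapolation border, which matches apply_shading_to_image (it resizes to the padded size and crops), while create_lsc_data_base produces plain Hblocks x Wblocks grids for apply_shading_to_image_base (which resizes directly to the channel size). A hypothetical end-to-end flow, assuming a flat-field raw loaded via rawimage (file name, resolution and ratio are illustrative only):

block_size = 64
pattern = "GRBG"

flat = rawimage.read_plained_file("flatfield.raw", "uint16", 4032, 3024, "ieee-le")

# variant 1: extrapolated gain grids + resize-and-crop application
shading_R, shading_GR, shading_GB, shading_B = create_lsc_data(flat, block_size, pattern)
corrected = apply_shading_to_image(flat, block_size, shading_R, shading_GR,
                                   shading_GB, shading_B, pattern, ratio=0.8)

# variant 2: plain per-block gain grids + direct resize to the channel size
base_R, base_GR, base_GB, base_B = create_lsc_data_base(flat, block_size, pattern)
corrected_base = apply_shading_to_image_base(flat, block_size, base_R, base_GR,
                                             base_GB, base_B, pattern, ratio=0.8)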