Example #1
def Rendering(img_list,exposure_times):
    # Merge exposures to HDR image
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
    merge_robertson = cv2.createMergeRobertson()
    hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())
    
    # Tonemap HDR image
    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    tonemap2 = cv2.createTonemapDurand(gamma=1.3)
    res_robertson = tonemap2.process(hdr_robertson.copy())

    # Exposure fusion using Mertens
    merge_mertens = cv2.createMergeMertens()
    res_mertens = merge_mertens.process(img_list)

    # Convert datatype to 8-bit and save
    res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
    res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
    res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')

    cv2.imwrite("ldr_debvec.jpg", res_debvec_8bit)
    cv2.imwrite("ldr_robertson.jpg", res_robertson_8bit)
    cv2.imwrite("fusion_mertens.jpg", res_mertens_8bit)
Example #2
def HDR(_imgs_nx1, _times_nx1, method=Debevec):
    assert _imgs_nx1.dtype == np.uint8 and _times_nx1.dtype == np.float32, "Type Error"
    assert len(_imgs_nx1) == len(
        _times_nx1) and len(_times_nx1) > 0, "Len Error"
    if method == Debevec:
        CalibrateDebevec = cv2.createCalibrateDebevec(samples=70, random=True)
        crf = CalibrateDebevec.process(src=_imgs_nx1, times=_times_nx1)
        merge_debvec = cv2.createMergeDebevec()
        hdr_img = merge_debvec.process(src=_imgs_nx1,
                                       times=_times_nx1,
                                       response=crf)
        tonemap = cv2.createTonemapDurand(gamma=1.4)
        res_img = tonemap.process(hdr_img.copy())
        return crf, hdr_img, res_img
    if method == Robertson:
        CalibrateRobertson = cv2.createCalibrateRobertson()
        crf = CalibrateRobertson.process(src=_imgs_nx1, times=_times_nx1)
        merge_robertson = cv2.createMergeRobertson()
        hdr_img = merge_robertson.process(src=_imgs_nx1,
                                          times=_times_nx1,
                                          response=crf)
        # local tonemap (Durand)
        tonemap = cv2.createTonemapDurand(gamma=1.4)
        res_img = tonemap.process(hdr_img.copy())
        return crf, hdr_img, res_img
    if method == Mertens:
        merge_mertens = cv2.createMergeMertens()
        res_img = merge_mertens.process(_imgs_nx1)
        # cv2.imshow("ss", res_img)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # res_mertens_8bit = np.clip(res_img*255, 0, 255).astype('uint8')
        # cv2.imwrite("PyFusion.png", res_mertens_8bit)
        return res_img
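The Debevec, Robertson and Mertens selectors used above are not defined in this snippet; they are presumably module-level constants. A minimal usage sketch under that assumption (constant values, file names and exposure times below are illustrative only):

Debevec, Robertson, Mertens = 0, 1, 2   # assumed selector constants

imgs = np.array([cv2.imread(f) for f in ["exp0.jpg", "exp1.jpg", "exp2.jpg"]])
times = np.array([1 / 30.0, 1 / 8.0, 1 / 2.0], dtype=np.float32)
crf, hdr_img, ldr_img = HDR(imgs, times, method=Debevec)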
Example #3
def main():
    global client, img
    images = []
    delta = 25
    set_param('auto_exposure', True)
    set_param('auto_frame_rate', True)
    ev_auto = get_param('exposure')
    print("EV_auto: {0}".format(ev_auto))
    set_param('auto_exposure', False)
    # exposure = [ev_auto - delta, ev_auto, ev_auto + delta]
    exposure = [ev_auto - delta, ev_auto + delta]
    for ev in exposure:
        t = time.time()
        set_param('exposure', int(ev))
        delta_t = time.time() - t
        print("time: {0}".format(delta_t))
        time.sleep(1)
        name = 'image exposure :' + str(ev)
        images.append(img.copy())
        # EV = log2(f^2 / t)
        # et = math.pow(f, 2.0) / math.pow(2.0, ev)
        cv2.imshow(name, img.copy())

    exposure_times = np.array(exposure, dtype=np.float32)
    # debvec
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(images, times=exposure_times.copy())
    # robertson
    merge_robertson = cv2.createMergeRobertson()
    hdr_robertson = merge_robertson.process(images,
                                            times=exposure_times.copy())

    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    tonemap2 = cv2.createTonemapDurand(gamma=1.3)
    res_robertson = tonemap2.process(hdr_robertson.copy())

    # Mertens exposure fusion (does not need exposure times)
    merge_mertens = cv2.createMergeMertens()
    res_mertens = merge_mertens.process(images)

    cv2.imshow('debvec', res_debvec)
    cv2.imshow('robertson', res_robertson)
    cv2.imshow('mertens', res_mertens)

    while True:
        key = cv2.waitKey(1) & 0xff
        if key == ord('q'):
            break
Example #4
def TonemapDurand(file_path):
    im = cv2.imread(file_path, cv2.IMREAD_ANYDEPTH)
    tonemapDurand = cv2.createTonemapDurand(1.5, 4, 1.0, 1, 1)
    ldrDurand = tonemapDurand.process(im)
    # im2_8bit = np.clip(ldrDurand * 255, 0, 255).astype('uint8')

    return ldrDurand
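A short usage sketch for the function above (the file names are hypothetical); the returned image is float data in roughly the [0..1] range, so it still needs to be scaled and clipped before an 8-bit save:

ldr = TonemapDurand("scene.hdr")
ldr_8bit = np.clip(ldr * 255, 0, 255).astype('uint8')
cv2.imwrite("scene_durand.jpg", ldr_8bit)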
Example #5
def tonemapping(hdr, tmo_func='reinhard', gamma=2.2, fstop=0):
    ## tone mapping hdr
    if tmo_func == 'reinhard':
        tmo = cv2.createTonemapReinhard(gamma=gamma)
    elif tmo_func == 'durand':
        tmo = cv2.createTonemapDurand(gamma=gamma)
    elif tmo_func == 'drago':
        tmo = cv2.createTonemapDrago(gamma=gamma)
    elif tmo_func == 'mantiuk':
        tmo = cv2.createTonemapMantiuk(gamma=gamma)
    elif tmo_func == 'linear':
        output = hdr - hdr.min()
        output = output / output.max()

        # return output
        return tonemapping(output, tmo_func='gamma')
    elif tmo_func == 'gamma':
        inv_gamma = 1.0 / gamma
        exposure = np.power(2., fstop)
        output = clamp_img(np.power(exposure * hdr, inv_gamma), 0, 1)
        return output
    else:
        raise NotImplementedError
    # elif tmo_func =='cut_high':
    #     output = hdr - hdr.min()
    #     output = output/output.max()
    #     return output
    output = tmo.process(hdr.astype('float32'))
    return output
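clamp_img is not defined in this snippet; a minimal stand-in (an assumption) and a usage example:

def clamp_img(img, lo, hi):
    # Assumed helper: element-wise clamp, equivalent to np.clip.
    return np.clip(img, lo, hi)

# hdr is a float32 HDR array loaded elsewhere.
ldr = tonemapping(hdr, tmo_func='reinhard', gamma=2.2)
ldr_8bit = np.clip(ldr * 255, 0, 255).astype('uint8')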
Example #6
def mergeImgs(imgs, expos):
    # Debevec = name of the HDR algorithm used for merging
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(imgs, times=np.array(expos, dtype=np.float32))
    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
    return res_debvec_8bit
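A hedged usage sketch for mergeImgs (file names and exposure times below are assumptions):

imgs = [cv2.imread(fn) for fn in ["exp_low.jpg", "exp_mid.jpg", "exp_high.jpg"]]
expos = [1 / 100.0, 1 / 25.0, 1 / 6.0]   # exposure times in seconds
cv2.imwrite("merged_debevec.jpg", mergeImgs(imgs, expos))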
Example #7
def shades_of_gray(request):
    ''' Handling the color normalization (shades-of-gray) '''
    data = {"success": False}

    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])

        # otherwise, return an error
        else:
            data["error"] = "No 'image' parameter found or not path provided"
            return JsonResponse(data)

        ### START WRAPPING THE APP
        elevation = float(request.POST.get("elevation"))

        # Decompose R G B channels of the image into 3 different arrays
        image = cv2.normalize(image.astype(np.float32), None, 0.0, 1.0,
                              cv2.NORM_MINMAX)

        image[:, :, 0] = image[:, :, 0]**2.2
        image[:, :, 1] = image[:, :, 1]**2.2
        image[:, :, 2] = image[:, :, 2]**2.2

        b = np.sum(np.power(image[:, :, 0], elevation))
        g = np.sum(np.power(image[:, :, 1], elevation))
        r = np.sum(np.power(image[:, :, 2], elevation))

        b = b**(1 / elevation)
        g = g**(1 / elevation)
        r = r**(1 / elevation)

        kg = 1
        kb = g * kg / b
        kr = g * kg / r

        print('Kb: ' + str(kb) + ', Kg: ' + str(kg) + ', Kr: ' + str(kr))

        # image = img_as_float(image)
        image[:, :, 0] = image[:, :, 0] * kb
        image[:, :, 1] = image[:, :, 1] * kg
        image[:, :, 2] = image[:, :, 2] * kr

        tonemap = cv2.createTonemapDurand()
        new_image_hdr = tonemap.process(image)
        new_image_hdr_8bit = np.clip(new_image_hdr * 255, 0,
                                     255).astype('uint8')
        #cv2.imwrite('result.png',new_image_hdr_8bit)
        # update the data dictionary
        data["success"] = True

    # return a JSON response
    return JsonResponse({'img': str(_encode_Base64(img=new_image_hdr_8bit))})
Example #8
    def part_4(self,image):
        '''
        Tonemapping of the HDR composite image
        Arguments: Image
        Return: Image
        '''
        tonemap1 = cv2.createTonemapDurand(gamma =2.2)
        result = tonemap1.process(image)

        return result
Example #9
    def __call__(self, numpy_img):
        opencv_img = cv2.merge((numpy_img[0], numpy_img[1], numpy_img[2]))
        tonemap1 = cv2.createTonemapDurand(self.gamma)
        tonemap_img = tonemap1.process(opencv_img)
        tonemap_img_8bit = np.clip(tonemap_img * 255, 0, 255).astype('uint8')

        reshape_tonemap_img_8bit = np.empty([3, 64, 128])
        for i in range(3):
            reshape_tonemap_img_8bit[i, :, :] = tonemap_img_8bit[:, :, i]

        return reshape_tonemap_img_8bit
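The per-channel copy loop above is just an HWC-to-CHW rearrangement; a single transpose gives the same layout (the original loop yields float64 because np.empty defaults to that dtype):

# Equivalent to the loop: move the channel axis to the front (H x W x C -> C x H x W).
reshape_tonemap_img_8bit = np.transpose(tonemap_img_8bit, (2, 0, 1)).astype(np.float64)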
Example #10
def tone_map(img, tmo_name):
    if tmo_name == 'exposure':
        # Exposure and opt come from the surrounding project (not shown here).
        tmo = Exposure(gamma=opt.gamma, stops=opt.stops)
    elif tmo_name == 'reinhard':
        tmo = cv2.createTonemapReinhard(intensity=-1.0,
                                        light_adapt=0.8, color_adapt=0.0)
    elif tmo_name == 'mantiuk':
        tmo = cv2.createTonemapMantiuk(saturation=1.0, scale=0.75)
    elif tmo_name == 'drago':
        tmo = cv2.createTonemapDrago(saturation=1.0, bias=0.85)
    elif tmo_name == 'durand':
        tmo = cv2.createTonemapDurand(contrast=3, saturation=1.0,
                                      sigma_space=8, sigma_color=0.4)
    else:
        raise ValueError('unknown tone mapping operator: ' + tmo_name)
    return tmo.process(img)
Example #11
def tone_mapping_Durand(img_file,
                        gamma = 4, 
                        contrast = 12,
                        saturation = 6,
                        sigma_space = 1.5,
                        sigma_color = 1.5):
            
    '''
    tonemapDrago = cv2.createTonemapDrago(2, 2)
    ldrDrago = tonemapDrago.process(image_output)
    ldrDrago = 3 * ldrDrago
    ldrDrago = ldrDrago * 256.0 - 0.5

    tonemapDurand = cv2.createTonemapDurand(8,4,1.0,1,1)
    ldrDurand = tonemapDurand.process(image_output)
    ldrDurand = 3 * ldrDurand

    tonemapMantiuk = cv2.createTonemapMantiuk(2.2,0.85, 1.2)
    ldrMantiuk = tonemapMantiuk.process(image_output)
    ldrMantiuk = 3 * ldrMantiuk
    
    tonemapReinhard = cv2.createTonemapReinhard(gamma,       
                                                intensity,       # [-8, 8]
                                                light_adapt,     # [0, 1]
                                                color_adapt)     # [0, 1]
    ldrReinhard = tonemapReinhard.process(img)
    ldrReinhard = ldrReinhard * 255.0
    ldrReinhard = ldrReinhard.astype(np.uint8)
    '''
    img = imread(img_file).astype(np.float32)/255
    tonemapDurand = cv2.createTonemapDurand(gamma,
                                            contrast,
                                            saturation,
                                            sigma_space,
                                            sigma_color)
    
    init_time = time.time()
    ldrDurand = tonemapDurand.process(img)
    ldrDurand = ldrDurand * 255.0
    print("passed time = {}".format(time.time()-init_time))
    np.putmask(ldrDurand, ldrDurand > 255, 255)
    np.putmask(ldrDurand, ldrDurand < 0, 0)
    ldrDurand = ldrDurand.astype(np.uint8)

    
    plt.figure(figsize = (10,10))
    plt.imshow(ldrDurand)
    misc.imsave("G:\\ECE516\\HDR team\\ldrDurand.jpg",ldrDurand)
    plt.show()
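In this example, imread, misc and plt presumably come from the file's own imports (scipy/matplotlib); note that scipy.misc.imsave has been removed from recent SciPy releases, so a drop-in replacement such as imageio is needed. A hedged sketch (the output path is shortened here; the array is already uint8 at this point):

import imageio

# Replacement for the removed scipy.misc.imsave call.
imageio.imwrite("ldrDurand.jpg", ldrDurand)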
Example #12
def rgb_general(request):
    ''' Handling the toggling for an image's color '''
    data = {"success": False}

    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])

        # otherwise, return an error
        else:
            data["error"] = "No 'image' parameter found or not path provided"
            return JsonResponse(data)

        ### START WRAPPING THE APP
        better_new_img = np.zeros(image.shape, image.dtype)

        alpha = float(request.POST.get("alpha"))  # Simple brightness control
        gama = float(request.POST.get("gama"))  # Simple contrast control

        #get image as float32 for tonemapping to work
        image = cv2.normalize(image.astype(np.float32), None, 0.0, 1.0,
                              cv2.NORM_MINMAX)

        #alpha and gama adjustments
        image[:, :, 0] = np.float_power(alpha * image[:, :, 0], gama)
        image[:, :, 1] = np.float_power(alpha * image[:, :, 1], gama)
        image[:, :, 2] = np.float_power(alpha * image[:, :, 2], gama)

        #normalize the image
        better_new_img[..., 0] = better_img(img=image[:, :, 0])
        better_new_img[..., 1] = better_img(img=image[:, :, 1])
        better_new_img[..., 2] = better_img(img=image[:, :, 2])

        #process hdr for display
        tonemap = cv2.createTonemapDurand()
        image_hdr = tonemap.process(image)
        image_hdr = np.clip(image_hdr * 255, 0, 255).astype('uint8')

        data["success"] = True

    # return a JSON response
    return JsonResponse({
        'img': str(_encode_Base64(img=image_hdr)),
        'img_better': str(_encode_Base64(img=better_new_img))
    })
Example #13
 def __init__(self,
              contrast=3,
              saturation=1.0,
              sigma_space=8,
              sigma_color=0.4,
              gamma=2.0,
              randomize=False):
     if randomize:
         gamma = uniform(1.8, 2.2)
         # The original read `uniform(3.5)`, which raises TypeError because
         # random.uniform needs two bounds; a range around the default
         # contrast is assumed here.
         contrast = uniform(2.5, 3.5)
     self.op = cv2.createTonemapDurand(
         contrast=contrast,
         saturation=saturation,
         sigma_space=sigma_space,
         sigma_color=sigma_color,
         gamma=gamma)
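A sketch of how a wrapper like this is typically applied; the class name below is hypothetical (only __init__ is shown above), and process() is the standard OpenCV Tonemap call:

op = DurandTonemapper(gamma=2.2)            # hypothetical name for the class above
ldr = op.op.process(hdr.astype('float32'))  # hdr: float32 HDR image loaded elsewhere
ldr_8bit = np.clip(ldr * 255, 0, 255).astype('uint8')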
Example #14
def gray_scale(request):
    ''' Handling the toggling for an image's color '''
    # initialize the data dictionary to be returned by the request
    data = {"success": False}

    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        print(request.FILES)
        print('\n')
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])
            #print(image)

        # otherwise, then return an error
        else:
            data["error"] = "No 'image' parameter found or not path provided"
            return JsonResponse(data)

        ### START WRAPPING THE APP
        alpha = float(request.POST.get("alpha"))  # Simple brightness control
        gama = float(request.POST.get("gama"))  # Simple contrast control

        # convert to grayscale and apply the alpha/gamma adjustment
        new_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        new_image = np.float_power((alpha * new_image), gama)
        better_image = better_img(img=new_image)

        ### create image for hdr display
        image_tonemap = np.float_power((alpha * image), gama)
        image_tonemap = cv2.normalize(image_tonemap.astype(np.float32), None,
                                      0.0, 1.0, cv2.NORM_MINMAX)
        tonemap = cv2.createTonemapDurand()
        image_tonemap = tonemap.process(image_tonemap)
        image_tonemap = np.clip(image_tonemap * 255, 0, 255).astype('uint8')
        image_tonemap = cv2.cvtColor(image_tonemap, cv2.COLOR_BGR2GRAY)
        # update the data dictionary
        data["success"] = True

    # return a JSON response
    return JsonResponse({
        'img': str(_encode_Base64(img=image_tonemap)),
        'img_better': str(_encode_Base64(img=better_image))
    })
Example #15
def gray_world(request):
    ''' Handling the color normalization (gray-world) '''
    data = {"success": False}

    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])

        # otherwise, return an error
        else:
            data["error"] = "No 'image' parameter found or not path provided"
            return JsonResponse(data)

        ### START WRAPPING THE APP

        # Decompose R G B channels of the image into 3 different arrays
        image = cv2.normalize(image.astype(np.float32), None, 0.0, 1.0,
                              cv2.NORM_MINMAX)

        image[:, :, 0] = image[:, :, 0]**2.2
        image[:, :, 1] = image[:, :, 1]**2.2
        image[:, :, 2] = image[:, :, 2]**2.2

        kb = 1
        kg = (kb * np.sum(image[:, :, 0])) / np.sum(image[:, :, 1])
        kr = (kb * np.sum(image[:, :, 0])) / np.sum(image[:, :, 2])

        print('Kb: ' + str(kb) + ', Kg: ' + str(kg) + ', Kr: ' + str(kr))

        image[:, :, 1] = kg * image[:, :, 1]
        image[:, :, 2] = kr * image[:, :, 2]

        tonemap = cv2.createTonemapDurand()
        new_image_hdr = tonemap.process(image)
        new_image_hdr_8bit = np.clip(new_image_hdr * 255, 0,
                                     255).astype('uint8')

    # return a JSON response
    return JsonResponse({'img': str(_encode_Base64(img=new_image_hdr_8bit))})
Example #16
def main():
    # Read the multiple-exposure images
    images,times = readImagesAndTimes()

    # Align the images
    alignMTB = cv2.createAlignMTB()
    alignMTB.process(images, images)

    # Recover the camera response function (CRF)
    calibrateDebevec = cv2.createCalibrateDebevec()
    responseDebevec = calibrateDebevec.process(images, times)

    # Merge the exposure images into an HDR image
    mergeDebevec = cv2.createMergeDebevec()
    hdrDebevec = mergeDebevec.process(images, times, responseDebevec)
    # Save the merged result; the .hdr file can be opened in Photoshop
    cv2.imwrite("hdrDebevec.hdr", hdrDebevec)

    # Tonemap using Drago's method to obtain 24-bit color image
    tonemapDrago = cv2.createTonemapDrago(1.0, 0.7)
    ldrDrago = tonemapDrago.process(hdrDebevec)
    ldrDrago = 3 * ldrDrago
    cv2.imwrite("ldr-Drago.jpg", ldrDrago * 255)

    # Tonemap using Durand's method to obtain 24-bit color image
    tonemapDurand = cv2.createTonemapDurand(1.5,4,1.0,1,1)
    ldrDurand = tonemapDurand.process(hdrDebevec)
    ldrDurand = 3 * ldrDurand
    cv2.imwrite("ldr-Durand.jpg", ldrDurand * 255)

    # Tonemap using Reinhard's method to obtain 24-bit color image
    tonemapReinhard = cv2.createTonemapReinhard(1.5, 0,0,0)
    ldrReinhard = tonemapReinhard.process(hdrDebevec)
    cv2.imwrite("ldr-Reinhard.jpg", ldrReinhard * 255)

    # Tonemap using Mantiuk's method to obtain 24-bit color image
    tonemapMantiuk = cv2.createTonemapMantiuk(2.2,0.85, 1.2)
    ldrMantiuk = tonemapMantiuk.process(hdrDebevec)
    ldrMantiuk = 3 * ldrMantiuk
    cv2.imwrite("ldr-Mantiuk.jpg", ldrMantiuk * 255)
Example #17
def hdr(imgNames, exposures, writeName, user_id):
    writePath = os.path.join(app.config['UPLOAD_FOLDER'], writeName)

    imgPaths = [os.path.join(app.config['UPLOAD_FOLDER'], imgName) for imgName in imgNames]

    imgs= [cv2.imread(imgPath) for imgPath in imgPaths]
#    imgs = imageAlignment(images)

    merge_debvec = cv2.createMergeDebevec()

    exposures = np.array(exposures, dtype=np.float32)
    hdr_debvec = merge_debvec.process(imgs, times=exposures.copy())

    tonemap = cv2.createTonemapDurand(gamma=2)
    res = tonemap.process(hdr_debvec)

    cv2.imwrite(writePath, res * 255)
    photo_id = addToDB(writeName, 'HDR', user_id)
    createThumbnail(writeName, photo_id)

    autoTag.delay(writePath, photo_id)

    for path in imgPaths:
        os.remove(path)
Example #18
def durand_hdr(image_names,
               algo='debevec',
               exposures=None,
               gamma=1.0,
               contrast=4.0,
               saturation=1.0,
               sigma_space=2.0,
               sigma_color=2.0,
               output=None):
    """
    Create an HDR image from the supplied images.

    :param images: List of images to process.
    :return: Returns name of new HDR image.
    """
    hdr_img = process_image(image_names, exposures, algo)

    tonemap_durand = cv2.createTonemapDurand(
        gamma, contrast, saturation, sigma_space, sigma_color
    )
    ldr_durand = tonemap_durand.process(hdr_img)

    img_out = get_image_output(image_names[1], output)
    cv2.imwrite(img_out, ldr_durand * 255)
Example #19
            # Excerpt from inside nested (i, j, k) pixel loops; s, threshold,
            # weight1/weight2 and the HDR_img_method_* arrays are defined
            # earlier in the source file.
            n = 1
            if imgs[2][i][j][k] < threshold:
                n = n + weight2
                s = s + weight2 * imgs[2][i][j][k] * (exposure_times[0] /
                                                      exposure_times[2])
            if imgs[1][i][j][k] < threshold:
                n = n + weight1
                s = s + weight1 * imgs[1][i][j][k] * (exposure_times[0] /
                                                      exposure_times[1])
            HDR_img_method_3[i][j][k] = s / n

cv2.imwrite('hdr/combined/HDR_img_method_3.jpg', HDR_img_method_3)

# tone map ziqiang
gam_value = 2.2
tonemap1 = cv2.createTonemapDurand(gamma=gam_value)
res_tonemap1 = tonemap1.process(HDR_img_method_1.copy())
res_tonemap1_8bit = np.clip(res_tonemap1 * 255, 0, 255).astype('uint8')
cv2.imwrite("hdr/tonemapped/res_tonemap1_8bit.jpg", res_tonemap1_8bit)

tonemap2 = cv2.createTonemapDurand(gamma=gam_value)
res_tonemap2 = tonemap2.process(HDR_img_method_2.copy())
res_tonemap2_8bit = np.clip(res_tonemap2 * 255, 0, 255).astype('uint8')
cv2.imwrite("hdr/tonemapped/res_tonemap2_8bit.jpg", res_tonemap2_8bit)

tonemap3 = cv2.createTonemapDurand(gamma=gam_value)
res_tonemap3 = tonemap3.process(HDR_img_method_3.copy())
res_tonemap3_8bit = np.clip(res_tonemap3 * 255, 0, 255).astype('uint8')
cv2.imwrite("hdr/tonemapped/res_tonemap3_8bit.jpg", res_tonemap3_8bit)

# Average HDR
Example #20
def scale_by_max(request):
    ''' Handling the color normalization (scale by max) '''
    data = {"success": False}

    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])

        # otherwise, return an error
        else:
            data["error"] = "No 'image' parameter found or not path provided"
            return JsonResponse(data)

        ### START WRAPPING THE APP
        image = cv2.normalize(image.astype(np.float32), None, 0.0, 1.0,
                              cv2.NORM_MINMAX)

        image[:, :, 0] = image[:, :, 0]**2.2
        image[:, :, 1] = image[:, :, 1]**2.2
        image[:, :, 2] = image[:, :, 2]**2.2

        if np.max(image[:, :, 0]) >= np.max(image[:, :, 1]) and np.max(
                image[:, :, 0]) >= np.max(image[:, :, 2]):
            print('KbMax: ' + str(np.max(image[:, :, 0])) + ', Kg: ' +
                  str(np.max(image[:, :, 1])) + ', Kr: ' +
                  str(np.max(image[:, :, 2])))
            image[:, :,
                  1] = (np.max(image[:, :, 0]) /
                        np.max(image[:, :, 1])) * np.array(image[:, :, 1])
            image[:, :,
                  2] = (np.max(image[:, :, 0]) /
                        np.max(image[:, :, 2])) * np.array(image[:, :, 2])
        else:
            if np.max(image[:, :, 1]) >= np.max(image[:, :, 0]) and np.max(
                    image[:, :, 1]) >= np.max(image[:, :, 2]):
                print('Kb: ' + str(np.max(image[:, :, 0])) + ', KgMax: ' +
                      str(np.max(image[:, :, 1])) + ', Kr: ' +
                      str(np.max(image[:, :, 2])))
                image[:, :,
                      0] = (np.max(image[:, :, 1]) /
                            np.max(image[:, :, 0])) * np.array(image[:, :, 0])
                image[:, :,
                      2] = (np.max(image[:, :, 1]) /
                            np.max(image[:, :, 2])) * np.array(image[:, :, 2])
            else:
                if np.max(image[:, :, 2]) >= np.max(image[:, :, 1]) and np.max(
                        image[:, :, 2]) >= np.max(image[:, :, 0]):
                    print('Kb: ' + str(np.max(image[:, :, 0])) + ', Kg: ' +
                          str(np.max(image[:, :, 1])) + ', KrMax: ' +
                          str(np.max(image[:, :, 2])))
                    image[:, :, 1] = (np.max(image[:, :, 2]) / np.max(
                        image[:, :, 1])) * np.array(image[:, :, 1])
                    image[:, :, 0] = (np.max(image[:, :, 2]) / np.max(
                        image[:, :, 0])) * np.array(image[:, :, 0])

        print('B: ' + str(np.max(image[:, :, 0])) + ', G: ' +
              str(np.max(image[:, :, 1])) + ', R: ' +
              str(np.max(image[:, :, 2])))

        tonemap = cv2.createTonemapDurand()
        new_image_hdr = tonemap.process(image)
        new_image_hdr_8bit = np.clip(new_image_hdr * 255, 0,
                                     255).astype('uint8')

    # return a JSON response
    return JsonResponse({'img': str(_encode_Base64(img=new_image_hdr_8bit))})
Example #21
    # Excerpt: accumulate the weighted numerator/denominator for each channel
    # (the surrounding loop and the r/g/b accumulators are defined earlier in the file).
    gd = gd + g1
    gn = gn + g2

    bd = bd + b1
    bn = bn + b2

## HDR image for each channel ##
I_HDR_R = np.divide(rn, rd)
I_HDR_G = np.divide(gn, gd)
I_HDR_B = np.divide(bn, bd)

I_HDR = cv2.merge((I_HDR_B, I_HDR_G, I_HDR_R))

### Tone mapping ###
# Simple global photographic operator.
I_HDR_1 = I_HDR / (1.0 + I_HDR)
cv2.imwrite('HDR_phototonemap.png', np.uint8(I_HDR_1 * 255))

### Built-in tone mapping using OpenCV ###
tonemap1 = cv2.createTonemapDurand(gamma=2.2)
res_debvec = tonemap1.process(I_HDR.astype('float32'))
res_debvec_8bit = np.clip(res_debvec * 255, 0, 255).astype('uint8')

cv2.imwrite('tonemap_opencv.png', res_debvec_8bit)
Example #22
## [Load images and exposure times]
images, times = loadExposureSeq(args.input)
## [Load images and exposure times]

## [Estimate camera response]
calibrate = cv.createCalibrateDebevec()
response = calibrate.process(images, times)
## [Estimate camera response]

## [Make HDR image]
merge_debevec = cv.createMergeDebevec()
hdr = merge_debevec.process(images, times, response)
## [Make HDR image]

## [Tonemap HDR image]
tonemap = cv.createTonemapDurand(2.2)
ldr = tonemap.process(hdr)
## [Tonemap HDR image]

## [Perform exposure fusion]
merge_mertens = cv.createMergeMertens()
fusion = merge_mertens.process(images)
## [Perform exposure fusion]

## [Write results]
cv.imwrite('fusion.png', fusion * 255)
cv.imwrite('ldr.png', ldr * 255)
cv.imwrite('hdr.hdr', hdr)
## [Write results]
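loadExposureSeq is defined earlier in the OpenCV tutorial this snippet comes from; a sketch along those lines, assuming the input folder contains a list.txt of 'filename exposure-time' pairs:

import os
import numpy as np
import cv2 as cv

def loadExposureSeq(path):
    # Read 'filename exposure_time' pairs from list.txt and load the images.
    images, times = [], []
    with open(os.path.join(path, 'list.txt')) as f:
        for line in f:
            tokens = line.split()
            images.append(cv.imread(os.path.join(path, tokens[0])))
            times.append(1.0 / float(tokens[1]))
    return images, np.asarray(times, dtype=np.float32)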
Example #23
File: hdr.py  Project: FA78DWA/learnopencv
 hdrDebevec = mergeDebevec.process(images, times, responseDebevec)
 # Save HDR image.
 cv2.imwrite("hdrDebevec.hdr", hdrDebevec)
 print("saved hdrDebevec.hdr ")
 
 # Tonemap using Drago's method to obtain 24-bit color image
 print("Tonemaping using Drago's method ... ")
 tonemapDrago = cv2.createTonemapDrago(1.0, 0.7)
 ldrDrago = tonemapDrago.process(hdrDebevec)
 ldrDrago = 3 * ldrDrago
 cv2.imwrite("ldr-Drago.jpg", ldrDrago * 255)
 print("saved ldr-Drago.jpg")
 
 # Tonemap using Durand's method to obtain 24-bit color image
 print("Tonemaping using Durand's method ... ")
 tonemapDurand = cv2.createTonemapDurand(1.5,4,1.0,1,1)
 ldrDurand = tonemapDurand.process(hdrDebevec)
 ldrDurand = 3 * ldrDurand
 cv2.imwrite("ldr-Durand.jpg", ldrDurand * 255)
 print("saved ldr-Durand.jpg")
 
 # Tonemap using Reinhard's method to obtain 24-bit color image
 print("Tonemaping using Reinhard's method ... ")
 tonemapReinhard = cv2.createTonemapReinhard(1.5, 0,0,0)
 ldrReinhard = tonemapReinhard.process(hdrDebevec)
 cv2.imwrite("ldr-Reinhard.jpg", ldrReinhard * 255)
 print("saved ldr-Reinhard.jpg")
 
 # Tonemap using Mantiuk's method to obtain 24-bit color image
 print("Tonemaping using Mantiuk's method ... ")
 tonemapMantiuk = cv2.createTonemapMantiuk(2.2,0.85, 1.2)
Example #24
def hdr2ldr(hdr):
    Du = cv2.createTonemapDurand(2)
    ldr = Du.process(hdr)
    ldr = np.clip(ldr, 0, 1)
    return ldr
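A short usage sketch (the file name is hypothetical); reading with IMREAD_UNCHANGED keeps the float32 data of an .hdr file:

hdr = cv2.imread("scene.hdr", cv2.IMREAD_UNCHANGED)
ldr = hdr2ldr(hdr)
cv2.imwrite("scene_ldr.jpg", (ldr * 255).astype('uint8'))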
Example #25
images = list([cv2.imread(f) for f in files])
# Compute the exposure times in seconds
exposures = np.float32([1. / t for t in [0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]])

# Compute the response curve
calibration = cv2.createCalibrateDebevec()
response = calibration.process(images, exposures)

# Compute the HDR image
merge = cv2.createMergeDebevec()
hdr = merge.process(images, exposures, response)

# Save it to disk
cv2.imwrite('hdr22_image.hdr', hdr)

durand = cv2.createTonemapDurand(gamma=2.5)
ldr_durand = durand.process(hdr)

# Tonemap operators create floating point images with values in the 0..1 range
# This is why we multiply the image with 255 before saving

cv2.imwrite('durand_image.png', ldr_durand * 255)

#Drago
drago = cv2.createTonemapDrago(1.0, 0.7)
ldr_drago = drago.process(hdr)
ldr_drago = 3 * ldr_drago
cv2.imwrite("drago_image.jpg", ldr_drago * 255)


Example #26
# Loading exposure images into a list
img_fn = ["1tl.jpg", "2tr.jpg", "3bl.jpg", "4br.jpg"]
img_list = [cv2.imread(fn) for fn in img_fn]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)

# Merge exposures to HDR image
# At this stage we merge the exposure sequence into one HDR image, showing two of the possibilities available in OpenCV: the first method is Debevec and the second is Robertson. Note that the HDR image has type float32, not uint8, because it contains the full dynamic range of all the exposure images.

merge_debvec = cv2.createMergeDebevec()
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
merge_robertson = cv2.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())

# Tonemap HDR image
# We map the 32-bit float HDR data into the range [0..1]. In practice the values can be larger than 1 or below 0, so note that we will later have to clip the data to avoid overflow.
tonemap1 = cv2.createTonemapDurand(gamma=2.2)
res_debvec = tonemap1.process(hdr_debvec.copy())
tonemap2 = cv2.createTonemapDurand(gamma=1.3)
res_robertson = tonemap2.process(hdr_robertson.copy())

# Exposure fusion using Mertens
# Here we show an alternative algorithm for merging the exposure images, for which we do not need the exposure times. We also do not need any tonemapping algorithm, because the Mertens method already gives a result in the [0..1] range.
merge_mertens = cv2.createMergeMertens()
res_mertens = merge_mertens.process(img_list)

# Convert datatype to 8-bit and save
# To save or display the results, we need to convert the data to 8-bit integers in the [0..255] range.
res_debvec_8bit = np.clip(res_debvec * 255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson * 255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens * 255, 0, 255).astype('uint8')
Example #27
if __name__ == '__main__':

    # Loading exposure images into a list
    img_fn = ['img0.jpg', 'img1.jpg', 'img2.jpg', 'img3.jpg']
    img_list = [cv.imread(fn) for fn in img_fn]
    exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)

    # Merge exposures to HDR image
    merge_debvec = cv.createMergeDebevec()
    hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
    merge_robertson = cv.createMergeRobertson()
    hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())

    # Tonemap HDR image
    tonemap1 = cv.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    tonemap2 = cv.createTonemapDurand(gamma=1.3)
    res_robertson = tonemap2.process(hdr_robertson.copy())

    # Exposure fusion using Mertens
    merge_mertens = cv.createMergeMertens()
    res_mertens = merge_mertens.process(img_list)

    # Convert datatype to 8-bit and save
    res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
    res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
    res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')

    cv.imshow('ldr_debevec.jpg', res_debvec_8bit)
    cv.waitKey(0)  # needed for the imshow window to actually render
    cv.imwrite('ldr_debevec.jpg', res_debvec_8bit)
Example #28
def rgb_specific(request):
    ''' Handling the toggling for an image's color '''
    # initialize the data dictionary to be returned by the request
    data = {"success": False}

    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        print(request.FILES)
        print('\n')
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])
            #print(image)

        # otherwise, then return an error
        else:
            data["error"] = "No 'image' parameter found or not path provided"
            return JsonResponse(data)

        ### START WRAPPING THE APP
        alpha_r = float(
            request.POST.get("alphaR"))  # Simple contrast control(alpha)
        gama_r = float(
            request.POST.get("gamaR"))  # Simple brightness control(beta)
        alpha_g = float(
            request.POST.get("alphaG"))  # Simple contrast control(alpha)
        gama_g = float(
            request.POST.get("gamaG"))  # Simple brightness control(beta)
        alpha_b = float(
            request.POST.get("alphaB"))  # Simple contrast control(alpha)
        gama_b = float(
            request.POST.get("gamaB"))  # Simple brightness control(beta)
        better_new_img = np.zeros(image.shape, image.dtype)

        # Get image ready for hdr
        image = cv2.normalize(image.astype(np.float32), None, 0.0, 1.0,
                              cv2.NORM_MINMAX)

        #do the gamma and alpha adjustments
        image[:, :, 0] = np.float_power(alpha_b * image[:, :, 0], gama_b)
        image[:, :, 1] = np.float_power(alpha_g * image[:, :, 1], gama_g)
        image[:, :, 2] = np.float_power(alpha_r * image[:, :, 2], gama_r)

        #normalize image
        better_new_img[..., 0] = better_img(img=image[:, :, 0])
        better_new_img[..., 1] = better_img(img=image[:, :, 1])
        better_new_img[..., 2] = better_img(img=image[:, :, 2])

        #convert it to hdr
        tonemap = cv2.createTonemapDurand()
        image_hdr = tonemap.process(image)
        image_hdr = np.clip(image_hdr * 255, 0, 255).astype('uint8')

        ### END WRAPPING OF APP

        # update the data dictionary
        data["success"] = True

    # return a JSON response
    return JsonResponse({
        'img': str(_encode_Base64(img=image_hdr)),
        'img_better': str(_encode_Base64(img=better_new_img))
    })
Example #29
## [Load images and exposure times]
images, times = loadExposureSeq(args.input)
## [Load images and exposure times]

## [Estimate camera response]
calibrate = cv.createCalibrateDebevec()
response = calibrate.process(images, times)
## [Estimate camera response]

## [Make HDR image]
merge_debevec = cv.createMergeDebevec()
hdr = merge_debevec.process(images, times, response)
## [Make HDR image]

## [Tonemap HDR image]
tonemap = cv.createTonemapDurand(2.2)
ldr = tonemap.process(hdr)
## [Tonemap HDR image]

## [Perform exposure fusion]
merge_mertens = cv.createMergeMertens()
fusion = merge_mertens.process(images)
## [Perform exposure fusion]

## [Write results]
cv.imwrite('fusion.png', fusion * 255)
cv.imwrite('ldr.png', ldr * 255)
cv.imwrite('hdr.hdr', hdr)
## [Write results]
    cv2.imwrite("./images/HDR/hdrDebevec-example.hdr", hdrDebevec)
    print("saved hdrDebevec.hdr ")

    # Tonemap using Drago's method to obtain 24-bit color image
    print("Tonemaping using Drago's method ... ")
    tonemapDrago = cv2.createTonemapDrago(1.0, 0.7)
    ldrDrago = tonemapDrago.process(hdrDebevec)
    # The final output is multiplied by 3 just because it gave the most pleasing results.
    ldrDrago = 3 * ldrDrago
    cv2.imwrite("./images/HDR/ldr-Drago-example.jpg", ldrDrago * 255)
    cv2.imshow("ldr-Drago", ldrDrago)
    print("saved ldr-Drago.jpg")

    # Tonemap using Durand's method to obtain 24-bit color image
    print("Tonemaping using Durand's method ... ")
    tonemapDurand = cv2.createTonemapDurand(1.5, 4, 1.0, 1, 1)
    ldrDurand = tonemapDurand.process(hdrDebevec)
    ldrDurand = 3 * ldrDurand
    cv2.imwrite("./images/HDR/ldr-Durand-example.jpg", ldrDurand * 255)
    cv2.imshow("ldrDurand", ldrDurand)
    print("saved ldr-Durand.jpg")

    # Tonemap using Reinhard's method to obtain 24-bit color image
    print("Tonemaping using Reinhard's method ... ")
    tonemapReinhard = cv2.createTonemapReinhard(1.5, 0, 0, 0)
    ldrReinhard = tonemapReinhard.process(hdrDebevec)
    cv2.imwrite("./images/HDR/ldr-Reinhard-example.jpg", ldrReinhard * 255)
    cv2.imshow("ldrReinhard", ldrReinhard)
    print("saved ldr-Reinhard.jpg")

    # Tonemap using Mantiuk's method to obtain 24-bit color image
Example #31
# Loading exposure images into a list
img_fn = [
    "img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg", "img4.jpg", "img5.jpg",
    "img6.jpg"
]
img_list = [cv2.imread(fn) for fn in img_fn]
exposure_times = np.array([0.0333, 0.25, 0.5, 1.0, 2.0, 3.0, 4.0],
                          dtype=np.float32)
# Merge exposures to HDR image
merge_debvec = cv2.createMergeDebevec()
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
merge_robertson = cv2.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())

# Tonemap HDR image
tonemap1 = cv2.createTonemapDurand(gamma=1.6)
res_debvec = tonemap1.process(hdr_debvec.copy())
tonemap2 = cv2.createTonemapDurand(gamma=6)
res_robertson = tonemap2.process(hdr_robertson.copy())

# Exposure fusion using Mertens
merge_mertens = cv2.createMergeMertens()
res_mertens = merge_mertens.process(img_list)

# Convert datatype to 8-bit and save
res_debvec_8bit = np.clip(res_debvec * 255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson * 255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens * 255, 0, 255).astype('uint8')

#cv2.imwrite("ldr_debvec.jpg", res_debvec_8bit)
#cv2.imwrite("ldr_robertson.jpg", res_robertson_8bit)
Example #32
def tone_mapping_durand():
    tonemapDurand = cv2.createTonemapDurand(1.5, 4, 1.0, 1, 1)
    ldrDurand = tonemapDurand.process(hdrDebevec)
    ldrDurand = 3 * ldrDurand
    return ldrDurand * 255
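Because of the 3x scale, the values returned above can exceed 255, so the result should be clipped before being written as an 8-bit file (hdrDebevec is assumed to be defined globally, as in the snippet):

ldr = tone_mapping_durand()
cv2.imwrite("ldr-Durand.jpg", np.clip(ldr, 0, 255).astype('uint8'))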
Example #33
File: ReadandSave.py  Project: fairy233/qy
image_dataset = DirectoryDataset(preprocess=transforms)

dataloader = DataLoader(image_dataset,
                        batch_size=4,
                        num_workers=0,
                        shuffle=True,
                        drop_last=True)

for i, (concat_img, cover_img, secret_img) in enumerate(dataloader):
    cover_img = torch2cv(cover_img)
    secret_img = torch2cv(secret_img)
    cover_name = './qy/cover.hdr'
    secret_name = './qy/secret.hdr'

    # tone map to LDR
    Du = cv2.createTonemapDurand(2)
    cover_img_ldr = Du.process(cover_img)
    cover_img_ldr = np.clip(cover_img_ldr, 0, 1)

    secret_img_ldr = Du.process(secret_img)
    secret_img_ldr = np.clip(secret_img_ldr, 0, 1)

    vtich = np.vstack((cover_img_ldr, secret_img_ldr))
    name = './qy/two.jpg'

    cv2.imwrite(cover_name, cover_img)
    cv2.imwrite(cover_name + '.jpg', (cover_img_ldr * 255).astype('uint8'))
    cv2.imwrite(secret_name + '.jpg', (secret_img_ldr * 255).astype('uint8'))
    cv2.imwrite(name, (vtich * 255).astype('uint8'))

    if i == 1:
        # The snippet is truncated here; presumably the loop stops after two batches.
        break
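torch2cv is not shown in this snippet; a minimal stand-in (an assumption) that turns the first image of an NCHW batch tensor into an OpenCV-style HWC float32 array:

def torch2cv(tensor):
    # Assumed helper: N x C x H x W torch tensor -> H x W x C float32 numpy array.
    img = tensor[0].detach().cpu().numpy()
    return np.transpose(img, (1, 2, 0)).astype(np.float32)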