import blend_modes


def blend_images(opacity, mode, img_1, img_2):
    """Blend img_2 over img_1 using the named blend_modes function."""
    if opacity > 0:
        if mode == "Dodge":
            return blend_modes.dodge(img_1, img_2, opacity)
        elif mode == "Addition":
            return blend_modes.addition(img_1, img_2, opacity)
        elif mode == "Overlay":
            return blend_modes.overlay(img_1, img_2, opacity)
        elif mode == "Subtract":
            return blend_modes.subtract(img_1, img_2, opacity)
        elif mode == "Grain Extract":
            return blend_modes.grain_extract(img_1, img_2, opacity)
        elif mode == "Darken Only":
            return blend_modes.darken_only(img_1, img_2, opacity)
        elif mode == "Screen":
            return blend_modes.screen(img_1, img_2, opacity)
        elif mode == "Divide":
            return blend_modes.divide(img_1, img_2, opacity)
        elif mode == "Grain Merge":
            return blend_modes.grain_merge(img_1, img_2, opacity)
        elif mode == "Difference":
            return blend_modes.difference(img_1, img_2, opacity)
        elif mode == "Multiply":
            return blend_modes.multiply(img_1, img_2, opacity)
        elif mode == "Soft Light":
            return blend_modes.soft_light(img_1, img_2, opacity)
        elif mode == "Hard Light":
            return blend_modes.hard_light(img_1, img_2, opacity)
        elif mode == "Lighten Only":
            return blend_modes.lighten_only(img_1, img_2, opacity)
        elif mode == "None":
            return blend_modes.normal(img_1, img_2, opacity)
    # unknown mode or zero opacity: return the background image unchanged
    return img_1
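
# Minimal usage sketch for blend_images; the file names here are hypothetical.
# blend_modes operates on float arrays that carry an alpha channel, so the
# inputs are converted to float BGRA before blending and back to 8-bit after.
import cv2
import numpy as np

imgA = cv2.cvtColor(cv2.imread("a.png"), cv2.COLOR_BGR2BGRA).astype(np.float32)
imgB = cv2.cvtColor(cv2.imread("b.png"), cv2.COLOR_BGR2BGRA).astype(np.float32)
blended = blend_images(0.8, "Overlay", imgA, imgB)
cv2.imwrite("blended.png", cv2.cvtColor(blended.astype(np.uint8), cv2.COLOR_BGRA2BGR))
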
import os

import blend_modes
import cv2
import numpy as np


def mergeRGB(video_dict, codec, mode):
    """Write a video whose red/green channels come from clips A/B and whose
    blue channel is a blend of the two."""
    capA = cv2.VideoCapture(video_dict['A'])
    capB = cv2.VideoCapture(video_dict['B'])
    frame_width = int(capA.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(capA.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = round(capA.get(cv2.CAP_PROP_FPS), 2)
    input_name = os.path.splitext(os.path.basename(video_dict['A']))
    output_name = mode + "_RGBMerge_" + input_name[0] + input_name[1]
    out = cv2.VideoWriter(output_name, codec, frame_rate, (frame_width, frame_height))
    while capA.isOpened():
        retA, frameA = capA.read()
        retB, frameB = capB.read()
        if retA and retB:  # stop once either clip runs out of frames
            ## give frames an alpha channel to prepare for blending;
            ## blend_modes requires float input
            frameA = cv2.cvtColor(frameA, cv2.COLOR_BGR2BGRA).astype(np.float32)
            frameB = cv2.cvtColor(frameB, cv2.COLOR_BGR2BGRA).astype(np.float32)
            if mode == "difference":
                extraChannel = blend_modes.difference(frameA, frameB, 1)
            elif mode == "multiply":
                extraChannel = blend_modes.multiply(frameA, frameB, 1)
            else:
                # numpy arrays are indexed (rows, cols), i.e. (height, width)
                extraChannel = np.zeros((frame_height, frame_width, 3), np.uint8)
                extraChannel = cv2.cvtColor(extraChannel, cv2.COLOR_BGR2BGRA).astype(np.float32)
            ## drop the alpha channel in preparation for converting back to
            ## grayscale; OpenCV prefers 8-bit
            frameA = cv2.cvtColor(frameA, cv2.COLOR_BGRA2BGR).astype(np.uint8)
            frameB = cv2.cvtColor(frameB, cv2.COLOR_BGRA2BGR).astype(np.uint8)
            extraChannel = cv2.cvtColor(extraChannel, cv2.COLOR_BGRA2BGR).astype(np.uint8)
            ## convert to grayscale so the three planes can be merged into a
            ## single 3-channel image
            frameA = cv2.cvtColor(frameA, cv2.COLOR_BGR2GRAY)
            frameB = cv2.cvtColor(frameB, cv2.COLOR_BGR2GRAY)
            extraChannel = cv2.cvtColor(extraChannel, cv2.COLOR_BGR2GRAY)
            ## merge (OpenCV channel order is BGR), show and write
            merged = cv2.merge((extraChannel, frameB, frameA))
            cv2.imshow('merged', merged)
            out.write(merged)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    capA.release()
    capB.release()
    out.release()
    cv2.destroyAllWindows()
    print("done!")
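
# Hypothetical invocation of mergeRGB; the clip names and codec are assumptions.
# Both clips should share clip A's resolution and frame rate, since the writer
# is configured from A's properties.
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
mergeRGB({'A': "clipA.mp4", 'B': "clipB.mp4"}, fourcc, "difference")
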
from typing import Tuple

import numpy as np
from blend_modes import difference
from PIL import Image, ImageFilter as fil, ImageOps as ops


def apply_scan_filter(
        image: Image.Image,
        texture: Image.Image,
        position: Tuple[int, int],
        limit: int = 10,
        opacity: float = 0.7) -> Image.Image:
    """
    Blend a paper texture into an image to make it look like a scan.

    Parameters
    ----------
    image: Image.
        The input image.
    texture: Image.
        The paper texture blended over the input image.
    position: Tuple[int, int].
        The (x, y) position of the crop window inside the texture.
    limit: int.
        The maximum random offset, in pixels, along each axis.
    opacity: float.
        The opacity of the texture layer.

    Returns
    -------
    out: Image.
        The filtered image, in RGBA mode.
    """
    __x, __y = position
    __wi, __hi = image.size
    __wt, __ht = texture.size
    # add a random offset as noise to simulate the imperfect
    # alignment of a picture in a scanner
    __dx, __dy = move_around(  # helper defined elsewhere in the project
        x=__x, y=__y,
        w1=__wi, h1=__hi,
        w2=__wt, h2=__ht,
        limit=limit)  # don't move more than `limit` pixels along each axis
    # crop the texture to match the input image size
    __mask = texture.crop((
        __x + __dx,
        __y + __dy,
        __x + __dx + __wi,
        __y + __dy + __hi))
    # blend the mask and the input image
    __filtered = Image.fromarray(np.uint8(difference(
        np.array(image).astype(float),
        np.array(__mask).astype(float),
        opacity=opacity)))
    # sharpen the result
    return ops.invert(
        ops.invert(__filtered.convert('RGB')).filter(fil.MaxFilter(3))).convert('RGBA')
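
# Hypothetical usage of apply_scan_filter; the file names are assumptions.
# The inputs are converted to RGBA because blend_modes' difference() expects
# four-channel arrays.
photo = Image.open("photo.png").convert("RGBA")
paper = Image.open("paper_texture.png").convert("RGBA")
apply_scan_filter(photo, paper, position=(0, 0)).save("scanned.png")
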
# Blend experiments; the uncommented lines below are the recipe currently in
# use. imgA / imgB are assumed to be float arrays with an alpha channel, as
# blend_modes requires.
# imgOut = blend_modes.darken_only(imgA, imgB, 1.0)
# imgOut = blend_modes.multiply(imgA, imgB, 1.0)
# imgOut = blend_modes.hard_light(imgA, imgB, 0.9)
# imgOut = blend_modes.difference(imgA, imgB, 1.0)
# imgOut = blend_modes.subtract(imgA, imgB, 1.0)
# imgOut = blend_modes.grain_extract(imgA, imgB, 1.0)
# imgOut = blend_modes.divide(imgA, imgB, 1.0)

##
# imgOut = blend_modes.grain_merge(imgA, imgB, 1.0)
# imgOut = blend_modes.grain_merge(imgOut, imgB, 1.0)

##
# imgOut = blend_modes.difference(imgA, imgB, 1.0)
# imgOut = blend_modes.overlay(imgOut, imgB, 1.0)

##
# imgOut = blend_modes.overlay(imgA, imgB, 1.0)

# active recipe: a faint difference pass followed by two overlay passes
imgOut = blend_modes.difference(imgA, imgB, 0.1)
imgOut = blend_modes.overlay(imgOut, imgB, 1.0)
imgOut = blend_modes.overlay(imgOut, imgB, 1.0)
# imgOut = blend_modes.overlay(imgOut, imgB, 1.0)
# imgOut = blend_modes.overlay(imgOut, imgB, 1.0)
# imgOut = blend_modes.overlay(imgOut, imgB, 1.0)
# imgOut = blend_modes.overlay(imgOut, imgB, 1.0)

##
# imgOut = blend_modes.overlay(imgOut, imgB, 0.5)
# imgOut = blend_modes.multiply(imgOut, imgB, 1.0)
# imgOut = blend_modes.dodge(imgOut, imgB, 0.8)
# imgOut = blend_modes.addition(imgOut, imgB, 1.0)
# imgOut = blend_modes.soft_light(imgOut, imgB, 1.0)
# imgOut = blend_modes.hard_light(imgOut, imgB, 1.0)

##
# Save images
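
# A sketch of the elided save step, assuming imgA / imgB were loaded with
# OpenCV as float BGRA arrays as in the snippets above; the output path is
# an assumption.
import cv2
import numpy as np

cv2.imwrite("imgOut.png", cv2.cvtColor(imgOut.astype(np.uint8), cv2.COLOR_BGRA2BGR))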