def create_app(config_object='config.DevelopmentConfig'):
    """Application factory: build and configure the Flask app.

    Parameters:
        config_object: dotted path of the configuration object loaded via
            ``app.config.from_object``; defaults to the development config.

    Returns:
        The fully initialised Flask application.
    """
    app = Flask(__name__, static_folder='static', template_folder='templates')
    app.config.from_object(config_object)
    # Mirror the config flag onto app.debug so extensions that inspect the
    # attribute see the configured value.
    app.debug = app.config['DEBUG']
    db.init_app(app)
    # NOTE(review): binding .app pins this single app instance onto the shared
    # extension objects -- presumably so they work outside a request context.
    db.app = app
    redis.init_app(app)
    init_signal(app)
    email_init_signal(app)
    mail.init_app(app)
    mail.app = app
    register_filter(app)
    if not app.debug:
        # Error reporting is only wired up outside debug mode.
        Sentry(app)
    uploads_conf(app)
    return app
import cv2
import numpy as np

import filters


class Grayscale:
    """Filter that renders the incoming frame in grayscale.

    The color channels are collapsed to luminance and replicated back to
    three channels so the frame keeps its channel layout; a fourth (alpha)
    channel, if present, is left untouched.
    """

    def __init__(self, *args, **kwargs):
        pass

    def apply(self, *args, **kwargs):
        """Return ``kwargs['frame']`` with its first three channels grayed."""
        frame = kwargs['frame'].astype(np.uint8)
        gray_frame = cv2.cvtColor(frame[:, :, :3], cv2.COLOR_BGR2GRAY)
        # Replicate the single luminance channel back to three channels.
        gray_frame = cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR)
        # BUGFIX: the np.float alias was removed in NumPy 1.24; use the
        # explicit float64 dtype instead.
        frame[:, :, :3] = gray_frame.astype(np.float64)
        return frame


filters.register_filter("grayscale", Grayscale)
import filters
import numpy as np

# Accumulated scroll offsets -- module-level state shared across calls.
position_x = 0
position_y = 0


def roll(speed_x=10, speed_y=0, *args, **kwargs):
    """Scroll the frame by an ever-advancing offset, wrapping at the edges.

    ``speed_x``/``speed_y`` are the per-call increments (in pixels) along
    the horizontal and vertical axes respectively.
    """
    global position_x, position_y
    frame = kwargs['frame']
    height, width = frame.shape[0], frame.shape[1]
    position_x = (position_x + speed_x) % width
    position_y = (position_y + speed_y) % height
    return np.roll(frame, shift=(position_x, position_y), axis=(1, 0))


filters.register_filter("roll", roll)
import filters
import numpy as np


def change_alpha(alpha_change=0, alpha_min=0, alpha_max=255.0, *args, **kwargs):
    """Shift the frame's alpha channel by ``alpha_change``.

    For a 4-channel frame the existing alpha channel is shifted and clipped
    into [alpha_min, alpha_max]. For a 3-channel frame an alpha channel is
    synthesized (full opacity plus the shift) and appended.
    """
    frame = kwargs['frame']
    if frame.shape[2] == 4:
        frame[:, :, 3] = np.clip(
            frame[:, :, 3].astype(np.int16) + alpha_change,
            alpha_min, alpha_max)
    elif frame.shape[2] == 3:
        # Synthesize an alpha channel starting at full opacity.
        alpha = np.clip(
            np.ones((frame.shape[0], frame.shape[1], 1)) * 255 + alpha_change,
            alpha_min, alpha_max)
        # BUGFIX: previously np.clip was applied to the whole appended frame,
        # clamping the *color* channels into [alpha_min, alpha_max] as well.
        # Only the alpha channel must be clipped, matching the 4-channel path
        # above (a no-op difference for the default 0..255 bounds).
        frame = np.append(frame.astype(np.int16), alpha, axis=2)
    return frame


filters.register_filter("change_alpha", change_alpha)
            self.interpolation_method)
        # A fresh decode succeeded: swap in the new frames and restart
        # playback from the first one.
        if images:
            self.images = images
            self.idx = 0
            self.last_frame_time = time.time()
            self.mtime = video_stat.st_mtime

    def apply(self, *args, **kwargs):
        """Return the current video frame, advancing playback at ``self.fps``."""
        self.reload_video()
        if not self.images:
            # Nothing decoded yet: emit a black frame of the configured size.
            return np.zeros((self.height, self.width, 3))
        if self.lazy:
            # If the generator is not empty, grab the next frame
            try:
                image = next(self.generator)
                self.images.append(image)
            except StopIteration:
                pass
        frame = self.images[self.idx].copy()
        # Advance to the next frame only once enough wall time has elapsed.
        if time.time() - self.last_frame_time > 1.0 / self.fps:
            self.idx = (self.idx + 1) % len(self.images)
            self.last_frame_time = time.time()
        return frame


filters.register_filter("video", Video)
if dx>0: img[:, :dx] = 0 elif dx<0: img[:, dx:] = 0 return img class Hologram: def __init__(self, *args, **kwargs): pass def apply(self, *args, **kwargs): frame = kwargs['frame'].astype(np.uint8) frame[:,:,:3] = cv2.applyColorMap(frame[:,:,:3], cv2.COLORMAP_WINTER) frame[:,:,:3] = cv2.cvtColor(frame[:,:,:3], cv2.COLOR_BGR2RGB) # add a halftone effect bandLength, bandGap = 2, 3 for y in range(frame.shape[0]): if y % (bandLength+bandGap) < bandLength: frame[y,:,:3] = frame[y,:,:3] * np.random.uniform(0.1, 0.3) # add some ghosting holo_blur = cv2.addWeighted(frame[:,:,:3], 0.2, shift_image(frame[:,:,:3], 5, 5), 0.8, 0) holo_blur = cv2.addWeighted(holo_blur, 0.4, shift_image(frame[:,:,:3], -5, -5), 0.6, 0) # combine with the original color, oversaturated frame[:,:,:3] = cv2.addWeighted(frame[:,:,:3], 0.5, holo_blur, 0.6, 0) return frame.astype(np.float) filters.register_filter("hologram", Hologram)
        objs = ndimage.find_objects(face_mask)
        # Bounding box over every detected face region; starts inverted so an
        # empty detection leaves the bounds at +/-inf (caught below).
        min_x, min_y, max_x, max_y = np.inf, np.inf, -np.inf, -np.inf
        for obj in objs:
            min_x, min_y = min(min_x, obj[0].start), min(min_y, obj[1].start)
            max_x, max_y = max(max_x, obj[0].stop), max(max_y, obj[1].stop)
        # Grow the box by the configured padding, clamped to the frame bounds.
        min_x = max(0, min_x - self.padding)
        min_y = max(0, min_y - self.padding)
        max_x = min(frame.shape[0], max_x + self.padding)
        max_y = min(frame.shape[1], max_y + self.padding)
        if np.isfinite([min_x, max_x, min_y, max_y]).all():
            # Mark the padded face rectangle for anonymization.
            face_mask[min_x:max_x, min_y:max_y] = 1.0
        elif self.secure:
            # When no face is detected, anonymize everything
            face_mask[:, :] = 1.0
        face_mask = np.expand_dims(face_mask, axis=2)
        if self.blur:
            anonymized_frame = cv2.blur(frame, (self.blur, self.blur))
        else:
            # NOTE(review): nesting reconstructed from mangled source -- the
            # blackout is assumed to belong to the no-blur branch (blacking
            # out after blurring would defeat the blur); confirm upstream.
            anonymized_frame = frame
            anonymized_frame[:, :, :3] = 0.0
        # Keep the anonymized content only inside the mask; everything else
        # is multiplied to zero (presumably composited later via alpha --
        # TODO confirm against the caller).
        anonymized_frame[:, :, :4] = anonymized_frame[:, :, :4] * face_mask
        return anonymized_frame


filters.register_filter("anonymize", Anonymize)
import cv2

import filters


def single_color(r=255.0, g=255.0, b=255.0, *args, **kwargs):
    """Desaturate the frame, then tint it with the given channel weights."""
    frame = kwargs['frame']
    # Collapse to luminance and back to three channels, then delegate the
    # tinting step to color_filter.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kwargs['frame'] = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    return color_filter(r=r, g=g, b=b, *args, **kwargs)


def color_filter(r=255.0, g=255.0, b=255.0, *args, **kwargs):
    """Scale each color channel by its weight (255.0 leaves a channel as-is)."""
    frame = kwargs['frame']
    for channel, weight in enumerate((r, g, b)):
        frame[:, :, channel] = frame[:, :, channel] * weight / 255.0
    return frame


filters.register_filter("single_color", single_color)
filters.register_filter("color_filter", color_filter)
import filters
import cv2
import numpy as np


def noise(*args, **kwargs):
    """Salt noise: paint a random ~5% of the pixels white.

    Only the first three (color) channels are touched; an alpha channel,
    if present, is left unchanged.
    """
    frame = kwargs['frame']
    # Boolean per-pixel mask, redrawn on every call.
    indices = (np.random.random(frame.shape[:2]) < 0.05)
    # CLEANUP: removed a dead `noise = np.zeros(...)` buffer that was never
    # used (and shadowed this function's name), plus commented-out code.
    frame[indices, 0] = 255
    frame[indices, 1] = 255
    frame[indices, 2] = 255
    return frame


filters.register_filter("noise", noise)
import cv2

import filters


def gaussian_blur(intensity_x=5, intensity_y=-1, *args, **kwargs):
    """Apply a Gaussian blur with an (intensity_x, intensity_y) kernel.

    ``intensity_y < 0`` means "same as intensity_x". If both intensities
    are non-positive the frame is returned untouched. OpenCV requires odd,
    positive kernel dimensions, so each size is floored at 1 and bumped to
    the next odd value.
    """
    frame = kwargs['frame']
    if intensity_x <= 0 and intensity_y <= 0:
        return frame
    if intensity_y < 0:
        intensity_y = intensity_x
    # BUGFIX: a non-positive size on one axis (with a positive one on the
    # other) previously slipped through to cv2.GaussianBlur and raised
    # (negative odd values pass the even-check below); clamp to 1 first.
    intensity_x = max(1, intensity_x)
    intensity_y = max(1, intensity_y)
    if (intensity_x % 2) == 0:
        intensity_x += 1
    if (intensity_y % 2) == 0:
        intensity_y += 1
    return cv2.GaussianBlur(frame, (intensity_x, intensity_y), 0)


filters.register_filter("gaussian_blur", gaussian_blur)
        assert (len(matrix[1]) == 2)
        assert (len(offset) == 2)

    def apply(self, *args, **kwargs):
        """Apply the configured 2x2 linear map plus offset to the frame."""
        frame = kwargs['frame']
        # Embed the 2x2 matrix into a 3x3 homogeneous transform; the third
        # axis (the channel axis) is left untouched.
        matrix = np.zeros((3, 3))
        matrix[0, :2] = self.matrix[0]
        matrix[1, :2] = self.matrix[1]
        matrix[2, 2] = 1.0
        offset = self.offset + [0]  # no shift along the channel axis
        if frame.shape[2] == 3:
            # Add alpha channel
            frame = np.append(frame, np.ones(
                (frame.shape[0], frame.shape[1], 1)) * 255.0, axis=2)
        # order=0 -> nearest-neighbour sampling.
        frame = ndimage.affine_transform(frame,
                                         matrix=matrix,
                                         offset=offset,
                                         order=0)
        return frame


filters.register_filter("flip", Flip)
filters.register_filter("zoom", Zoom)
filters.register_filter("move", Move)
filters.register_filter("affine", Affine)
import cv2
import filters
import numpy as np


class Webcam:
    """Filter that replaces the frame with a live capture from a webcam."""

    def __init__(self, device, *args, **kwargs):
        # The capture device stays open for the lifetime of the filter.
        # CLEANUP: removed an unused `config = kwargs['config']` local.
        self.cap = cv2.VideoCapture(device)

    def __del__(self):
        # BUGFIX: release the capture handle instead of leaking it when the
        # filter is garbage-collected.
        if hasattr(self, 'cap'):
            self.cap.release()

    def apply(self, *args, **kwargs):
        """Grab a webcam frame, reverse its channel order and fit the target size.

        Falls back to the incoming frame when the capture read fails.
        """
        frame = kwargs['frame']
        shape = frame.shape
        success, webcam_frame = self.cap.read()
        if success:
            # Reverse the channel axis (OpenCV captures BGR).
            frame = webcam_frame[..., ::-1]
            frame = cv2.resize(frame, (shape[1], shape[0]))
        return np.array(frame)


filters.register_filter("webcam", Webcam)
import filters
import numpy as np


class Stripes:
    """Animated horizontal stripes.

    Alternating bands of ``width`` rows are darkened and brightened by
    ``intensity`` and scroll by ``speed`` rows on every applied frame.
    """

    def __init__(self, width=5, intensity=10.0, speed=1, *args, **kwargs):
        self.width = width
        self.intensity = intensity
        self.speed = speed
        self.roll_y = 0

    def apply(self, *args, **kwargs):
        """Return the frame with the scrolling stripe pattern applied."""
        # Advance the scroll offset, wrapping at one full stripe period.
        period = 2 * self.width
        self.roll_y = (self.roll_y + self.speed) % period
        frame = kwargs['frame']
        for row in range(self.width):
            dark_start = row + self.roll_y
            light_start = dark_start + self.width
            frame[dark_start::period, :, :3] -= self.intensity
            frame[light_start::period, :, :3] += self.intensity
        return np.clip(frame, 0.0, 255.0)


filters.register_filter("stripes", Stripes)
def __init__(self, r=255.0, g=255.0, b=255.0, *args, **kwargs): self.color_filter = ColorFilter(r, g, b) def apply(self, *args, **kwargs): frame = kwargs['frame'].astype(np.uint8) gray_frame = cv2.cvtColor(frame[:,:,:3], cv2.COLOR_BGR2GRAY) gray_frame = cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR) frame[:,:,:3] = gray_frame kwargs['frame'] = frame.astype(np.float) return self.color_filter.apply(*args, **kwargs) class ColorFilter: def __init__(self, r=255.0, g=255.0, b=255.0, *args, **kwargs): self.r = r self.g = g self.b = b def apply(self, *args, **kwargs): frame = kwargs['frame'] frame[:,:,0] = frame[:,:,0] * self.r / 255.0 frame[:,:,1] = frame[:,:,1] * self.g / 255.0 frame[:,:,2] = frame[:,:,2] * self.b / 255.0 frame = np.clip(frame, 0.0, 255.0) return frame filters.register_filter("solid_color", SolidColor) filters.register_filter("colorize", Colorize) filters.register_filter("color_filter", ColorFilter)
import cv2

import filters


def blur(intensity_x=5, intensity_y=-1, *args, **kwargs):
    """Box-blur the frame with an (intensity_x, intensity_y) kernel.

    ``intensity_y < 0`` means "same as intensity_x"; if both sizes are
    non-positive the frame is passed through untouched.
    """
    frame = kwargs['frame']
    if intensity_x <= 0 and intensity_y <= 0:
        return frame
    if intensity_y < 0:
        intensity_y = intensity_x
    # BUGFIX: cv2.blur rejects non-positive kernel sizes, so e.g. x=0 with a
    # positive y previously passed the guard above and crashed; clamp to 1.
    return cv2.blur(frame, (max(1, intensity_x), max(1, intensity_y)))


filters.register_filter("blur", blur)
        self.images_path = images_path
        self.fps = fps
        self.interpolation_method = interpolation_method
        # An mtime of 0 forces the first reload_images() call to load.
        self.mtime = 0
        self.reload_images()

    def reload_images(self):
        """Re-read the images from disk if they changed since the last load."""
        images, new_mtime = reload_images(self.images_path,
                                          self.mtime,
                                          self.width,
                                          self.height,
                                          self.interpolation_method)
        if images:
            self.images = images
            self.mtime = new_mtime
            # Restart the sequence whenever the source images change.
            self.idx = 0
            self.last_frame_time = time.time()

    def apply(self, *args, **kwargs):
        """Return the current image, cycling through the sequence at ``self.fps``."""
        self.reload_images()
        # NOTE(review): if the initial load returns nothing, self.images is
        # never assigned and this raises AttributeError -- the video filter
        # guards the equivalent case with a black frame; consider doing the
        # same here.
        frame = self.images[self.idx].copy()
        if time.time() - self.last_frame_time > 1.0 / self.fps:
            self.idx = (self.idx + 1) % len(self.images)
            self.last_frame_time = time.time()
        return frame


filters.register_filter("image", Image)
filters.register_filter("image_sequence", ImageSequence)
        # Append a synthetic alpha channel (full opacity shifted by
        # alpha_change) and clip it into the configured alpha range.
        frame = np.clip(np.append(
            frame, np.ones((frame.shape[0], frame.shape[1], 1)) * 255 + \
            self.alpha_change, axis=2), self.alpha_min, self.alpha_max)
        return frame


class ChromaKey:
    """Make pixels close to a key color fully transparent.

    ``fuzz`` widens the per-channel tolerance band around (r, g, b); the
    defaults key out green.
    """

    def __init__(self, r=0.0, g=255.0, b=0.0, fuzz=10.0, *args, **kwargs):
        # Inclusive per-channel bounds of the keyed color range.
        self.rgb_from = np.clip(
            np.array([r - fuzz, g - fuzz, b - fuzz]), 0, 255)
        self.rgb_to = np.clip(
            np.array([r + fuzz, g + fuzz, b + fuzz]), 0, 255)

    def apply(self, *args, **kwargs):
        """Return the frame with keyed pixels' alpha set to 0."""
        frame = kwargs['frame']
        if frame.shape[2] == 3:
            # Add alpha channel
            frame = np.append(frame,
                              np.ones((frame.shape[0],
                                       frame.shape[1], 1)) * 255.0, axis=2)
        # np.min over the channel axis of the boolean comparisons acts as a
        # per-pixel "all channels in range"; those pixels get alpha 0.
        # NOTE(review): channel 0 is compared against r -- if frames are BGR
        # (the OpenCV convention) the r/b parameters are effectively swapped;
        # confirm against the rest of the pipeline.
        frame[np.min(
            (frame[:, :, :3] >= self.rgb_from) &
            (frame[:, :, :3] <= self.rgb_to), axis = 2), 3] = 0
        return frame


filters.register_filter("change_alpha", ChangeAlpha)
filters.register_filter("chroma_key", ChromaKey)