Example #1
"""
# Face Recognition with OpenCV-Python (Python 3)
# urllib.request.urlopen('http://216.58.192.142', timeout=1)
# https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml = https://bit.ly/3jFEy0v
"""
print(__doc__)

import os
import cv2
import urllib.request
import matplotlib.pyplot as plt
import numpy as np

from _path import (get_cut_dir, stop_if_none)

dir_dnn = get_cut_dir('drowsiness_detect') + 'src_dnn\\'
describe_str = "Here, 'Haar Cascade' found '{0}' faces!"

model = 'haarcascade_frontalface.xml'
model_dir = dir_dnn + model

IMG_URL = "https://bit.ly/3hYctB3"  # telegraph - ski resort
IMG_URL = 'https://bit.ly/3509nsL'  # a baby with a beard
IMG_URL = 'https://bit.ly/3gUZIWJ'  # many faces, incl. a bearded man


def main():
    check_model_exist(model, dir_dnn)
    find_faces(IMG_URL)
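
# NOTE: check_model_exist() and find_faces() are cut off in this example.
# A minimal sketch of what they might look like (not the author's exact code;
# the cascade URL is the one quoted in the header comment):

def check_model_exist(model, dir_dnn):
    # Download the Haar cascade XML once if it is not already cached locally.
    cascade_url = ('https://raw.githubusercontent.com/opencv/opencv/master/'
                   'data/haarcascades/haarcascade_frontalface_default.xml')
    os.makedirs(dir_dnn, exist_ok=True)
    if not os.path.exists(dir_dnn + model):
        urllib.request.urlretrieve(cascade_url, dir_dnn + model)


def find_faces(img_url):
    # Fetch the image from the URL, run the cascade, and report the face count.
    resp = urllib.request.urlopen(img_url, timeout=5)
    img = cv2.imdecode(np.frombuffer(resp.read(), np.uint8), cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    cascade = cv2.CascadeClassifier(model_dir)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    print(describe_str.format(len(faces)))

    for (x, y, fw, fh) in faces:
        cv2.rectangle(img, (x, y), (x + fw, y + fh), (0, 255, 0), 2)

    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    main()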

Example #2
#     * Learn = 'res10_300x300_ssd_iter_140000_fp16.caffemodel'
#       - add to .gitignore
from __future__ import print_function

print(__doc__)

import sys
import time
import hashlib
import xml.etree.ElementTree as ET

from urllib.request import urlopen
from _path import get_cut_dir

# Where the weights are read from
dir_dnn = get_cut_dir('catcam') + 'src_dnn\\'

xml_meta = dir_dnn + 'weights.meta4'


def main():
    sys.exit(0 if MetalinkDownloader(dir_dnn).download(xml_meta) else 1)


class HashMismatchException(Exception):
    def __init__(self, expected, actual):
        Exception.__init__(self)
        self.expected = expected
        self.actual = actual

    def __str__(self):
        return 'Hash mismatch: {} vs {}'.format(self.expected, self.actual)
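

# MetalinkDownloader itself is cut off in this example.  A simplified sketch of
# what such a class could do, assuming the standard Metalink 4.0 (.meta4) schema
# and SHA-256 sums; the real helper also reports progress and skips cached files:

ML_NS = {'ml': 'urn:ietf:params:xml:ns:metalink'}

class MetalinkDownloader(object):
    def __init__(self, out_dir):
        self.out_dir = out_dir

    def download(self, meta_file):
        # Parse the .meta4 manifest and fetch every weight file it lists.
        tree = ET.parse(meta_file)
        for entry in tree.getroot().findall('ml:file', ML_NS):
            name = entry.attrib['name']
            url = entry.find('ml:url', ML_NS).text
            expected = entry.find('ml:hash', ML_NS).text

            start = time.time()
            data = urlopen(url).read()
            actual = hashlib.sha256(data).hexdigest()
            if actual != expected:
                raise HashMismatchException(expected, actual)

            with open(self.out_dir + name, 'wb') as out:
                out.write(data)
            print('Downloaded {} in {:.1f}s'.format(name, time.time() - start))
        return True


if __name__ == '__main__':
    main()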
Example #3
"""
# IMAGE LABELLING : COIN-LABELLING -
"""
# dst = destination_image

import sys
import random
import numpy as np
import cv2

from _path import get_cut_dir

dir_src = get_cut_dir('openCV_TAcademy') + '/src/'
src = cv2.imread(dir_src + 'coins.png', cv2.IMREAD_GRAYSCALE)

# Exit the program if the image fails to load!
if src is None:
    print('Image load failed!')
    sys.exit()

# Size of the original image
h, w = src.shape[:2]

# Create two empty images of the same size
dst1 = np.zeros((h, w, 3), np.uint8)
dst2 = np.zeros((h, w, 3), np.uint8)

# Preprocessing
src = cv2.blur(src, (3, 3))
_, src_bin = cv2.threshold(src, 0, 255, cv2.THRESH_OTSU)
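
# The labelling step is cut off here.  A minimal sketch of how the coins could
# be labelled and coloured with cv2.connectedComponentsWithStats (parameter
# values such as the area filter are assumptions, not the author's code):

cnt, labels, stats, centroids = cv2.connectedComponentsWithStats(src_bin)

dst1[src_bin == 255] = (255, 255, 255)       # binary mask drawn in white

for i in range(1, cnt):                      # label 0 is the background
    x, y, bw, bh, area = stats[i]
    if area < 20:                            # skip tiny noise blobs
        continue
    color = [random.randint(0, 255) for _ in range(3)]
    dst2[labels == i] = color
    cv2.rectangle(dst2, (x, y), (x + bw, y + bh), (0, 255, 255), 2)

cv2.imshow('src', src)
cv2.imshow('dst1', dst1)
cv2.imshow('dst2', dst2)
cv2.waitKey()
cv2.destroyAllWindows()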
Example #4
"""
# namecard test : namecard1, 2 - what's the best threshold value?
"""
print(__doc__)

import sys
import cv2

from _path import get_cut_dir
dir_home = get_cut_dir('openCV_TAcademy')

img = cv2.imread(filename=dir_home + '/src/namecard1.jpg')

if img is None:
    print('Image load failed!')
    sys.exit()

# dsize=destination-size = pixels x,y
# img = cv2.resize(img, (640, 480))   # WHY NOT KICK-IN?
img = cv2.resize(img, (0, 0), fx=0.4, fy=0.4)  # WHY NOT KICK-IN?
winname1 = 'imgRGB'
cv2.imshow(winname1, img)
cv2.moveWindow(winname1, x=0, y=130)

img_gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
winname2 = 'grayScale'
cv2.imshow(winname2, img_gray)
cv2.moveWindow(winname2, x=0, y=500)

# Threshold value = 130, maxVal = 255 | Otsu algorithm
# _, img_bin = cv2.threshold(img_gray, 130, 255, cv2.THRESH_BINARY)
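
# The thresholding step is cut off.  A minimal sketch using Otsu's method so the
# threshold value is chosen automatically instead of the hand-tuned 130:

th, img_bin = cv2.threshold(img_gray, 0, 255,
                            cv2.THRESH_BINARY | cv2.THRESH_OTSU)
print('Otsu threshold value:', th)

winname3 = 'binary'
cv2.imshow(winname3, img_bin)
cv2.moveWindow(winname3, x=500, y=130)

cv2.waitKey()
cv2.destroyAllWindows()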
Example #5
"""
# GOOGLE-NET Image CLASSIFICATION :
"""
print(__doc__)

import os
import sys
import cv2
import numpy as np

from _path import get_cut_dir, stop_if_none

dir_dnn = get_cut_dir('classify') + 'src_dnn\\'
dir_img = get_cut_dir('classify') + 'src_img\\'

font_color = (0, 51, 249) # BGR <- rgb(249, 51, 0)

files = [filename for filename in os.listdir(dir_img)
         if filename.split('.')[-1] == 'jpg']

ask_sheets = {}
for i, filename in enumerate(files):
    ask_sheets[str(i)] = ['id', 0, filename]


model = dir_dnn + 'bvlc_googlenet.caffemodel'
config = dir_dnn + 'deploy.prototxt'
classes = dir_dnn + 'classification_classes_ILSVRC2012.txt'

# Load class names
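# (The rest of the script is cut off.  A minimal sketch of the usual cv2.dnn
#  GoogLeNet flow; the 224x224 input size and 104/117/123 mean values are the
#  standard GoogLeNet settings, assumed here rather than taken from the source.)
with open(classes, 'rt') as f:
    class_names = f.read().rstrip('\n').split('\n')

net = cv2.dnn.readNet(model, config)
net = stop_if_none(net, message='Net open failed!')

for filename in files:
    img = cv2.imread(dir_img + filename)
    if img is None:
        continue

    blob = cv2.dnn.blobFromImage(img, 1, (224, 224), (104, 117, 123))
    net.setInput(blob)
    prob = net.forward()

    idx = int(np.argmax(prob))
    label = f'{class_names[idx]} ({prob.flatten()[idx] * 100:.2f}%)'
    cv2.putText(img, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                0.8, font_color, 2, cv2.LINE_AA)
    cv2.imshow(filename, img)

cv2.waitKey()
cv2.destroyAllWindows()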
Example #6
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf8')

import PIL
import easyocr
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from PIL import ImageDraw
from _path import (get_cut_dir, stop_if_none)

dir_home = get_cut_dir('easyOCR_example')

dir_ocr = dir_home + 'easyOCR\\'
dir_read = dir_home + 'readOCR\\'
dir_result = dir_home + 'resultOCR\\'

# filename = 'korean.png'                 #  5 recogs - dir_ocr
# filename = 'english.png'                # 11 recogs - dir_ocr

# filename = 'namecard_extracted.png'     #  7 recogs - dir_read
# filename = 'booksOrig000.jpg'           #  95 recogs  - dir_read
# filename = 'booksOrig090.jpg'           # 95 recogs - dir_read
filename = 'news_daum.png'  # 41 recogs - dir_read
# filename = 'AI_compete_2020_KOGAS.jpg'  # 72 recogs - dir_read

filename_w_dir = dir_read + filename
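
# The OCR pass is cut off.  A minimal sketch of the usual easyocr flow, drawing
# the detected boxes with PIL (the language list and output filename are
# assumptions; dir_result is assumed to exist):

from PIL import Image

reader = easyocr.Reader(['ko', 'en'])
results = reader.readtext(filename_w_dir)
print(f'{len(results)} recogs in {filename}')

img = Image.open(filename_w_dir).convert('RGB')
draw = ImageDraw.Draw(img)

for bbox, text, conf in results:
    # bbox is a list of four (x, y) corner points
    p0, p1, p2, p3 = [tuple(map(int, pt)) for pt in bbox]
    draw.polygon([p0, p1, p2, p3], outline=(255, 0, 0))
    draw.text(p0, text, fill=(255, 0, 0))

img.save(dir_result + 'result_' + filename)
img.show()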
Example #7
"""
# simple video capture & edge detection
"""

import sys
import cv2
from _path import get_cut_dir

dir_home = get_cut_dir('openCV_TAcademy_reboot')

# Create a cv2.VideoCapture object from the camera
cap = cv2.VideoCapture(0)
video_on = cap.isOpened()

if not video_on:
    print("Camera open failed!")
    sys.exit()

# Print the frame resolution
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print(f'width x height = [ {width} x {height} ]')

# Change to a new frame size
video_scale = 0.7
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width * video_scale)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height * video_scale)
cap.set(cv2.CAP_PROP_FPS, 30)

# Print the new frame resolution
print(f'width x height = [ {width * video_scale} x {height * video_scale} ]')
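
# The capture/processing loop is cut off.  A minimal sketch of the usual
# read / Canny / show loop (the 50 and 150 Canny thresholds are assumptions):

while True:
    ret, frame = cap.read()
    if not ret:
        break

    edge = cv2.Canny(frame, 50, 150)

    cv2.imshow('frame', frame)
    cv2.imshow('edge', edge)

    if cv2.waitKey(10) == 27:    # press ESC to quit
        break

cap.release()
cv2.destroyAllWindows()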
Example #8
"""
# tensorflow==2.2 or higher version is needed!
# 0. 사용할 패키지 불러오기

import numpy as np

from numpy import argmax
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation

from _path import get_cut_dir

modelname = 'mnist_mlp_model.h5'
dir_src = get_cut_dir('handwriting_mnist') + 'src\\'

# 1. Create the dataset
# Load the training and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preprocess the dataset
x_train = x_train.reshape(60000, 784).astype('float32') / 255.0
x_test = x_test.reshape(10000, 784).astype('float32') / 255.0

# One-hot encode the labels
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

# Split off a validation set from the training set
x_val = x_train[:42000]  # use the first 42,000 training samples as the validation set
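# (The rest of the split and the remaining steps are cut off.  A minimal sketch
#  of how the script could continue, following the numbered-step style above;
#  the layer sizes, epoch count and batch size are assumptions.)
y_val = y_train[:42000]
x_train = x_train[42000:]
y_train = y_train[42000:]

# 2. Build the model
model = Sequential()
model.add(Dense(units=64, input_dim=784, activation='relu'))
model.add(Dense(units=10, activation='softmax'))

# 3. Configure the training process
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# 4. Train the model
model.fit(x_train, y_train, epochs=5, batch_size=32,
          validation_data=(x_val, y_val))

# 5. Evaluate, then save the trained model next to the sources
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=32)
print('evaluation :', loss_and_metrics)
model.save(dir_src + modelname)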
Example #9
"""
#     * model = 'opencv_face_detector_uint8.pb'
#     * Learn = 'res10_300x300_ssd_iter_140000_fp16.caffemodel'
#       - add to .gitIgnore
from __future__ import print_function

import sys
import time
import hashlib
import xml.etree.ElementTree as ET

from urllib.request import urlopen
from _path import get_cut_dir

# Where the weights are read from
dir_dnn = get_cut_dir('dnnface') + 'src_dnn\\'

xml_meta = dir_dnn + 'weights.meta4'

def main():
    sys.exit(0 if MetalinkDownloader(dir_dnn).download(xml_meta) else 1)


class HashMismatchException(Exception):
    def __init__(self, expected, actual):
        Exception.__init__(self)
        self.expected = expected
        self.actual = actual
    def __str__(self):
        return 'Hash mismatch: {} vs {}'.format(self.expected, self.actual)
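

# The rest of the downloader script is cut off.  A small helper of the kind
# that would raise HashMismatchException, assuming SHA-256 sums in the .meta4
# manifest (hypothetical, not the author's code):

def verify_file(path, expected_sha256):
    # Hash the local file in chunks and compare against the manifest value.
    sha = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha.update(chunk)
    actual = sha.hexdigest()
    if actual != expected_sha256:
        raise HashMismatchException(expected_sha256, actual)
    return True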
Example #10
# DNN FACE DETECTOR -> puts cat ears on every detected head!
# FACE-DNN = openCV_FACE_DETECTOR
"""
# RUN at CMD-Terminal mode : OK!
# Not Script-Run mode = WARNING appear
#   -> [FIX] : Script-Run = NOW OK!

print(__doc__)

import sys
import cv2
import numpy as np

from _path import get_cut_dir, stop_if_none

dir_src = get_cut_dir('openCV_TAcademy_reboot') + 'src/'
dir_dnn = get_cut_dir('catcam') + 'src_dnn/'
dir_img = get_cut_dir('catcam') + 'src_img/'

model = dir_dnn + 'opencv_face_detector_uint8.pb'
config = dir_dnn + 'opencv_face_detector.pbtxt'

# IMAGE OBJECT LOAD! : IF OBJECT == NONE -> ERROR!
cat = cv2.imread(dir_img + 'ears_cat.png', cv2.IMREAD_UNCHANGED)
cat = stop_if_none(cat, message='Image open failed!')

# CAMERA OBJECT LOAD! : IF OBJECT == NONE -> ERROR!
# cap = cv2.VideoCapture(0)

# The frame rate of the CSI camera on the Jetson Nano is currently set through gstreamer.
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps).
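
# The rest of the script is cut off.  A minimal sketch of the detection loop:
# read the face-detector net, grab frames from a plain VideoCapture(0) (the
# Nano/gstreamer pipeline is skipped here), and paste the transparent cat-ear
# PNG above each detected face.  Thresholds and sizes are assumptions.

net = cv2.dnn.readNet(model, config)
net = stop_if_none(net, message='Net open failed!')

cap = cv2.VideoCapture(0)
cap = stop_if_none(cap, message='Camera open failed!')


def overlay_ears(frame, ears, x, y, w):
    # Alpha-blend the RGBA cat-ear image, resized to the face width, above (x, y).
    if w < 10:
        return
    x = max(x, 0)
    ears = cv2.resize(ears, (w, w // 2))
    eh, ew = ears.shape[:2]
    y0 = max(y - eh, 0)
    roi = frame[y0:y0 + eh, x:x + ew]
    if roi.shape[:2] != (eh, ew):        # ears would fall outside the frame
        return
    alpha = ears[:, :, 3:] / 255.0       # assumes the PNG has an alpha channel
    roi[:] = (1 - alpha) * roi + alpha * ears[:, :, :3]


while True:
    ret, frame = cap.read()
    if not ret:
        break

    blob = cv2.dnn.blobFromImage(frame, 1, (300, 300), (104, 177, 123))
    net.setInput(blob)
    detect = net.forward()               # shape: (1, 1, N, 7)

    h, w = frame.shape[:2]
    for d in detect[0, 0]:
        if d[2] < 0.5:                   # confidence threshold
            continue
        x1, y1, x2, y2 = [int(v) for v in d[3:7] * [w, h, w, h]]
        overlay_ears(frame, cat, x1, y1, x2 - x1)

    cv2.imshow('catcam', frame)
    if cv2.waitKey(1) == 27:             # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()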
Example #11
"""
# DNN FACE DETECTOR : CAMERA REAL-TIME
# FACE-DNN = RES10_300x300_SSD_fp16
"""
# learnopencv/res10_300x300_ssd_iter_140000_fp16.caffemodel
#  https://bit.ly/3b5ZJ8Z

print(__doc__)

import cv2
from _path import get_cut_dir, stop_if_none

dir_src = get_cut_dir('openCV_TAcademy') + 'src\\'
dir_dnn = get_cut_dir('classify') + 'src_dnn\\'
dir_img = get_cut_dir('classify') + 'src_img\\'

# model = dir_dnn + 'opencv_face_detector_uint8.pb'
# config = dir_dnn + 'opencv_face_detector.pbtxt'

model = dir_dnn + 'res10_300x300_ssd_iter_140000_fp16.caffemodel'
config = dir_dnn + 'deploy.prototxt'

# if not cap.isOpened():
cap = cv2.VideoCapture(0)
cap = stop_if_none(cap, message='Camera open failed!')

# if net.empty():
net = cv2.dnn.readNet(model, config)
net = stop_if_none(net, 'Net open failed!')

while True:
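    # (The loop body is cut off.  A minimal sketch of the usual SSD face
    #  detection pass: 300x300 blob, mean (104, 177, 123), confidence > 0.5.)
    ret, frame = cap.read()
    if not ret:
        break

    blob = cv2.dnn.blobFromImage(frame, 1, (300, 300), (104, 177, 123))
    net.setInput(blob)
    detect = net.forward()                  # shape: (1, 1, N, 7)

    h, w = frame.shape[:2]
    for d in detect[0, 0]:
        confidence = float(d[2])
        if confidence < 0.5:
            continue
        x1, y1 = int(d[3] * w), int(d[4] * h)
        x2, y2 = int(d[5] * w), int(d[6] * h)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, f'Face: {confidence:.2f}', (x1, y1 - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1, cv2.LINE_AA)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:                # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()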