Example #1
from typing import List

import sys
import cv2
import numpy as np

# import pytesseract    # setup error -> switched to easyOCR
from _path import (DIR_SRC, get_cut_dir, stop_if_none)

# Load the image : default = namecard1.jpg
filename = 'namecard1.jpg' if len(sys.argv) <= 1 else sys.argv[1]
dw, dh = (720, 400)  # output/save size of the business card after perspective correction

# Exit the program if the image fails to load!
src_RGB = cv2.imread(DIR_SRC + filename)
src_RGB = stop_if_none(src_RGB, message="image loading failed!")

# Preprocess the input image : grayscale + binary (Otsu) image
src_gray = cv2.cvtColor(src_RGB, cv2.COLOR_BGR2GRAY)
_, src_bin = cv2.threshold(src_gray, 0, 255,
                           cv2.THRESH_BINARY | cv2.THRESH_OTSU)

# Output image setup
src_quards = np.array([[0, 0], [0, 0], [0, 0], [0, 0]], np.float32)

destin_quards = np.array([[0, 0], [0, dh], [dw, dh], [dw, 0]], np.float32)
destination = np.zeros((dh, dw), np.uint8)


def get_reorder_pts(pts: List[int]) -> List[int]:
    """ # re-dorder 4 point of rectangular"""
Example #2
"""

import sys
import cv2

from _path import (DIR_SRC, get_cut_dir, stop_if_none)

dir_avi = DIR_SRC + 'avi_test/'
video_name = '201907-03.mp4'  # scale = 0.35
sizeRate = 0.9
landscape = 1

# Create a cv2.VideoCapture object from the video file

cap = cv2.VideoCapture(dir_avi + video_name)
cap = stop_if_none(cap, message="Camera open failed!")

if not cap.isOpened():
    cap = cv2.VideoCapture(0)  # fall back to device #0 (the default camera)

# Frame size
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
fps = cap.get(cv2.CAP_PROP_FPS)

# Print the frame resolution, total frame count, and FPS
print('Frame width:', width)
print('Frame height:', height)
print('Frame count:', count)
print('FPS:', fps)
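
# A minimal sketch (not part of the original snippet) of the read-and-display loop that
# would typically follow, resizing each frame by sizeRate; ESC (key code 27) quits.
delay = int(1000 / fps) if fps > 0 else 1
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, dsize=(0, 0), fx=sizeRate, fy=sizeRate)
    cv2.imshow(video_name, frame)
    if cv2.waitKey(delay) == 27:
        break

cap.release()
cv2.destroyAllWindows()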
Example #3
"""
import cv2
import numpy as np

from _path import (DIR_SRC, stop_if_none)

dir_avi = DIR_SRC + 'avi_test/'
video_name = '201907-01.mp4'
# video_name = 'input.avi'

sizeRate = 0.35

# Video resource
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(dir_avi + video_name)
cap = stop_if_none(cap, message="Camera open failed!")

width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
winResize = (int(width * sizeRate), int(height * sizeRate))


def main():
    getGMG(canny=0)


def getGMG(canny=0):
    """  """
    winName0 = 'GMG / canny={}'.format(canny)
    winName1 = 'Bitwise / canny={}'.format(canny)
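
    # A minimal sketch of how the body might continue (not from the original snippet).
    # GMG background subtraction lives in the opencv-contrib package (cv2.bgsegm); the
    # foreground mask is shown on its own and bitwise-ANDed with the (resized) frame.
    bgsub = cv2.bgsegm.createBackgroundSubtractorGMG()
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, winResize)
        if canny:
            frame = cv2.Canny(frame, 50, 150)
        mask = bgsub.apply(frame)
        cv2.imshow(winName0, mask)
        cv2.imshow(winName1, cv2.bitwise_and(frame, frame, mask=mask))
        if cv2.waitKey(1) == 27:        # ESC to quit
            break
    cv2.destroyAllWindows()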
Example #4
) -> List[str]:
    """# image filenames, like JPG, PNG, BMP"""
    imgs = [file for file in os.listdir(dir_target)
            if len(file.split('.')) > 1 and \
                file.split('.')[-1] in exts_valid]

    [print(img) for img in imgs]
    return imgs


imgs = get_img_list(DIR_SRC)

# VARIOUS IMAGE READING : cv2 <-> plt
# 01 CV2-object
src = cv2.imread(DIR_SRC + 'cat.bmp', cv2.IMREAD_UNCHANGED)
src = stop_if_none(src, message='Image loading failed!')

# 02 PLT-object
src2 = plt.imread(DIR_SRC + 'cat.bmp', format='RGB')
src2 = stop_if_none(src2, message='Image loading failed!')

# 03 CV2-object : convert BGR to RGB format
srcRGB = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
srcRGB = stop_if_none(srcRGB, message='Image loading failed!')

# CHECK: img type = ndarray
print('\n\n', '# CHECK: img type = ndarray')
[print(f"TYPE = {type(obj)}") for obj in [src, src2, srcRGB]]

# matplotlib = check image on window
fig, ax = plt.subplots(
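    # A minimal sketch of a possible continuation (not the original call): one row of
    # three panels for the raw cv2 (BGR), the plt, and the converted RGB images.
    nrows=1, ncols=3, figsize=(12, 4))

for axis, img, title in zip(ax, [src, src2, srcRGB],
                            ['cv2 (BGR)', 'plt.imread', 'cv2 -> RGB']):
    axis.imshow(img)
    axis.set_title(title)
    axis.axis('off')

plt.show()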
Example #5
filename = 'news_daum.png'  # 41 recogs - dir_read
# filename = 'AI_compete_2020_KOGAS.jpg'  # 72 recogs - dir_read

filename_w_dir = dir_read + filename
echofile_w_dir = dir_result + 'ECHO_' + filename.split('.')[0] + '.txt'

# TO WRITE : TERMINAL ECHO to ECHOFILE @ RESULT_DIR
echofile = open(file=echofile_w_dir, mode='w', encoding='utf8')
sys.stdout = echofile

size_targeted = (640, 400)  # resize scale
post_fix = filename.split('.')[0].split('_')[0]

# TO LOAD IMAGE
im = PIL.Image.open(filename_w_dir)
im = stop_if_none(im, message='Image loading failed!')

print('filename =', filename)
print('img size =', im.size, '\n\n')

im_resize = im.resize(size_targeted, PIL.Image.ANTIALIAS)
im_resize = stop_if_none(im_resize, message='Image loading failed!')

# TO SHOW IMAGES : ORIGINAL vs. RESIZE
# im_resize.show()
# im.show()             # NO need ... im_boxed image shows

reader = easyocr.Reader([
    'en',
    'ko',
])
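
# A minimal sketch of the usual next step (not from the original snippet): run OCR on the
# resized image and echo each result to the redirected stdout. easyocr.Reader.readtext
# accepts a file path, raw bytes, or a numpy array; converting to RGB first is an
# assumption to avoid alpha-channel issues with PNG input.
import numpy as np

results = reader.readtext(np.array(im_resize.convert('RGB')))
for bbox, text, conf in results:
    print(f'{conf:.2f}  {text}')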
Example #6
"""
# perspective correction :
"""
print(__doc__)

from _path import (DIR_SRC, echo, get_cut_dir, stop_if_none)

import cv2
import sys
import numpy as np

src = cv2.imread(DIR_SRC + 'namecard1.jpg')
src = stop_if_none(src, message='image loading failed!')

# Image size after perspective correction
w, h = 720, 400

# Corner points in the source image
src_quards = np.array([[325, 307], [760, 369], [718, 611], [231, 515]],
                      np.float32)
dst_quards = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]],
                      np.float32)

pers = cv2.getPerspectiveTransform(src_quards, dst_quards)
dst = cv2.warpPerspective(src, pers, (w, h))

cv2.imshow('src', src)
cv2.imshow('dst', dst)

cv2.waitKey()
cv2.destroyAllWindows()
Example #7
import sys
import cv2
import numpy as np

from _path import get_cut_dir, stop_if_none

dir_src = get_cut_dir('openCV_TAcademy_reboot') + 'src/'
dir_dnn = get_cut_dir('catcam') + 'src_dnn/'
dir_img = get_cut_dir('catcam') + 'src_img/'

model = dir_dnn + 'opencv_face_detector_uint8.pb'
config = dir_dnn + 'opencv_face_detector.pbtxt'

# IMAGE OBJECT LOAD! : IF OBJECT == NONE -> ERROR!
cat = cv2.imread(dir_img + 'ears_cat.png', cv2.IMREAD_UNCHANGED)
cat = stop_if_none(cat, message='Image open failed!')

# CAMERA OBJECT LOAD! : IF OBJECT == NONE -> ERROR!
# cap = cv2.VideoCapture(0)

# Currently the frame rate of the CSI camera on the Nano can only be set through gstreamer
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)


def gstreamer_pipeline(
    sensor_id=0,
    sensor_mode=3,
    capture_width=1280,
    capture_height=720,
    display_width=1280,
    display_height=720,
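    # The remaining parameters and the pipeline string below are assumptions (the
    # snippet is cut off here); this is a minimal sketch modeled on the common
    # nvarguscamerasrc pipeline used with CSI cameras on the Jetson Nano.
    framerate=60,
    flip_method=0,
):
    return (
        'nvarguscamerasrc sensor-id=%d sensor-mode=%d ! '
        'video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! '
        'nvvidconv flip-method=%d ! '
        'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '
        'videoconvert ! video/x-raw, format=(string)BGR ! appsink'
        % (sensor_id, sensor_mode, capture_width, capture_height,
           framerate, flip_method, display_width, display_height)
    )


# Typical usage: cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)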
Example #8
reye = cv2.CascadeClassifier(dir_dnn + 'haarcascade_righteye_2splits.xml')

lbl = ['CLOSED', 'OPEN']
face_x, face_y = 0, 0
detect_color = (30, 182, 15)  # BGR <- rgb(15, 182, 20)

model = tf.keras.models.load_model(dir_dnn + 'cnnCat2.h5')

count = 0
score = 0
thicc = 2

rpred, lpred = [-1], [-1]

cap = cv2.VideoCapture(0)
cap = stop_if_none(cap, "VIDEO LOAD FAILED!")

# Frame size
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
fps = cap.get(cv2.CAP_PROP_FPS)

# Print the frame resolution, total frame count, and FPS
print('Frame width:', width)
print('Frame height:', height)
print('Frame count:', count)
print('FPS:', fps)

# font = cv2.FONT_HERSHEY_COMPLEX_SMALL
font = cv2.FONT_HERSHEY_SIMPLEX
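
# A minimal sketch (not from the original snippet) of one detection step, assuming the
# model expects 24x24 grayscale eye crops and that lbl indexes its argmax output:
ret, frame = cap.read()
if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in reye.detectMultiScale(gray):
        eye = cv2.resize(gray[y:y + h, x:x + w], (24, 24)) / 255.0
        rpred = model.predict(eye.reshape(1, 24, 24, 1)).argmax(axis=1)
        cv2.rectangle(frame, (x, y), (x + w, y + h), detect_color, thicc)
        cv2.putText(frame, lbl[int(rpred[0])], (x, y - 10), font, 1, detect_color, 2)
    cv2.imshow('frame', frame)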
Example #9
import cv2
from _path import get_cut_dir, stop_if_none

dir_src = get_cut_dir('openCV_TAcademy') + 'src\\'
dir_dnn = get_cut_dir('classify') + 'src_dnn\\'
dir_img = get_cut_dir('classify') + 'src_img\\'

# model = dir_dnn + 'opencv_face_detector_uint8.pb'
# config = dir_dnn + 'opencv_face_detector.pbtxt'

model = dir_dnn + 'res10_300x300_ssd_iter_140000_fp16.caffemodel'
config = dir_dnn + 'deploy.prototxt'

# if not cap.isOpened():
cap = cv2.VideoCapture(0)
cap = stop_if_none(cap, message='Camera open failed!')

# if net.empty():
net = cv2.dnn.readNet(model, config)
net = stop_if_none(net, 'Net open failed!')

while True:
    _, frame = cap.read()
    if frame is None:
        break

    blob = cv2.dnn.blobFromImage(frame, 1, (300, 300), (104, 177, 123))
    net.setInput(blob)
    detect = net.forward()

    detect = detect[0, 0, :, :]
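
    # A minimal sketch of how the loop typically continues (not the original code):
    # each row of detect is [_, _, confidence, x1, y1, x2, y2] in normalized coordinates.
    h, w = frame.shape[:2]
    for d in detect:
        confidence = d[2]
        if confidence < 0.5:
            continue
        x1, y1, x2, y2 = int(d[3] * w), int(d[4] * h), int(d[5] * w), int(d[6] * h)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, f'{confidence:.2f}', (x1, y1 - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:        # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()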
Example #10
import cv2
import numpy as np

from tensorflow.keras.models import load_model
from _path import (get_cut_dir, stop_if_none)

# filename = 'hand-writing-00.jpg'     # orig - 1,083 kb
filename = 'hand-writing-01.jpg'  # LOW  -   256 kb
# filename = 'hand-writing-02.jpg'     # HIGH -   858 kb

model_file = 'model.h5'

dir_src = get_cut_dir('handwriting_mnist') + 'src\\'

img_color = cv2.imread(dir_src + filename, cv2.IMREAD_COLOR)
img_color = stop_if_none(img_color, message='image loading failed!')

img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)

_, img_binary = cv2.threshold(
    img_gray,
    0,
    255,
    cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU,
)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

img_binary = cv2.morphologyEx(
    img_binary,
    cv2.MORPH_DILATE,
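    kernel,                     # assumed final argument; the original snippet is cut off here
)

# A minimal sketch of a likely continuation (not the original code): find each digit's
# contour, feed 28x28 crops to the Keras model (model.h5 is assumed to be an MNIST-style
# classifier taking 28x28 grayscale input), and draw the predicted digit on the color image.
model = load_model(dir_src + model_file)

contours, _ = cv2.findContours(img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    digit = cv2.resize(img_binary[y:y + h, x:x + w], (28, 28)) / 255.0
    pred = model.predict(digit.reshape(1, 28, 28, 1)).argmax()
    cv2.rectangle(img_color, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(img_color, str(pred), (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

cv2.imshow('result', img_color)
cv2.waitKey()
cv2.destroyAllWindows()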