# Import the required libraries
from cv2 import cv2
import numpy as np

# Define a helper that saves a frame as an image
# image: the frame to save
# addr:  directory plus filename prefix for the image
# num:   numeric suffix appended to the filename (int)
def save_image(image, addr, num):
  address = addr + str(num) + '.jpg'
  cv2.imwrite(address, image)

# Read from a video file
videoCapture = cv2.VideoCapture("input.mp4")
# Or read from a camera instead:
# videoCapture = cv2.VideoCapture(1)
# Read the first frame
success, frame = videoCapture.read()
i = 0
timeF = 6  # save every 6th frame
j = 0
while success:
  i = i + 1
  if i % timeF == 0:
    j = j + 1
    save_image(frame, './pic/image', j)
    print('Saved frame:', i)
  success, frame = videoCapture.read()
Code Example #2
from cv2 import cv2
import numpy as np
from keras.models import load_model
import matplotlib.pyplot as plt
from PIL import Image

cam_id = 0
cap = cv2.VideoCapture(cam_id)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cap.set(cv2.CAP_PROP_BRIGHTNESS, 50)

myColors = [[166, 145, 128, 179, 255, 255],  # red HSV bounds
            [97, 190, 202, 114, 255, 255]]  # blue HSV bounds

myColorValues = [[34, 34, 242], [242, 208, 34]]  # BGR format

myPoints = []  #[x,y,colorId]

model = load_model("Mymodel.h5")


def findColor(img, myColors, myColorValues):
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    count = 0
    newPoints = []
    # each color entry holds [h_min, s_min, v_min, h_max, s_max, v_max]
    for color in myColors:
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(imgHSV, lower, upper)
        x, y = getContours(mask)
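The example cuts off at getContours, which is not shown. A sketch of that helper as it appears in the common virtual-painter tutorial this snippet resembles; the 500-pixel area threshold is an assumption:

def getContours(img):
    # return the pen-tip point: the horizontal center and top edge of the
    # bounding box of the largest qualifying contour
    contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    x, y, w, h = 0, 0, 0, 0
    for cnt in contours:
        if cv2.contourArea(cnt) > 500:  # assumed noise threshold
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
            x, y, w, h = cv2.boundingRect(approx)
    return x + w // 2, y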
Code Example #3
from primaryFun import *
from otsu_threshold import Bodyskin_Detect_Otsu
from MLP_predict import Predict
from cv2 import cv2
import numpy as np
import scfun
from time import perf_counter

last_time = 0
R = 70
length = 30
ges_num_buf = [0] * length

capture = cv2.VideoCapture(0)  # open the camera
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

while True:

    ret, frame = capture.read()
    if not ret:
        print("Failed to grab a frame")
        break

    sta = perf_counter()
    skin = Bodyskin_Detect_Otsu(frame)
    fin = perf_counter()

    print(fin - sta)  # skin-detection time in seconds

    contour_ls, convexHull, Hu_list = Find_Contour([skin])
Code Example #4
    def fps(self):
        if self._fps is None:
            cap = cv2.VideoCapture(self.file_path)
            self._fps = cap.get(cv2.CAP_PROP_FPS)
            cap.release()  # release the handle once the value is cached
        return self._fps
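This accessor caches the frame rate on first use; a minimal sketch of the class context it assumes (the class name is hypothetical, only file_path and _fps appear in the snippet):

class VideoFile:  # hypothetical wrapper class
    def __init__(self, file_path):
        self.file_path = file_path
        self._fps = None  # filled lazily by the fps accessor

    @property
    def fps(self):
        if self._fps is None:
            cap = cv2.VideoCapture(self.file_path)
            self._fps = cap.get(cv2.CAP_PROP_FPS)
            cap.release()
        return self._fps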
Code Example #5
    def collectVideoData(self,
                         video_url: str,
                         cameraMapping: CameraMapping,
                         attribute_arrs: VehicleAttributes,
                         conf_threshold: float = 0.5) -> VehicleAttributes:
        '''Detects objects and collects the different attributes of a video and of the vehicles in it.
        Parameters
        ---
        video_url : str
            A reference to the video location to read.
        cameraMapping : CameraMapping
            The camera mapping handed to the VehicleCounter for this video.
        attribute_arrs : VehicleAttributes
            The attribute container that any new data is appended to.
        conf_threshold : float
            The confidence threshold to accept an object as identified. If an object's confidence is below the threshold, no bounding box is drawn.
        Returns
        ---
        collectVideoData : VehicleAttributes
            A container for the different attributes of detected images, videos and their detected vehicles.
        '''
        video = cv2.VideoCapture(video_url)

        # Read the first frame
        response, frame = video.read()
        attribute_arrs.updateImageSize(frame.shape[:2])
        attribute_arrs.updateImageReference(frame)
        frame_number = 0
        car_counter = VehicleCounter(frame.shape[:2],
                                     video.get(cv2.CAP_PROP_FPS),
                                     cameraMapping)

        # Loop while there is a frame
        while response:

            # Get detections for the frame
            matches, _, class_ids = self.get_img_detections(
                frame, conf_threshold=conf_threshold)
            sizes = [(math.sqrt(w**2 + h**2)) for _, _, w, h in matches]

            # Update the car counter
            car_counter.update_count(matches, frame_number, frame, draw=False)

            attribute_arrs.appendValue(VehicleAttributes.ATTR_SIZE, sizes)
            attribute_arrs.appendValue(VehicleAttributes.ATTR_NOV,
                                       [len(matches)])
            attribute_arrs.appendValue(
                VehicleAttributes.ATTR_CLA,
                [str(self.classes[class_id]) for class_id in class_ids])

            frame_number += 1
            response, frame = video.read()
        car_counter.finalize_vehicles()

        for veh in car_counter.persistent_vehicles:
            veh.speed.insert(0, veh.speed[0])
            veh.angles.insert(0, veh.angles[0])
            attribute_arrs.appendValue(VehicleAttributes.ATTR_LOC,
                                       veh.positions)
            attribute_arrs.appendValue(VehicleAttributes.ATTR_SPE, veh.speed)
            attribute_arrs.appendValue(VehicleAttributes.ATTR_DIR, veh.angles)

        video.release()
        return attribute_arrs
Code Example #6
def get_length(filename):
    cap = cv2.VideoCapture(filename)
    fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV 2.x used "CV_CAP_PROP_FPS"
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration = frame_count / fps
    cap.release()
    return duration
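CAP_PROP_FRAME_COUNT reads container metadata, which can be missing or wrong for some formats; a hedged fallback sketch (the helper name is hypothetical) that decodes the stream and counts frames directly:

def get_length_exact(filename):
    # slower but container-independent: decode and count every frame
    cap = cv2.VideoCapture(filename)
    fps = cap.get(cv2.CAP_PROP_FPS)
    frames = 0
    while cap.read()[0]:
        frames += 1
    cap.release()
    return frames / fps if fps else 0.0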
Code Example #7
def generate_input(video_path):
    # input_size, test_size and factor are module-level configuration values
    print('Generating input data')

    if not os.path.exists('input'):
        os.makedirs('input')

    cap = cv2.VideoCapture(video_path)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Read until video is completed
    i = 0
    input_array = []
    test_input_array = []
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            i = i + 1

            if i == (100 + input_size + test_size):
                break

            print('Currently working on ' + str(i))
            if i < 100:
                print('Continue: ' + str(i))
                continue

            # Display the resulting frame
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            array = []
            for j in tqdm(range(0, frame.shape[0] // factor)):
                temp_array = []
                for k in range(0, frame.shape[1] // factor):
                    temp = np.array(frame)
                    temp = temp[j * factor:(j + 1) * factor,
                                k * factor:(k + 1) * factor]
                    temp = temp.reshape(factor * factor).astype('float32')
                    temp /= 255

                    if len(temp_array) == 0:
                        temp_array = np.array([temp])
                    else:
                        temp_array = np.append(temp_array, [temp], axis=0)

                if len(array) == 0:
                    array = np.array(temp_array)
                else:
                    array = np.append(array, temp_array, axis=0)
            # cv2.imshow('Frame', frame)

            if i < (100 + input_size):
                # print('Filling train input data: '+str(i))
                if len(input_array) == 0:
                    input_array = array
                else:
                    input_array = np.append(input_array, array, axis=0)
            elif i < (100 + input_size + test_size):
                if len(test_input_array) == 0:
                    test_input_array = array
                else:
                    test_input_array = np.append(test_input_array,
                                                 array,
                                                 axis=0)

            # Press Q on the keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()

    np.save('input/input', input_array)
    np.save('input/input_test', test_input_array)
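The nested loops above grow the patch arrays one np.append at a time, which copies the whole array on every call. A reshape-based sketch (helper name hypothetical) that yields the same (num_patches, factor * factor) layout in one step, assuming the frame dimensions are handled by the same // bounds:

def frame_to_patches(frame, factor):
    # tile a grayscale frame into factor x factor blocks, row-major over
    # blocks, each flattened and scaled to [0, 1]
    h = frame.shape[0] // factor * factor
    w = frame.shape[1] // factor * factor
    return (frame[:h, :w]
            .reshape(h // factor, factor, w // factor, factor)
            .transpose(0, 2, 1, 3)
            .reshape(-1, factor * factor)
            .astype('float32') / 255)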
Code Example #8
    def __init_video_capture(self, camera_idx: int, frame_w: int,
                             frame_h: int):
        self.vidCapture = cv2.VideoCapture(camera_idx)
        self.vidCapture.set(cv2.CAP_PROP_FRAME_WIDTH, frame_w)
        self.vidCapture.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_h)
Code Example #9
    print ("Error on ip address input")
    sys.exit()

# Specify the messaging type and the receiver IP address:
# 1 for REQ/REP messaging
# 2 for PUB/SUB messaging
if int(sys.argv[1]) == 1:
    sender = imagezmq.ImageSender(connect_to=ipAddress)
elif int(sys.argv[1]) == 2:
    sender = imagezmq.ImageSender(connect_to='tcp://*:6666', REQ_REP=False)

# send sender hostname with each image
sender_name = socket.gethostname()  

# Open a local video file; swap in a camera index here to use a webcam instead
cap = cv2.VideoCapture('test_video.mp4')
time.sleep(2.0)  # allow camera some time before sending

try:
    while True:  # send images as stream until Ctrl-C
        ret, image = cap.read()
        
        # Put any processing of the image before sending here,
        # for example rotation, ROI selection, conversion to grayscale, etc.

        # now = datetime.now() # Get timestamp of when the image is sent
        # current_time = now.strftime("%d/%m/%y %H:%M:%S.%f") # Change datetime to string

        msg = [sender_name, datetime.now().strftime("%d/%m/%y %H:%M:%S.%f")]  # Put sender name and timestamp on one message

        # Send images and timestamp and save the reply from receiver if you are using REQ/REP
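A minimal matching receiver sketch for the REQ/REP path, assuming imagezmq's default port 5555 on the receiving host:

import cv2
import imagezmq

image_hub = imagezmq.ImageHub()  # binds to tcp://*:5555 by default
while True:
    msg, image = image_hub.recv_image()  # msg arrives as sent: [name, timestamp]
    cv2.imshow(str(msg[0]), image)
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')  # REQ/REP requires a reply for every frame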
Code Example #10
File: mp_pose.py  Project: CyberMaryVer/detectron-doc
        # if _TRACKING_DEBUG:
        #     import pickle as pk
        #     with open("tracking.pickle", "wb") as f:
        #         pk.dump(video.tracking, f)
        #     print(len(video.tracking), video.tracking[0])

    elif _EXERCISE is not None:

        task_finished = False
        output_video = None
        keypoint_sets = []
        frame = 0

        if _WEBCAM:
            cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)

            while cv2.waitKey(1) != 27:
                ret, frame = cam.read()

                try:
                    outputs = predictor.get_keypoints(frame)
                    print(len(outputs))
                    kps = get_updated_keypoint_dict(outputs)
                    frame = visualize_keypoints_mp(kps,
                                                   frame,
                                                   skeleton=_CONNECTIONS,
                                                   dict_is_updated=True,
                                                   threshold=.8,
                                                   side=_SIDE,
                                                   mode=_MODE,
Code Example #11
    #server = smtplib.SMTP_SSL(smtp_server, portssl)
    server = smtplib.SMTP(smtp_server, port)
    server.ehlo_or_helo_if_needed()
    server.starttls()
    server.ehlo_or_helo_if_needed()
    ret, m = server.login(userid, passwd)
    if ret != 235:  # 235 = SMTP authentication succeeded
        print("login fail")
        return
    server.sendmail('me', to, msg.as_string())
    server.quit()

if __name__ == "__main__":
    thresh = 16
    cam = cv.VideoCapture(0)
    cam.set(cv.CAP_PROP_FRAME_WIDTH, 320)
    cam.set(cv.CAP_PROP_FRAME_HEIGHT, 240)
    if not cam.isOpened():
        print("Cam isn't opened")
        exit()
    i = [None, None, None]
    for n in range(3):
        i[n] = motion_picv.getGrayCameraImage(cam)
    flag = False
    checkFlag = 0
    while True:
        diff = motion_picv.diffImage(i)
        ret, thrimg = cv.threshold(diff, thresh, 1, cv.THRESH_BINARY)
        count = cv.countNonZero(thrimg)
        # check whether an intruder has been detected
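motion_picv is a project-local module that is not shown; a hedged sketch of the two helpers the loop relies on, assuming classic three-frame differencing:

def getGrayCameraImage(cam):
    # grab one frame and convert it to grayscale
    _, frame = cam.read()
    return cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

def diffImage(imgs):
    # AND of two successive absolute differences keeps only moving pixels
    d1 = cv.absdiff(imgs[1], imgs[0])
    d2 = cv.absdiff(imgs[2], imgs[1])
    return cv.bitwise_and(d1, d2)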
Code Example #12
# Use the same .cfg file that was used for training
config_path = "cfg/yolov4-csp.cfg"
weights_path = "weights/yolov4-csp.weights"
font_scale = 1
thickness = 1
LABELS = open(".names/coco.names").read().strip().split("\n")
print(LABELS)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

net = cv2.dnn.readNetFromDarknet(config_path, weights_path)

ln = net.getLayerNames()
# getUnconnectedOutLayers() returns 1-element arrays in OpenCV < 4.5.4 and
# plain integers in newer builds; np.ravel handles both
ln = [ln[int(np.ravel(i)[0]) - 1] for i in net.getUnconnectedOutLayers()]

cap = cv2.VideoCapture(
    "rtsp://*****:*****@192.168.0.105:554/cam/realmonitor?channel=1&subtype=0"
)
# cap = cv2.VideoCapture(0)

while True:
    ret, image = cap.read()
    if not ret:
        break

    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image,
                                 1 / 255.0, (416, 416),
                                 swapRB=True,
                                 crop=False)
    net.setInput(blob)
    start = time.perf_counter()
    layer_outputs = net.forward(ln)
    # time_took = time.perf_counter() - start
Code Example #13
from cv2 import cv2 as cv
import mediapipe as mp
import time

cap = cv.VideoCapture(0)

mpHands = mp.solutions.hands
hands = mpHands.Hands()
mpDraw = mp.solutions.drawing_utils

ptime = 0
ctime = 0

while True:
    success, img = cap.read()

    imgRGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    results = hands.process(imgRGB)

    # print(results.multi_hand_landmarks)
    if results.multi_hand_landmarks:
        for handlms in results.multi_hand_landmarks:
            # for (pid, lm) in enumerate(handlms.landmark):
            #     print(pid, lm)
            #     (h, w, c) = img.shape
            #     (cx, cy) = int(lm.x * w), int(lm.y * h)
            #     print(cx, cy)
            #     if pid == 0:
            #         cv.circle(img, (cx, cy), 10, (255, 0, 255), cv.FILLED)

            mpDraw.draw_landmarks(img, handlms, mpHands.HAND_CONNECTIONS)
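    # The ptime/ctime variables above set up an FPS readout that the snippet
    # truncates; the usual continuation of this tutorial (window name assumed):
    ctime = time.time()
    fps = 1 / (ctime - ptime)
    ptime = ctime
    cv.putText(img, str(int(fps)), (10, 70), cv.FONT_HERSHEY_PLAIN, 3,
               (255, 0, 255), 3)
    cv.imshow("Image", img)
    cv.waitKey(1)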
Code Example #14
File: camera.py  Project: valorun/CIPHER-vision
    def __init__(self):
        self.vc = cv2.VideoCapture(0)
        self.post_process = []
        self.opened = False
Code Example #15
File: main2.py  Project: Ansukalenka/Code-Ground
    if lines is not None:
        for x1, y1, x2, y2 in lines:
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 15)
    return line_image


def region_of_interest(image):
    height = image.shape[0]
    polygons = np.array([(200, height), (1100, height), (550, 250)])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, np.array([polygons], dtype=np.int32), 255)
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image


cap = cv2.VideoCapture('E:/VSgarage/Lane line detection/test2.mp4')

while cap.isOpened():

    _, frame = cap.read()
    if frame is None:
        break
    canny_image = canny(frame)
    cropped_image = region_of_interest(canny_image)
    lines = cv2.HoughLinesP(cropped_image,
                            2,
                            np.pi / 180,
                            100,
                            np.array([]),
                            minLineLength=40,
                            maxLineGap=5)
    averaged_lines = average_slope_intercept(frame, lines)
    line_image = display_lines(frame, averaged_lines)
Code Example #16
from cv2 import cv2
from matplotlib import pyplot

filename = "traffic1.avi"
capture = cv2.VideoCapture(filename)
reference_frame = None
image_area = None


def build_image_blur(image):
    return cv2.medianBlur(image, 31)


def build_image_gray(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


def build_image_threshold(image):
    _, threshold = cv2.threshold(image, 20, 255, cv2.THRESH_BINARY)

    return threshold


def build_contours(image):
    # note: despite the name, no filtering criterion is applied yet;
    # every contour found is kept
    filtered_contours = []
    contours, _ = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    for i in contours:
        filtered_contours.append(i)

    return filtered_contours
Code Example #17
File: video_to_frames.py  Project: Senwang98/MOF
    city = i.split('/')[0]
    video_number = i.split('/')[1]
    print(city)
    print(video_number)

    city_dir = ''.join([
        save_path,
        city,
    ])
    if not os.path.exists(city_dir):
        os.mkdir(city_dir)
    video_dir = ''.join([city_dir, '/', video_number])
    if not os.path.exists(video_dir):
        os.mkdir(video_dir)

    tmp_save_path = '/home/wangsen/ws/video2frame/' + i + '/'
    video_path = '/home/wangsen/ws/citywalks/clips/' + i
    cap = cv2.VideoCapture(video_path)

    n = 0
    while True:
        ret, frame = cap.read()
        if ret:
            cv2.imshow('', frame)
            cv2.waitKey(30)
            cv2.imwrite(tmp_save_path + str(n) + '.png', frame)
            n = n + 1
            print("Processing: ", i, " ---- ", n)
        else:
            break
    cap.release()
Code Example #18
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        print("camera warming up")
Code Example #19
parser = argparse.ArgumentParser(description='Search face in a Video.')
parser.add_argument("image_name",help="path of image with a single face.",type=file_exists)
parser.add_argument("video_name",help="path of video to search the provided face.",type=file_exists)
parser.add_argument("--process",help="Number of process,default is 2",type=int,default=2)

args = parser.parse_args()


from multiprocessing import Queue, Process, Value
from cv2 import cv2
from PIL import Image, ImageDraw
import face_recognition

image = face_recognition.load_image_file(args.image_name)
video = cv2.VideoCapture(args.video_name)
frames = Queue()
matched_frames = Queue()
no_of_frames = Value('i')
processed_frames = Value('i')
no_of_process = args.process

face_encoding = face_recognition.face_encodings(image)[0]

def display_frame(frame, face_locations=None):
    image = Image.fromarray(frame)
    if face_locations is not None:  # draw a box around each matched face
        draw = ImageDraw.Draw(image)
        for (top, right, bottom, left) in face_locations:
            draw.rectangle(((left, top), (right, bottom)), outline=(0, 255, 0))
        del draw
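The snippet stops before the worker code; a hedged sketch of what each of the `--process` workers could run, pulling frames off the queue and comparing them with the reference encoding (the function name and the None end-of-stream sentinel are assumptions):

def match_worker():
    while True:
        frame = frames.get()
        if frame is None:  # assumed end-of-stream sentinel
            break
        locations = face_recognition.face_locations(frame)
        encodings = face_recognition.face_encodings(frame, locations)
        for encoding, location in zip(encodings, locations):
            if face_recognition.compare_faces([face_encoding], encoding)[0]:
                matched_frames.put((frame, [location]))
        with processed_frames.get_lock():
            processed_frames.value += 1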
Code Example #20
  elif trackerType == trackerTypes[7]:
    tracker = cv2.TrackerCSRT_create()
  else:
    tracker = None
    print('Incorrect tracker name')
    print('Available trackers are:')
    for t in trackerTypes:
      print(t)
     
  return tracker

# Set video to load
videoPath = "videos/run.mp4"
 
# Create a video capture object to read videos
cap = cv2.VideoCapture(videoPath)
 
# Read first frame
success, frame = cap.read()
# quit if unable to read the video file
if not success:
  print('Failed to read video')
  sys.exit(1)


## Select boxes
bboxes = []
colors = [] 
 
# OpenCV's selectROI function doesn't work for selecting multiple objects in Python
# So we will call this function in a loop till we are done selecting all objects
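The selection loop itself, as it appears in the well-known multitracker tutorial this snippet follows (randint is assumed to be imported from random above):

while True:
  # draw a box over an object, then press ENTER or SPACE to confirm it
  bbox = cv2.selectROI('MultiTracker', frame)
  bboxes.append(bbox)
  colors.append((randint(0, 255), randint(0, 255), randint(0, 255)))
  print("Press q to quit selecting boxes and start tracking")
  print("Press any other key to select the next object")
  k = cv2.waitKey(0) & 0xFF
  if k == 113:  # 'q' pressed
    break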
Code Example #21
    def init_hardware():
        web_cam = cv2.VideoCapture(0)
        return web_cam
Code Example #22
    return line_image


# image = cv2.imread('assets/test_image.jpg')
# lane_image = np.copy(image)
# canny_image = canny(lane_image)
# cropped_image = region_of_interest(canny_image)
# lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5)
# averaged_lines = average_slope_intercept(lane_image, lines)
# line_image = display_lines(lane_image, averaged_lines)
# combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)

# cv2.imshow("result", combo_image)
# cv2.waitKey(0)

cap = cv2.VideoCapture("assets/test2.mp4")
while cap.isOpened():
    _, frame = cap.read()
    if frame is None:
        break

    canny_image = canny(frame)
    cropped_image = region_of_interest(canny_image)
    lines = cv2.HoughLinesP(cropped_image,
                            2,
                            np.pi / 180,
                            100,
                            np.array([]),
                            minLineLength=40,
                            maxLineGap=5)
    averaged_lines = average_slope_intercept(frame, lines)
    line_image = display_lines(frame, averaged_lines)
    combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
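    # The commented-out single-image block above shows how this loop usually
    # ends; the assumed continuation:
    cv2.imshow("result", combo_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break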
Code Example #23
File: main.py  Project: abc873693/video-to-images
        '-r', '--resize',
        action='store',
        type=int,
        default=0,
        help='resize to a resize x resize image; the default 0 keeps the original size.')

    args = parser.parse_args()
    # print('[ INFO ] Options:', args)

    source_path = args.input_path
    size = args.resize

    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # '/Users/rainvisitor/Movies/黃英哲老師實驗室提供影片/可見光/濁.avi'
    vc = cv.VideoCapture(source_path)
    index = 0
    rval = vc.isOpened()
    while rval:
        index = index + 1
        rval, frame = vc.read()
        if rval:
            if size != 0:
                frame = cv.resize(frame, (size, size),
                                  interpolation=cv.INTER_CUBIC)
            #frame = cv.rotate(frame, cv.ROTATE_90_COUNTERCLOCKWISE)
            cv.imwrite('./out/frame'+str(index).zfill(5) + '.jpg', frame)
            cv.waitKey(1)
        else:
            break
    vc.release()
Code Example #24
import numpy as np
#import matplotlib
#import matplotlib.pyplot as plt
#import argparse
from cv2 import cv2 as cv
import imutils
import time

# preliminary attempt at lane following system
# largely derived from: https://medium.com/pharos-production/
# road-lane-recognition-with-opencv-and-ios-a892a3ab635c

# identify filename of video to be analyzed
cap = cv.VideoCapture('datas/videos/roadway_01.mp4')

# loop through until entire video file is played
while cap.isOpened():

    # read video frame & show on screen
    ret, frame = cap.read()
    # cv.imshow("Original Scene", frame)

    # snip section of video frame of interest & show on screen
    if frame is None:
        cv.waitKey(0)
        break
    snip = frame[500:700, 300:900]
    cv.imshow("Snip", snip)

    # create polygon (trapezoid) mask to select region of interest
    mask = np.zeros((snip.shape[0], snip.shape[1]), dtype="uint8")
Code Example #25
from cv2 import cv2
import time, pandas, os
from datetime import datetime

first_frame = None
status_list = [None, None]
times = []
df = pandas.DataFrame(columns=["Start", "End"])
video = cv2.VideoCapture(0)
file_output = 'output.avi'
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(file_output, fourcc, 50.0, (int(width), int(height)))

while True:
    check, frame = video.read()
    if not check:  # stop when no more frames can be read
        break
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(frame, "Motion Detector App " + str(datetime.now()), (50, 50),
                font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)

    status = 0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if first_frame is None:
        first_frame = gray
        continue

    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
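    # The snippet truncates here; the usual continuation of this well-known
    # motion-detector tutorial (the 10000-pixel area threshold is its choice):
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    contours, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) < 10000:
            continue
        status = 1  # motion detected in this frame
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)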
Code Example #26
# Function: Find the midpoint between two points
def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)


# Function: Use the law of cosines to find cos(phi), then return the
# projection of side a as (x, y) components
def pos(a, b, c):
    cosphi = (a**2 + c**2 - b**2) / (2 * a * c)
    y = a * float(cosphi)
    x = a * float(math.sqrt(abs(1 - cosphi * cosphi)))
    return (x, y)


# Take the link of the IP webcam stream
url = "http://192.168.0.100:8080"
cap = cv2.VideoCapture(url + "/video")
width = 150  # width of the picture frame, in cm

# Define names of each possible ArUco tag OpenCV supports
ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
    "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
    "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
    "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
    "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
    "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
    "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
    "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
    "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
Code Example #27
from cv2 import cv2
import numpy as np

facecascade = cv2.CascadeClassifier("Resources/haarcascades/haarcascade_frontalface_default.xml")

cap = cv2.VideoCapture(0)
while True:
    success, frame = cap.read()
    if not success:
        break
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecascade.detectMultiScale(grayFrame, 1.1, 4)
    for x, y, w, h in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow("Video", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Code Example #28
File: ocr.py  Project: Fethbita/eMRTD_face_access
def capture_mrz(window: sg.Window,
                camera_id: int) -> Tuple[List[str], Image.Image]:
    """
    Capture the MRZ by using OCR on the camera footage.

    :returns: a tuple of (MRZ lines as a list, the frame the MRZ was read from)
    """

    cap = cv2.VideoCapture(camera_id)

    tess_api = PyTessBaseAPI(init=False, psm=PSM.SINGLE_BLOCK_VERT_TEXT)
    tess_api.InitFull(
        # https://github.com/DoubangoTelecom/ultimateMRZ-SDK/tree/master/assets/models
        path="text_detection",
        lang="mrz",
        variables={
            "load_system_dawg": "false",
            "load_freq_dawg": "false",
            "tessedit_char_whitelist": "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ<",
        },
    )
    # mrz_list: List[List[str]] = []

    pool = ThreadPool(processes=1)
    ocr_running = False
    while True:
        _, frame = cap.read()

        mrz = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        mrz = cv2.adaptiveThreshold(mrz, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 21, 10)
        # mrz = cv2.adaptiveThreshold(mrz, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,3,2)
        # mrz = cv2.GaussianBlur(mrz, (5,5), 0)
        # _, mrz = cv2.threshold(mrz, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        # mrz = cv2.GaussianBlur(mrz, (5,5), 0)
        # mrz = cv2.medianBlur(mrz, 3)
        frame_shown = copy.deepcopy(mrz)
        width = 320
        height = int(frame_shown.shape[0] * (320 / frame_shown.shape[1]))
        frame_shown = cv2.resize(frame_shown, (width, height))

        alpha = 0.8
        frame_overlay = add_mrz_overlay(copy.deepcopy(frame_shown),
                                        "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<", 3,
                                        0.9, False)
        frame_overlay = add_mrz_overlay(
            frame_overlay, "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<", 2,
            0.9, True)
        cv2.addWeighted(frame_shown, alpha, frame_overlay, 1 - alpha, 0,
                        frame_shown)

        imgbytes = cv2.imencode(".png", frame_shown)[1].tobytes()
        window.write_event_value("-SHOW MRZ-", [imgbytes])

        mrz = Image.fromarray(mrz)
        if not ocr_running:
            checked_frame = Image.fromarray(frame[:, :, ::-1])
            tess_api.SetImage(mrz)
            async_result = pool.apply_async(tess_api.GetUTF8Text)
            ocr_running = True

        if async_result.ready():
            ocr_running = False
            mrz_text = async_result.get()
            result = parse_mrz_ocr(mrz_text)

            if result is not None:
                break

            # if result and len(mrz_list) < 3:
            #     mrz_list.append(result)
            # elif not result:
            #     mrz_list = []
            # else:
            #     if all(x == mrz_list[0] for x in mrz_list):
            #         break

    # When everything done, release the capture
    cap.release()
    # cv2.destroyAllWindows()
    tess_api.End()

    # return mrz_list[0]
    window.write_event_value("-HIDE MRZ-", "")

    return (result, checked_frame)
Code Example #29
import sys
sys.path.append(
    '/Users/Dell/AppData/Local/Programs/Python/Python37/Lib/site-packages')
# sys.path.append('/Users/Dell/AppData/Local/Programs/Python/Python37/Lib/site-packages/pytesseract')

# sys.path tweak needed to import packages like cv2 from site-packages in the lib folder
from cv2 import cv2  #pip install opencv-python
import numpy as np
from PIL import Image
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files/Tesseract-OCR/tesseract.exe'
import matplotlib.pyplot as plt

cam = cv2.VideoCapture("./WhatsApp Video 2020-06-26 at 9.48.00 AM.mp4")
licence = cv2.CascadeClassifier('./indian_license_plate.xml')

while True:
    ret, img = cam.read()
    if not ret:
        print("Something Went Wrong!")
        break  # the stream has ended; continuing here would loop forever

    key_pressed = cv2.waitKey(1) & 0xFF  #Bitmasking to get last 8 bits
    if key_pressed == ord('q'):
        break

    plates = licence.detectMultiScale(img, 1.3, 7)

    if (len(plates) == 0):
        continue
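    # The snippet stops here; a hedged continuation that draws each detected
    # plate and runs OCR on the cropped region:
    for (x, y, w, h) in plates:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        plate_roi = img[y:y + h, x:x + w]
        text = pytesseract.image_to_string(
            Image.fromarray(cv2.cvtColor(plate_roi, cv2.COLOR_BGR2RGB)))
        print(text.strip())
    cv2.imshow("Plates", img)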
Code Example #30
#parser = argparse.ArgumentParser()
#parser.add_argument('--image', help = 'Path to the image to be predicted', required = True)
#parser.add_argument('--saved_model', help = 'Path of the saved model', required = True)

#args = parser.parse_args()

modelPath = os.path.join("model.h5")

model = build_model('inference', model_path=modelPath)

#cap = cv2.VideoCapture(0)

#ret, img1 = cap.read()

cap = cv2.VideoCapture('new.mp4')
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # cap.get(3)
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # cap.get(4)
counting = 0
outputsX = ["banana", "apple", r"['salt', 'flour', 'sugar']"]
# Define the codec and create a VideoWriter object. The output is stored in 'output.avi'.
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                      10, (frame_width, frame_height))
ret, img1 = cap.read()

while cap.isOpened():
    # Capture frame-by-frame
    ret, img2 = cap.read()

    if ret:
        img2Clone = img2.copy()