Example #1
import cv2

# NOTE: `pipeline` and `extra_processing` are defined elsewhere in the
# original project.


def main():

    print('Creating video capture')

    cap = cv2.VideoCapture(1)  # open camera index 1

    while cap.isOpened():
        timer = cv2.getTickCount()
        have_frame, showFrame = cap.read()
        if not have_frame:
            break

        pipeline.process(showFrame)
        extra_processing(showFrame)

        # FPS = tick frequency / ticks elapsed since the frame was grabbed
        fps = int(cv2.getTickFrequency() / (cv2.getTickCount() - timer))
        cv2.putText(showFrame, "FPS : " + str(fps), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.imshow("match?", showFrame)
        cv2.waitKey(1)
    print('Capture closed')
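The FPS overlay above relies on OpenCV's tick counter: ticks elapsed divided by cv2.getTickFrequency() gives seconds, so frequency divided by elapsed ticks gives frames per second. A minimal sketch of the idiom (variable names are illustrative):

start = cv2.getTickCount()
# ... per-frame work ...
elapsed_s = (cv2.getTickCount() - start) / cv2.getTickFrequency()
fps = 1.0 / elapsed_s if elapsed_s > 0 else 0.0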
Example #2
import cv2 as cv
import numpy as np


def access_pixels(image):
    # invert every pixel one channel at a time (slow, for demonstration)
    print(image.shape)
    height = image.shape[0]
    width = image.shape[1]
    channels = image.shape[2]
    print("width:%s,height:%s,channels:%s" % (width, height, channels))
    for row in range(height):
        for col in range(width):
            for c in range(channels):
                pv = image[row, col, c]
                image[row, col, c] = 255 - pv
    cv.imshow("pixels_demo", image)


def create_image():
    img = np.zeros([400, 400, 3], np.uint8)
    img[:, :, 0] = np.ones([400, 400]) * 255  # set the blue channel to 255 (BGR order)
    cv.imshow("new image", img)


src = cv.imread("C:/Users/32936/Desktop/2/1.jpg")
t1 = cv.getTickCount()
create_image()
t2 = cv.getTickCount()
print("time%s" % ((t2 - t1) / cv.getTickFrequency()))
cv.waitKey(0)

cv.destroyAllWindows()
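The triple loop in access_pixels is easy to follow but very slow in Python. As a hedged sketch of the vectorized equivalent (the function name here is illustrative), the whole image can be inverted in one call:

def access_pixels_fast(image):
    inverted = 255 - image  # or cv.bitwise_not(image); same result, done in C
    cv.imshow("pixels_demo", inverted)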
Example #3
def clock():
    # current tick count converted to seconds
    return cv.getTickCount() / cv.getTickFrequency()
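A short usage sketch for clock(): take two readings and subtract to get elapsed seconds (names below are illustrative):

start = clock()
# ... code being timed ...
print("elapsed: %.3f s" % (clock() - start))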
Example #4
import cv2
import numpy as np
import pytesseract


def videoCap():
    # pytesseract.pytesseract.tesseract_cmd = "C:/Program Files/Tesseract-OCR/tesseract"
    cap = cv2.VideoCapture(
        'C:/Users/jeawa/Desktop/project/LPR_Project/project/video/test_car.mp4'
    )

    count = 0
    before_chars = ""

    while True:
        e1 = cv2.getTickCount()
        fps = cap.get(cv2.CAP_PROP_FPS)  # source video frame rate

        ret, img_ori = cap.read()
        if not ret:  # stop when the video runs out of frames
            break

        height, width, channel = img_ori.shape  # image height, width, channels

        gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

        # img_ori = cv2.imread('LPR_Project/project/image/3.jpg')  # load a still image instead
        # gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

        img_blurred = cv2.GaussianBlur(gray, ksize=(3, 3), sigmaX=0)  # remove noise

        img_thresh = cv2.adaptiveThreshold(
            img_blurred,
            maxValue=255.0,
            adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            thresholdType=cv2.THRESH_BINARY_INV,
            blockSize=19,  # tried 12, 15 and 20 as well
            C=9)

        contours, _ = cv2.findContours(  # find the contours
            img_thresh,
            mode=cv2.RETR_LIST,
            method=cv2.CHAIN_APPROX_TC89_KCOS
        )

        temp_result = np.zeros((height, width, channel), dtype=np.uint8)

        cv2.drawContours(
            temp_result,  # canvas to draw on
            contours=contours,  # contour data
            contourIdx=-1,  # -1: draw all contours
            color=(255, 255, 255),
            thickness=1)

        contours_dict = []
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)  # rectangle enclosing the contour
            # cv2.rectangle(
            #     temp_result,
            #     pt1=(x, y),
            #     pt2=(x+w, y+h),
            #     color=(255, 255, 255),
            #     thickness=1
            # )
            contours_dict.append({
                'contour': contour,
                'x': x,
                'y': y,
                'w': w,
                'h': h,
                'cx': x + (w / 2),  # center coordinates
                'cy': y + (h / 2)
            })
        MIN_AREA, MAX_AREA = 100, 1000  # min/max area of the bounding rect
        MIN_WIDTH, MIN_HEIGHT = 2, 8  # min width and height of the bounding rect
        MIN_RATIO, MAX_RATIO = 0.3, 0.9  # allowed width-to-height ratio

        possible_contours = []  # rects that satisfy the constraints above
        cnt = 0
        for d in contours_dict:
            area = d['w'] * d['h']  # width * height = area
            ratio = d['w'] / d['h']  # width / height = aspect ratio

            if MIN_AREA < area < MAX_AREA and d['w'] > MIN_WIDTH and d[
                    'h'] > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:
                d['idx'] = cnt  # remember the index of each match
                cnt += 1
                possible_contours.append(d)  # collect the candidate

        # temp_result = np.zeros((height, width, channel), dtype=np.uint8)

        for d in possible_contours:
            # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
            cv2.rectangle(temp_result,
                          pt1=(d['x'], d['y']),
                          pt2=(d['x'] + d['w'], d['y'] + d['h']),
                          color=(0, 0, 255),
                          thickness=1)

        MAX_DIAG_MULTIPLYER = 4  # neighbors must lie within 4 diagonal lengths
        MAX_ANGLE_DIFF = 12.0  # max angle (theta) between centers
        MAX_AREA_DIFF = 0.3  # max area difference ratio
        MAX_WIDTH_DIFF = 0.5  # max width difference ratio
        MAX_HEIGHT_DIFF = 0.6  # max height difference ratio
        MIN_N_MATCHED = 4  # discard groups with fewer matches than this

        def find_chars(contour_list):
            matched_result_idx = []  # index lists of matched groups
            for d1 in contour_list:
                matched_contours_idx = []
                for d2 in contour_list:
                    if d1['idx'] == d2['idx']:  # skip comparing a contour with itself
                        continue

                    dx = abs(d1['cx'] - d2['cx'])
                    dy = abs(d1['cy'] - d2['cy'])

                    diagonal_length1 = np.sqrt(d1['w']**2 + d1['h']**2)

                    distance = np.linalg.norm(
                        np.array([d1['cx'], d1['cy']]) -
                        np.array([d2['cx'], d2['cy']]))  # distance between centers
                    if dx == 0:
                        angle_diff = 90  # avoid dividing by zero: treat as 90 degrees
                    else:
                        angle_diff = np.degrees(np.arctan(dy / dx))  # angle between centers
                    area_diff = abs(d1['w'] * d1['h'] - d2['w'] * d2['h']) / (
                        d1['w'] * d1['h'])  # area difference ratio
                    width_diff = abs(d1['w'] - d2['w']) / d1['w']  # width difference ratio
                    height_diff = abs(d1['h'] - d2['h']) / d1['h']  # height difference ratio
                    # matching conditions
                    if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \
                            and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                            and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                        # d1 itself is appended after this inner loop
                        matched_contours_idx.append(d2['idx'])

                # append this contour
                matched_contours_idx.append(d1['idx'])

                if len(matched_contours_idx) < MIN_N_MATCHED:  # too few members to form a plate
                    continue

                matched_result_idx.append(matched_contours_idx)  # final candidate group

                unmatched_contour_idx = []  # contours outside the candidate group
                for d4 in contour_list:
                    if d4['idx'] not in matched_contours_idx:
                        unmatched_contour_idx.append(d4['idx'])

                unmatched_contour = np.take(possible_contours,
                                            unmatched_contour_idx)

                # recursive
                recursive_contour_list = find_chars(unmatched_contour)

                for idx in recursive_contour_list:
                    matched_result_idx.append(idx)  # groups found among the leftovers

                break
            return matched_result_idx

        result_idx = find_chars(possible_contours)
        matched_result = []
        for idx_list in result_idx:
            matched_result.append(np.take(possible_contours, idx_list))
        # visualize possible contours
        temp_result = np.zeros((height, width, channel), dtype=np.uint8)
        for r in matched_result:
            for d in r:
                cv2.rectangle(temp_result,
                              pt1=(d['x'], d['y']),
                              pt2=(d['x'] + d['w'], d['y'] + d['h']),
                              color=(0, 0, 255),
                              thickness=1)

        PLATE_WIDTH_PADDING = 1.2  # 1.3
        PLATE_HEIGHT_PADDING = 1.5  # 1.5
        MIN_PLATE_RATIO = 3
        MAX_PLATE_RATIO = 7  #10

        plate_imgs = []
        plate_infos = []

        for i, matched_chars in enumerate(matched_result):
            sorted_chars = sorted(matched_chars,
                                  key=lambda x: x['cx'])  # sort left to right by center x

            plate_cx = (sorted_chars[0]['cx'] +
                        sorted_chars[-1]['cx']) / 2  # plate center
            plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2

            plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                           sorted_chars[0]['x']) * PLATE_WIDTH_PADDING  # plate width

            # sum_height = 0
            # for d in sorted_chars:
            #     sum_height += d['h']

            # plate_height = int(sum_height / len(sorted_chars)
            #                    * PLATE_HEIGHT_PADDING)  # average-height variant
            plate_height = (sorted_chars[-1]['y'] - sorted_chars[0]['y'] +
                            sorted_chars[-1]['h']) * PLATE_HEIGHT_PADDING
            triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
            triangle_hypotenus = np.linalg.norm(
                np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
                np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']]))

            angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))

            rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx,
                                                              plate_cy),
                                                      angle=angle,
                                                      scale=1.0)

            # rotate so the characters sit on a horizontal line
            img_rotated = cv2.warpAffine(img_thresh,
                                         M=rotation_matrix,
                                         dsize=(width, height))

            img_cropped = cv2.getRectSubPix(img_rotated,
                                            patchSize=(int(plate_width),
                                                       int(plate_height)),
                                            center=(int(plate_cx),
                                                    int(plate_cy)))

            plate_ratio = img_cropped.shape[1] / img_cropped.shape[0]
            if plate_ratio < MIN_PLATE_RATIO or plate_ratio > MAX_PLATE_RATIO:
                continue

            plate_imgs.append(img_cropped)
            plate_infos.append({
                'x': int(plate_cx - plate_width / 2),
                'y': int(plate_cy - plate_height / 2),
                'w': int(plate_width),
                'h': int(plate_height)
            })
            for plate_img in plate_imgs:

                plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
                _, plate_img = cv2.threshold(plate_img,
                                             thresh=0.0,
                                             maxval=255.0,
                                             type=cv2.THRESH_BINARY
                                             | cv2.THRESH_OTSU)
                # plt.imshow(plate_img, cmap='gray')
                # plt.show()
                # find contours again (same as above)
                contours, _ = cv2.findContours(plate_img,
                                               mode=cv2.RETR_LIST,
                                               method=cv2.CHAIN_APPROX_SIMPLE)

                plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[
                    0]
                plate_max_x, plate_max_y = 0, 0

                for contour in contours:
                    x, y, w, h = cv2.boundingRect(contour)

                    area = w * h
                    ratio = w / h

                    if area > MIN_AREA \
                            and w > MIN_WIDTH and h > MIN_HEIGHT \
                            and MIN_RATIO < ratio < MAX_RATIO:
                        if x < plate_min_x:
                            plate_min_x = x
                        if y < plate_min_y:
                            plate_min_y = y
                        if x + w > plate_max_x:
                            plate_max_x = x + w
                        if y + h > plate_max_y:
                            plate_max_y = y + h

            img_result = plate_img[plate_min_y:plate_max_y,
                                   plate_min_x:plate_max_x]
            img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
            _, img_result = cv2.threshold(img_result,
                                          thresh=0.0,
                                          maxval=255.0,
                                          type=cv2.THRESH_BINARY
                                          | cv2.THRESH_OTSU)
            img_result = cv2.copyMakeBorder(img_result,
                                            top=10,
                                            bottom=10,
                                            left=20,
                                            right=10,
                                            borderType=cv2.BORDER_CONSTANT,
                                            value=(0, 0, 0))
            chars = pytesseract.image_to_string(img_result,
                                                lang='kor',
                                                config='--psm 7 --oem 0')  # psm 7: treat the crop as one text line

            result_chars = ''

            has_digit = False
            # the fixed set of Hangul syllables used on Korean license plates
            plate_hangul = '가나다라마거너더러머버서어저고노도로모보소오조구누두루무부수우주아바사자배하허호'
            for c in chars:
                if c.isdigit() or c in plate_hangul:
                    if c.isdigit():
                        has_digit = True
                    result_chars += c
            # print(result_chars)
            # print(len(result_chars))

            if result_chars == "":
                pass
            else:
                if before_chars == result_chars and 6 < len(result_chars) < 9:
                    count += 1
                else:
                    before_chars = result_chars
                    count = 0

            if count == 1:
                print(result_chars)
                count = 0
                before_chars = ""

        # print(str(fps) + " : source video frame rate")
        print(fps)
        a = np.hstack((img_ori, temp_result))
        cv2.imshow("Go", a)
        if cv2.waitKey(42) == ord('q'):
            break
        e2 = cv2.getTickCount()
        time = (e2 - e1) / cv2.getTickFrequency()
        print(time)
        # print("processing time for this frame: " + str(time) + " s")
    cap.release()
    cv2.destroyAllWindows()
Example #5
import cv2 as cv
import numpy as np


def clamp(pv):
    # clamp a pixel value to the valid uint8 range [0, 255]
    if pv > 255:
        return 255
    elif pv < 0:
        return 0
    return pv


def gaussian_noise(image):  # add Gaussian noise to the image
    h, w = image.shape[:2]
    for row in range(h):
        for col in range(w):
            s = np.random.normal(0, 20, 3)
            b = image[row, col, 0]
            g = image[row, col, 1]
            r = image[row, col, 2]
            image[row, col, 0] = clamp(b + s[0])
            image[row, col, 1] = clamp(g + s[1])
            image[row, col, 2] = clamp(r + s[2])
    cv.imshow("image", image)
    return image


src = cv.imread("C:/Users/32936/Desktop/2/1.jpg")
t1 = cv.getTickCount()
rst = cv.GaussianBlur(gaussian_noise(src), (0, 0), 15)
t2 = cv.getTickCount()
t = (t2 - t1) / cv.getTickFrequency()
print("spend time:%s" % (t))
cv.imshow("result", rst)
cv.waitKey(0)
cv.destroyAllWindows()
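The per-pixel loops make gaussian_noise very slow on full-size images. A hedged sketch of a vectorized equivalent (same noise distribution, drawn for the whole array at once; the function name is illustrative):

def gaussian_noise_fast(image):
    noise = np.random.normal(0, 20, image.shape)  # one draw per pixel and channel
    noisy = np.clip(image.astype(np.float64) + noise, 0, 255).astype(np.uint8)
    return noisy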
Example #6
import sys
import threading
import traceback
from time import sleep

import av  # PyAV, used to decode the drone video stream
import cv2
import numpy as np
from PIL import Image

# `drone`, `handler`, `droneControl`, `ObjectDetectionPredict` and MODEL_NAME
# are defined elsewhere in the original project.


def main():
    global drone
    prediction_class = ObjectDetectionPredict(model_name=MODEL_NAME)

    try:
        drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        # skip the first 1000 frames
        frame_skip = 1000
        bbox = (287, 23, 86, 320)
        c = 0

        # drone.takeoff()
        # sleep(5)

        th = threading.Thread(target=droneControl)
        th.start()

        landflag = False
        while True:
            print("******************\nNEW WHILE\n******************** ")
            for frame in container.decode(video=0):

                if frame_skip > 0:
                    frame_skip = frame_skip - 1
                    continue

                if c < 2:  # process only every third decoded frame
                    c += 1
                    continue
                else:
                    c = 0

                pil_im = Image.fromarray(np.array(frame.to_image()))
                print("pil_im :", pil_im)

                timer = cv2.getTickCount()

                scores, classes, img, boxes = prediction_class.predict_single_image(
                    pil_im)
                opencvImage = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
                print('FPS : ' + str(float(fps)))
                cv2.putText(opencvImage, "FPS : " + str(int(fps)), (50, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
                cv2.imshow('opencvImage', opencvImage)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    landflag = True
                    cv2.destroyAllWindows()
                    break
            if landflag:
                print('down')
                drone.down(50)
                sleep(3)
                drone.land()
                sleep(1)
                break

        # prediction_class.sess.close()

        print('down again')
        drone.land()
        sleep(1)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #7

# Excerpt: `interpreter` (a TensorFlow Lite Interpreter), `VideoStream`, `imW`
# and `imH` are created earlier in the original script.
interpreter.allocate_tensors()

# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)

input_mean = 127.5
input_std = 127.5

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()

# Initialize video stream
videostream = VideoStream(resolution=(imW, imH)).start()
time.sleep(1)


def get_color(name):
    if name == 'car':
        return 10, 255, 0
    elif name == 'person':
        return 0, 10, 255
    elif name == 'cycle':
        return 255, 10, 0
    return 0, 127, 255
Example #8

import cv2
import numpy as np
'''
# Template
e1 = cv2.getTickCount()
# your code here
e2 = cv2.getTickCount()
time = (e2 - e1) / cv2.getTickFrequency()
print(time)
'''
img1 = cv2.imread('img/bird.jpg')
e1 = cv2.getTickCount()

for i in range(5, 49, 2):
    img1 = cv2.medianBlur(img1, i)

e2 = cv2.getTickCount()
t = (e2 - e1) / cv2.getTickFrequency()
print(t)
Example #9
 def __init__(self, buffer_len=1):
     # class-method excerpt; requires: import cv2; from collections import deque
     self._start_tick = cv2.getTickCount()
     self._freq = 1000.0 / cv2.getTickFrequency()  # milliseconds per tick
     self._difftimes = deque(maxlen=buffer_len)
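Only __init__ survives in this excerpt. A hedged sketch of how the rest of such a frame-timer class typically looks (the method name and return value are assumptions, not the original API):

 def update(self):
     # milliseconds since the previous call, averaged over the buffer
     now = cv2.getTickCount()
     self._difftimes.append((now - self._start_tick) * self._freq)
     self._start_tick = now
     return sum(self._difftimes) / len(self._difftimes)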
Example #10
import cv2 as cv
import numpy as np

# `access_pixels` / `inverse` (pixel inversion) are defined earlier in the
# original script.


def create_image():
    img = np.zeros([400, 400, 1], np.uint8)  # single-channel image, initial value 0 (use np.ones for 1)
    img[:, :, 0] = np.ones([400, 400]) * 127
    # img = img * 127
    cv.imshow("new image", img)

    '''
    n1 = np.ones([3,3],np.uint8)
    n1.fill(12222.388)
    print(n1)
    
    n2 = n1.reshape([1,9])
    print(n2)
    
    n3 = np.array([[2,3,4],[4,5,6],[7,8,9]],np.int32)
    n3.fill(9)
    print(n3)
    '''

src = cv.imread("F:/photo/tx.jpg")
cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src)
t1 = cv.getTickCount()
# access_pixels(src)
inverse(src)
t2 = cv.getTickCount()
time = (t2 - t1) / cv.getTickFrequency()  # elapsed seconds
print("time:%s ms" % (time * 1000))
create_image()
cv.waitKey(0)
cv.destroyAllWindows()
print("Hi,python")
Example #11
    # Excerpt: `tracker`, `tracker_type`, `video`, `frame` and `bbox` are set
    # up earlier in the original script.
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
Example #12
import cv2 as cv
import numpy as np

img = cv.imread("star.jpg")
e1 = cv.getTickCount()  # you can also time this with Python's time module

# measure how long the code takes to run
for i in range(5, 49, 2):
    img = cv.medianBlur(img, i)
e2 = cv.getTickCount()
time = (e2 - e1) / cv.getTickFrequency()
print(time)

# check whether optimization is enabled
ret = cv.useOptimized()
print(ret)

# the %time magic command is another option (in IPython)
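The useOptimized() check pairs naturally with cv.setUseOptimized(). A hedged sketch comparing the same filter with optimization disabled and enabled:

cv.setUseOptimized(False)
e1 = cv.getTickCount()
img2 = cv.medianBlur(img, 49)
print("unoptimized: %s s" % ((cv.getTickCount() - e1) / cv.getTickFrequency()))

cv.setUseOptimized(True)
e1 = cv.getTickCount()
img2 = cv.medianBlur(img, 49)
print("optimized: %s s" % ((cv.getTickCount() - e1) / cv.getTickFrequency()))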