import av       # PyAV, used to decode the Tello video stream
import cv2
import numpy
import tellopy


def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        while True:
            frame = next(container.decode(video=0))  # Python 3: generators have no .next()

            image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
            cv2.imshow('Original', image)
            cv2.imshow('Canny', cv2.Canny(image, 100, 200))
            cv2.waitKey(1)

    except Exception as ex:
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
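
The same decode-and-display loop can be tested without a drone. A minimal sketch, assuming a local file test.mp4 stands in for the Tello stream; frame.to_ndarray avoids the PIL round-trip used above:

import av
import cv2

container = av.open('test.mp4')  # hypothetical local video file
for frame in container.decode(video=0):
    img = frame.to_ndarray(format='bgr24')  # decode straight to a BGR array
    cv2.imshow('Canny', cv2.Canny(img, 100, 200))
    if cv2.waitKey(1) == 27:  # ESC quits
        break
cv2.destroyAllWindows()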
Example #2
import cv2
import numpy as np

# `lbp` is assumed to be a local-binary-pattern implementation defined elsewhere,
# e.g. a wrapper around skimage.feature.local_binary_pattern.


def resize(frame):
    img = cv2.resize(frame, (800, 800))
    return img


def normalize(arr):
    range_ = arr.max() - arr.min()
    arr = arr / range_
    return arr


def pattern(img, radius=3, points=8):
    # the number of sampling points scales with the radius
    n_points = points * radius
    img = normalize(img)
    img1 = lbp(img, radius, n_points)
    img1 = normalize(img1)
    original_image = resize(img)
    lbp_image = resize(img1)
    stack = np.hstack([original_image, lbp_image])
    return stack


if __name__ == "__main__":
    # read the original image
    lena = cv2.imread("./lena.jpg")
    # convert BGR --> grayscale
    gray = cv2.cvtColor(lena, cv2.COLOR_BGR2GRAY)
    stack = pattern(gray)
    cv2.imshow("Local Binary Pattern", stack)
    cv2.waitKey(0)
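
If the lbp used above is scikit-image's implementation, note that local_binary_pattern takes the number of points before the radius. A minimal sketch under that assumption:

import cv2
import numpy as np
from skimage.feature import local_binary_pattern

gray = cv2.imread("./lena.jpg", cv2.IMREAD_GRAYSCALE)
radius = 3
n_points = 8 * radius
lbp_img = local_binary_pattern(gray, n_points, radius)  # (image, P, R) order
# scale to 8-bit for display
lbp_img = (255 * (lbp_img - lbp_img.min()) / np.ptp(lbp_img)).astype(np.uint8)
cv2.imshow("LBP (skimage)", lbp_img)
cv2.waitKey(0)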
Example #3
import time

import cv2
import numpy as np
import simpleaudio as sa

# Constants (MAIN_MENU, EYE_POSITION_*, the *_THRESHOLD values, ...) and helpers
# such as run_model(), Line and the draw/put functions are defined elsewhere
# in the project.


def main():
    # initial system config
    # pre-generate colors
    for i in range(0, 101):
        l_current = min(100, int((90 - 29) * i / 100) + 29)
        hsv = np.uint8([[[60, 148, (l_current / 100) * 255]]])
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        c = (int(bgr[0][0][0]), int(bgr[0][0][1]), int(bgr[0][0][2]))
        SELECT_COLOR[i] = c

    current_menu = MAIN_MENU
    count_dict = {
        EYE_POSITION_LEFT: 0,
        EYE_POSITION_CENTER: 0,
        EYE_POSITION_RIGHT: 0,
        EYE_POSITION_NOT_FOUND: 0,
    }
    active = ALWAYS_ON
    previous_eye_position = None
    end_text = None
    messenger = Line(
        'p4GTOS53TysxYxaJRe38v7qcV2WqZNMExbA5HGFpe12CoMmV2ugPPNTld/eNoPTiZwAiNrMdSqWaeWRCzj5z17Qzr6qtZVMJnup01T1U6aAn3SA4J+/jSVIolFpeO1TgODzNz4cZZNXXtHQTVuUpEwdB04t89/1O/w1cDnyilFU=',
        'C40345e548ee52a546052a2a56183cd44', DRY_RUN)

    # img = np.zeros((WINDOW_HEIGHT, WINDOW_WIDTH, 4), dtype=np.uint8)
    # img[:, :, :3] = BACKGROUND_COLOR
    # cv2.imshow(WINDOW_TITLE, img)

    webcam = cv2.VideoCapture(0)

    try:
        while True:

            img = np.zeros((WINDOW_HEIGHT, WINDOW_WIDTH, 4), dtype=np.uint8)
            img[:, :, :3] = BACKGROUND_COLOR

            _, frame = webcam.read()
            eye_position = run_model(frame)

            # when the screen is sleeping
            if not active:
                # only detect center while sleeping
                if eye_position == EYE_POSITION_CENTER:
                    count_dict[eye_position] += 1
                    print('DETECTED', count_dict[eye_position])

                    if eye_position in count_dict and \
                            count_dict[eye_position] > WAKE_CYCLE_THRESHOLD:
                        active = True
                        continue
                else:
                    print('IGNORE', eye_position)

            # when the screen is active
            if has_button_on_position(current_menu, eye_position) and active:

                if eye_position in count_dict:
                    count_dict[eye_position] += 1

                if count_dict[eye_position] > RESET_CYCLE_THRESHOLD:
                    for k in [
                            k for k in count_dict.keys() if k != eye_position
                    ]:
                        count_dict[k] = 0

                if eye_position == EYE_POSITION_LEFT:
                    focus_left(
                        img, count_dict[EYE_POSITION_LEFT] /
                        SELECTED_CYCLE_THRESHOLD)
                    print('LEFT')
                    if SOUND and \
                            previous_eye_position != eye_position:
                        sa.WaveObject.from_wave_file("effect/left.wav").play()

                elif eye_position == EYE_POSITION_CENTER:
                    focus_center(
                        img, count_dict[EYE_POSITION_CENTER] /
                        SELECTED_CYCLE_THRESHOLD)
                    print('CENTER')
                    if SOUND and \
                            previous_eye_position != eye_position:
                        sa.WaveObject.from_wave_file(
                            "effect/center.wav").play()

                elif eye_position == EYE_POSITION_RIGHT:
                    focus_right(
                        img, count_dict[EYE_POSITION_RIGHT] /
                        SELECTED_CYCLE_THRESHOLD)
                    print('RIGHT')
                    if SOUND and \
                            previous_eye_position != eye_position:
                        sa.WaveObject.from_wave_file("effect/right.wav").play()

                # elif eye_position == 0:
                #     # Blink
                #     count_dict['cBlinking'] += 1
                #     print('Blink')

                # TODO: Sleep monitor signal
                elif eye_position == EYE_POSITION_NOT_FOUND:
                    print('NOT_FOUND')
                else:
                    print(eye_position)

            # draw title
            title = get_title(current_menu)
            put_title(img, title)
            # put_end_text(img, get_end_text(current_menu))
            put_end_text(img, end_text)

            # draw left icon and text
            left_icon = cv2.imread(get_left_icon(current_menu),
                                   cv2.IMREAD_UNCHANGED)
            add_icon_left(img, left_icon)
            put_text_left(img, get_left_text(current_menu))

            # draw center icon and text
            center_icon = cv2.imread(get_center_icon(current_menu),
                                     cv2.IMREAD_UNCHANGED)
            add_icon_center(img, center_icon)
            put_text_center(img, get_center_text(current_menu))

            # draw right icon and text
            right_icon = cv2.imread(get_right_icon(current_menu),
                                    cv2.IMREAD_UNCHANGED)
            add_icon_right(img, right_icon)
            put_text_right(img, get_right_text(current_menu))

            if active and \
                    eye_position == EYE_POSITION_NOT_FOUND and \
                    count_dict[eye_position] >= SLEEP_CYCLE_THRESHOLD and \
                    not ALWAYS_ON:
                # sleep, turn off monitor
                active = False

            if active and \
                    has_button_on_position(current_menu, eye_position):
                if count_dict[eye_position] >= SELECTED_CYCLE_THRESHOLD:
                    next_menu = get_next_menu(current_menu, eye_position)
                    # get end text
                    end_text = get_end_text(current_menu, eye_position)
                    if next_menu is not None:
                        # do action
                        action = get_action(current_menu, eye_position)
                        if action == 'emergency':
                            messenger.send(f'{MSG_PREFIX}EMERGENCY')
                        # elif action == 'toilet':
                        #     print('LINE message sent: TOILET')

                        current_menu = next_menu
                    else:
                        # do action
                        action = get_action(current_menu, eye_position)
                        if action == 'back':
                            current_menu = MAIN_MENU
                        elif action == 'hurry':
                            messenger.send(f'{MSG_PREFIX}HURRY')
                            put_end_text(img, "Another message sent")
                            current_menu = MAIN_MENU
                        elif action == 'no':
                            messenger.send(f'{MSG_PREFIX}NO')
                            current_menu = MAIN_MENU
                        elif action == 'yes':
                            messenger.send(f'{MSG_PREFIX}YES')
                            current_menu = MAIN_MENU
                        else:
                            break

                    # reset dictionary values
                    count_dict = count_dict.fromkeys(count_dict, 0)
                else:
                    put_countdown_text(img, eye_position,
                                       count_dict[eye_position])
            if active:
                cv2.imshow(WINDOW_TITLE, img)
                previous_eye_position = eye_position
            else:
                previous_eye_position = None

            print(count_dict)

            time.sleep(CYCLE_TIME)

            if cv2.waitKey(1) == 27:
                break
    finally:
        webcam.release()
Example #4
import cv2 as cv
import numpy as np

drawing = False  # True while the left mouse button is held down
ix, iy = -1, -1


# mouse callback function
def draw_shape(event, x, y, flags, param):
    global ix, iy, drawing

    if event == cv.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y

    elif event == cv.EVENT_MOUSEMOVE:
        if drawing:
            cv.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 1)  # 1-px outline while dragging

    elif event == cv.EVENT_LBUTTONUP:
        drawing = False
        cv.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)


img = np.zeros((512, 512, 3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image', draw_shape)

while True:
    cv.imshow('image', img)
    if cv.waitKey(1) == ord('q'):
        break

cv.destroyAllWindows()
Example #5
from cv2 import cv2
import numpy as np
import configuracion
import caras
import pandas as pd
import openpyxl

ncara = configuracion.nombre_caras()
# Pull from the configuracion module the hue ranges to detect
R2_rojo, R_azul, R_naranja, R_verde, R_amarillo, R_blanco = configuracion.matices()
Azul, Verde, Rojo, Naranja, Amarillo, Blanco = configuracion.colores_basic()
""""
cap = cv2.VideoCapture(0)
#p1, p2, p3, p4, pc, p6, p7, p8, p9   

for n in range(1,6):
    image=cv2.imread('Fotos_caras\Foto_cara_'+ncara[n]+'.jpg') #leemos la imagen
    for k in range(1,9):   
        frameHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        maskRed1 = cv2.inRange(frameHSV, R2_rojo[0], R2_rojo[1])
        maskRed2 = cv2.inRange(frameHSV, R2_rojo[2], R2_rojo[3])
        maskRed = cv2.add(maskRed1, maskRed2) #Como se puede ver en la imagen de HSV, el color rojo se parte
        maskBlue = cv2.inRange(frameHSV, R_azul[0], R_azul[1])
        maskOrange = cv2.inRange(frameHSV, R_naranja[0], R_naranja[1])
        maskGreen = cv2.inRange(frameHSV, R_verde[0], R_verde[1])
        maskYellow = cv2.inRange(frameHSV, R_amarillo[0], R_amarillo[1])

cv2.waitKey(0) 
cap.release()
cv2.destroyAllWindows()
"""
Example #6
    
    status_list.append(status)
    
    status_list = status_list[-2:]  # keep only the last two states

    # record a timestamp whenever the motion state flips (the guard avoids an
    # IndexError on the very first frame, when the list has a single entry)
    if len(status_list) == 2:
        if status_list[-1] == 1 and status_list[-2] == 0:
            times.append(datetime.now())
        if status_list[-2] == 1 and status_list[-1] == 0:
            times.append(datetime.now())
    
    cv2.imshow("Gray Frame", gray)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Colour Frame", frame)

    key = cv2.waitKey(1)

    if key==ord('q'):
        if(status==1):
            times.append(datetime.now())
        break

print(status_list)
print(times)

for i in range(0, len(times), 2):
    # DataFrame.append was removed in pandas 2.0; see the pd.concat sketch below
    df = df.append({"Start": times[i], "End": times[i+1]}, ignore_index=True)

df.to_csv("Times.csv")

video.release()
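
Since DataFrame.append is gone in pandas 2.0, here is a sketch of the same Start/End pairing with pd.concat, assuming an even number of timestamps in times:

import pandas as pd

intervals = pd.DataFrame({"Start": times[0::2], "End": times[1::2]})  # pair alternating timestamps
df = pd.concat([df, intervals], ignore_index=True)
df.to_csv("Times.csv")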
Example #7
def cv_show(name, img):
    cv.imshow(name, img)
    k = cv.waitKey(0)
    if k == 27:
        cv.destroyAllWindows()
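
Typical usage of the helper, assuming an image file on disk:

import cv2 as cv

img = cv.imread('test.jpg')  # hypothetical file name
cv_show('preview', img)      # the windows are destroyed when ESC is pressed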
Example #8
    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):

        matches = fr.compare_faces(known_face_encodings,
                                   face_encoding,
                                   tolerance=1.0)  # very loose; face_recognition's default is 0.6
        name = "Unknown"

        face_distances = fr.face_distance(known_face_encodings, face_encoding)

        best_match_index = np.argmin(face_distances)

        if matches[best_match_index]:
            name = known_face_names[best_match_index]

        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)

        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0),
                      cv2.FILLED)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (0, 0, 0),
                    1)

    cv2.imshow('WebCam_faceRecognition', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
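
The known_face_encodings and known_face_names used above are typically built once at startup. A minimal sketch, assuming one reference photo per person (person.jpg is a hypothetical file):

import face_recognition as fr

image = fr.load_image_file("person.jpg")
encodings = fr.face_encodings(image)
if encodings:  # at least one face found in the reference photo
    known_face_encodings = [encodings[0]]
    known_face_names = ["Person"]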
def mode_t():       
    #drone = tellopy.Tello()

    try:
        #drone.connect()
        # wait up to 15 s for the drone connection; exit the program on timeout
        #drone.wait_for_connection(15.0)
        print("Drone connected.")
        #drone.takeoff()
        #time.sleep(4)
        print("Drone airborne.")
        
        retry = 3
        container = None  # NOTE: stays None while the av.open block below is
                          # commented out, so container.decode(...) further down
                          # would raise AttributeError
        #while container is None and 0 < retry:
        #    retry -= 1
        #    try:
        #        container = av.open(drone.get_video_stream())
        #    except av.AVError as ave:
        #        print(ave)
        #        print('av.open err retry...')

        # skip the first 420 frames; the stream is unstable right after connecting
        frame_skip = 420
        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                # capture start time
                start_time = time.time()
                # convert the drone frame to an OpenCV (BGR) image
                img = cv.cvtColor(numpy.array(frame.to_image()), cv.COLOR_RGB2BGR)
                gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                #cv.imshow('drone cam', img)
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)
                        
                for (x, y, w, h) in faces:
                    img = cv.rectangle(img, (x, y),(x + w, y + h), (255, 0, 0), 2)
                    roi_gray = gray[y : y + h, x : x + w]
                    roi_color = img[y : y + h, x : x + w]
                    eyes = eye_cascade.detectMultiScale(roi_gray)
                    for (ex, ey, ew, eh) in eyes:
                        cv.rectangle(roi_color, (ex, ey),(ex + ew, ey + eh), (0, 255, 0), 2)

                cv.imshow('face_detection', img)        
                k = cv.waitKey(1)
                if k == ord('s'):
                    break
                if frame.time_base < 1.0/60:
                    time_base = 1.0/60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time)/time_base)
                    
    # catch every kind of error
    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
        print('Exiting the program.')
        drone.quit()
        cv.destroyAllWindows()
Example #10

#setting up the actual mouse click
cv2.setMouseCallback("Color Detection", draw_function)

while True:

    cv2.imshow("Color Detection", img)
    if clicked:

        #cv2.rectangle(image, startpoint, endpoint, color, thickness); thickness -1 fills the rectangle
        cv2.rectangle(img, (20, 20), (750, 60), (b, g, r), -1)

        #Creating text string to display( Color name and RGB values )
        text = ("R = " + str(r) + ", G = " + str(g) + ", B = " + str(b))

        #cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType )
        cv2.putText(img, text, (50, 50), 2, 0.8, (255, 255, 255), 2,
                    cv2.LINE_AA)

        #For very light colours, display the text in black
        if r + g + b >= 600:
            cv2.putText(img, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA)

        clicked = False

    #leaves the program if esc is pressed
    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()
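
The draw_function registered above is not shown in this snippet. A minimal sketch of what it presumably does, assuming img and the b, g, r, clicked globals from the surrounding program:

def draw_function(event, x, y, flags, param):
    global b, g, r, clicked
    if event == cv2.EVENT_LBUTTONDOWN:
        b, g, r = (int(v) for v in img[y, x])  # sample the clicked pixel (BGR order)
        clicked = True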
Example #11
from cv2 import cv2

video = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier(
    "dataset/haarcascade_frontalface_default.xml")
smileCascade = cv2.CascadeClassifier("dataset/haarcascade_smile.xml")

cnt = 500  # image counter; moved out of the loop so it persists across frames
while True:
    success, img = video.read()
    grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(grayImg, 1.1, 4)
    keyPressed = cv2.waitKey(1)
    for x, y, w, h in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 3)
        smiles = smileCascade.detectMultiScale(grayImg, 1.8, 15)
        for x, y, w, h in smiles:
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (100, 100, 100),
                                5)
            print("Image " + str(cnt) + " saved")
            # NOTE: as written this produces names like "...images500.jpg"; add a
            # path separator if "images" is meant to be a folder
            path = r'C:\Users\pushp\Desktop\Selphie_Capture_Python\smile-selfie-capture-project\images' + str(
                cnt) + '.jpg'
            cv2.imwrite(path, img)
            cnt += 1
            if cnt >= 503:  # only breaks the inner smile loop
                break

    cv2.imshow('live video', img)
    if (keyPressed & 0xFF == ord('q')):
        break
Example #12
import glob
import tkinter
import tkinter.filedialog

import cv2
import numpy as np
import yaml


def calibrate():
    # chessboard inner-corner count: col * row
    col = 13
    row = 6

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((row * col, 3), np.float32)
    # each square of the calibration chessboard is 22 mm on a side
    objp[:, :2] = 22*np.mgrid[0:col, 0:row].T.reshape(-1, 2)

    objpoints = []  # 3-D points in world coordinates
    imgpoints = []  # 2-D points in the image plane
    print("Select the folder containing the calibration images", "\n")

    root = tkinter.Tk()
    root.withdraw()

    global path  # directory holding the calibration images
    path = tkinter.filedialog.askdirectory(
        title="Select the folder containing the calibration images")
    images = glob.glob(path+"/*.jpg")
    found = 0  # number of images actually used for calibration
    for k, fname in enumerate(images):
        img = cv2.imread(fname)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # corner detection
        ret, corners = cv2.findChessboardCorners(gray, (col, row), None)

        if ret:
            print("Reading", fname)
            objpoints.append(objp)

            
            # corner localisation accuracy drives the calibration accuracy
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1), criteria)  # sub-pixel corner positions
            # corners2=corners
            #_,corners2=cv2.find4QuadCornerSubpix(gray, corners, (11, 11))

            imgpoints.append(corners2)
            img = cv2.drawChessboardCorners(img, (col, row), corners2, ret)  # mark the corners
            found += 1
            if len(images) < 16:  # with many images, skip the preview to avoid a flood of windows
                cv2.namedWindow('press any key to continue', cv2.WINDOW_NORMAL)
                cv2.imshow('press any key to continue', img)
                cv2.waitKey(0)

            #image_name = path2 + "//corner"+str(found) + '.png'
            #cv2.imwrite(image_name, img)
            # save the images with the marked corners
            
    global path2  # output directory (camera parameter files, undistorted photos, 3-D box photos)
    path2 = tkinter.filedialog.askdirectory(
        title="Select the output folder (should differ from the calibration image folder)")

    print("Number of images used for calibration: ", found)

    # camera calibration
    ret2, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                        gray.shape[::-1], None, None)

    print("reprojection error:", ret2)
    print("camera matrix:", mtx)
    print("distortion coefficients:", dist)
    print("rotation vectors:", rvecs)
    print("translation vectors:", tvecs)

    images = glob.glob(path+"//*.jpg")
    for i, fname in enumerate(images):
        img = cv2.imread(fname)
        if img is None:
            continue
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                            (w, h))
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)  # undistort

        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]  # crop to the valid region
        outpath = path2+"//tianyi_gao_undistorted" + str(i + 1) + ".jpg"
        cv2.imwrite(outpath, dst)
    print("new camera matrix:", newcameramtx)
    
    data = {
        'camera_matrix': np.asarray(mtx).tolist(),
        'dist_coeff': np.asarray(dist).tolist(),
        'new_camera_matrix': np.asarray(newcameramtx).tolist(),
        'rvecs': np.asarray(rvecs).tolist(),
        'tvecs': np.asarray(tvecs).tolist(),
        'reprojection_error': np.asarray(ret2).tolist()
    }
    # save the camera parameters (YAML)
    with open(path2+"//calibration_parameters.yaml", "w") as f:
        yaml.dump(data, f)
    # save the camera parameters (TXT)
    with open(path2+"//tianyi_gao_cam.txt", "w") as f2:
        name = list(data.keys())
        value = list(data.values())
        for i in range(len(name)):
            f2.write(name[i] + ":" + "\n" + str(value[i]) + "\n")

    print('Calibrate Done')
    cv2.destroyAllWindows()
    return mtx, dist, rvecs, tvecs, ret2, path2
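
A short sketch of reusing the saved parameters later, assuming the YAML file written above and a hypothetical input photo:

import cv2
import numpy as np
import yaml

with open("calibration_parameters.yaml") as f:
    data = yaml.safe_load(f)
mtx = np.array(data['camera_matrix'])
dist = np.array(data['dist_coeff'])

img = cv2.imread("some_photo.jpg")  # hypothetical file name
undistorted = cv2.undistort(img, mtx, dist)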
Example #13
import numpy as np
import cv2.cv2 as cv2  #fixing pylint
from matplotlib import pyplot as plt
#import file with precoded values to look neater
import GenUtils as gen

# Make a blank image (immediately replaced by the loaded field image below)
img = np.zeros((50, 50, 3), np.uint8)
img = cv2.imread('FieldImages/imgGrass.png')

# Naming Scheme & directory location declaration
directory = './TestShapes'
title = 'testing.png'
filename = directory + '/' + title
gen.drawRectangle(img, gen.Palet[9])
gen.drawLetter(img, 'Q', gen.Palet[5], 3, (12, 37), 1.2)

# Display Window
cv2.imshow(title, img)

# Testing purposes
k = cv2.waitKey(0)
if k == 27:  # wait for ESC key to exit
    cv2.destroyAllWindows()
elif k == ord('s'):  # wait for 's' key to save and exit
    cv2.imwrite(filename, img)
    cv2.destroyAllWindows()
Example #14
def getContours(_img):
    # reconstructed header: the original snippet starts mid-function
    rects = []
    contours, hierarchy = cv2.findContours(_img, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 500:
            cnt_length = cv2.arcLength(cnt, True)
            cnt_vertexes_approx = cv2.approxPolyDP(cnt, 0.02 * cnt_length,
                                                   True)
            rects.append(cv2.boundingRect(cnt_vertexes_approx))
    return rects


while True:
    success, frame = cap.read()

    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    for color in myColors:
        mask = cv2.inRange(frame_hsv, myColors[color][0], myColors[color][1])
        rect = getContours(mask)

        for position in rect:
            x, y, w, h = position
            circles.append(((x + w // 2, y), myColors[color][2]))

    for circle in circles:
        cv2.circle(frame, circle[0], 5, circle[1], cv2.FILLED)

    cv2.imshow("Result", frame)
    cv2.waitKey(1)
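
myColors and circles are defined outside this snippet; from the indexing above, each entry maps a name to (lower HSV bound, upper HSV bound, draw colour). A hypothetical sketch:

import numpy as np

myColors = {
    # name: (lower HSV, upper HSV, BGR draw colour) -- values are illustrative only
    "orange": (np.array([5, 107, 0]), np.array([19, 255, 255]), (51, 153, 255)),
}
circles = []  # accumulated draw points: ((x, y), bgr)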
    def loop_forever(self):
        firstFrame = None
        notDetectedCounter = self.border_notDetected
        frameCounter = 0
        motionDetected = False

        while True:
            if self.capture.isOpened():
                ret, frame = self.capture.read()
                frameCounter += 1
                # text = "Unoccupied"
                if ret:

                    # resize the frame, convert it to grayscale, and blur it
                    scaledFrame = imutils.resize(frame, width=500)
                    gray = cv2.cvtColor(scaledFrame, cv2.COLOR_BGR2GRAY)
                    gray = cv2.GaussianBlur(gray, (21, 21), 0)

                    if firstFrame is None:
                        firstFrame = gray
                        continue

                    # compute the absolute difference between the current frame and
                    # first frame
                    frameDelta = cv2.absdiff(firstFrame, gray)
                    thresh = cv2.threshold(frameDelta, 25, 255,
                                           cv2.THRESH_BINARY)[1]
                    # dilate the thresholded image to fill in holes, then find contours
                    # on thresholded image
                    thresh = cv2.dilate(thresh, None, iterations=2)
                    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
                    cnts = imutils.grab_contours(cnts)
                    motionDetected = False
                    for c in cnts:
                        if cv2.contourArea(c) > self.border_contourArea:
                            motionDetected = True
                            notDetectedCounter = 0

                    if motionDetected or notDetectedCounter < self.border_notDetected:
                        #print("writing file")
                        # write frame to output file
                        if not self.writer.isOpened():
                            # setup file writer if is not opened
                            file_path = self.generateFilePath()
                            frame_width = int(self.capture.get(3))
                            frame_height = int(self.capture.get(4))

                            fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
                            self.writer = cv2.VideoWriter(
                                file_path, fourcc, self.fps,
                                (frame_width, frame_height))
                        self.writer.write(frame)
                        notDetectedCounter += 1
                    else:
                        if self.writer.isOpened():
                            self.writer.release()

                        if frameCounter >= self.border_resetFirstFrame:
                            firstFrame = gray
                            frameCounter = 0
                            print("reset")
                    """
                    # loop over the contours
                    for c in cnts:
                        # if the contour is too small, ignore it
                        if cv2.contourArea(c) < 500:#args["min_area"]:
                            continue
                        # compute the bounding box for the contour, draw it on the frame,
                        # and update the text
                        (x, y, w, h) = cv2.boundingRect(c)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                        text = "Occupied"

                   # draw the text and timestamp on the frame
                    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
                    # show the frame and record if the user presses a key
                    #"""

                    # show the images

                    cv2.imshow("Security Feed", gray)
                    cv2.imshow("Thresh", thresh)
                    cv2.imshow("Frame Delta", frameDelta)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

                    #"""
                    # print(counter_frames)
                    # recording completed
                    #if counter_frames >= self.videolength_frames:
                    #logging.info("Recording completed.")
                    #self.mqtt.sendProcessMessage(self.user_name, self.mqtt.info_list[self.module_name]["RecordedFile"],file=self.file_name)
                    #    break

                else:
                    if not self.capture.isOpened():
                        #self.mqtt.sendProcessMessage(self.user_name, self.mqtt.info_list[self.module_name]["RecordLostConnection"],file=self.file_name)
                        #logging.error("Lost connection to camera.")
                        pass
                    #self.mqtt.sendProcessMessage(self.user_name, self.mqtt.info_list[self.module_name]["RecordFileError"],file=self.file_name)
                    #logging.error("Can not read from VideoCapture.")
                    break
        cv2.destroyAllWindows()
Example #16
    def _instrument_debug(self) -> None:
        from cv2 import cv2
        cv2.imshow('debug image', self.capture())
        cv2.waitKey(0)
import cv2.cv2 as cv2
import numpy as np

path = 'a.jpg'
img = cv2.imread(path, 0)
gaussimg = cv2.GaussianBlur(img, (3, 3), 0)  # Gaussian blur with a (3, 3) kernel
medianimg = cv2.medianBlur(gaussimg, 7)  # median blur
cannyimg = cv2.Canny(medianimg, 0, 148)  # Canny edge detection
cv2.imshow('image', cannyimg)
cv2.waitKey(1000)
circles = cv2.HoughCircles(cannyimg,
                           cv2.HOUGH_GRADIENT,
                           1,
                           20,
                           param1=50,
                           param2=10,
                           minRadius=0,
                           maxRadius=300)
circles = np.uint16(np.around(circles))
img2bgr = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
count = 1  # used as a flag to keep only the first Hough circle; indexing the array directly did not work out
for i in circles[0]:  # the returned circles are sorted by fit quality, so the first is usually the best match
    # if several circles are detected, try repeated Hough passes and filter by radius
    # before taking the first one, though this is not very stable
    cv2.circle(img2bgr, (i[0], i[1]), i[2], (0, 0, 255), 5)  # draw this Hough circle on the image
    count += 1
    if count == 2:
        break
cv2.imshow('image', img2bgr)
cv2.waitKey(1000)
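
For reference, the two positional arguments passed to cv2.HoughCircles above are dp and minDist; the same call with the keywords spelled out:

circles = cv2.HoughCircles(cannyimg,
                           cv2.HOUGH_GRADIENT,
                           dp=1,          # accumulator resolution = image resolution
                           minDist=20,    # minimum distance between circle centres
                           param1=50,     # upper Canny threshold used internally
                           param2=10,     # accumulator threshold; lower finds more (weaker) circles
                           minRadius=0,
                           maxRadius=300)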
Example #18
# reconstructed setup for this snippet: the imports and state it references
import cv2 as cv
import numpy as np

drawing = False  # True while the left mouse button is held down
mode = True      # True draws rectangles, False draws circles; toggled with 'm'
ix, iy = -1, -1
img = np.zeros((512, 512, 3), np.uint8)


# create the mouse callback
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    # on left-button press, record the starting coordinates
    if event == cv.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y

# draw while moving with the left button held; `event` reports the motion, `flags` the button state
    elif event == cv.EVENT_MOUSEMOVE and flags == cv.EVENT_FLAG_LBUTTON:
        if drawing:
            if mode:
                cv.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 1)
            else:
                # draw a circle
                cv.circle(img, (x, y), 3, (0, 0, 255), -1)

# stop drawing when the mouse button is released
    elif event == cv.EVENT_LBUTTONUP:
        drawing = False

cv.namedWindow('image')
cv.setMouseCallback('image', draw_circle)
while True:
    cv.imshow('image', img)
    k = cv.waitKey(1) & 0xFF
    if k == ord('m'):
        mode = not mode
    elif k == 27:
        break
from cv2 import cv2
import numpy as np
import imutils
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

#read the image into cv object
path = 'frontPlate.jpeg'  #our image will be from video most likely jpg
og = cv2.imread(
    path)  #https://www.geeksforgeeks.org/python-opencv-cv2-imread-method/
cv2.imshow("Original", og)
cv2.waitKey()
#Efficiency Possibility: try cv2.imread(path, cv2.IMREAD_GRAYSCALE)

#Resize on standardized license plate zone (later)
#Greyscale the image
grey = cv2.cvtColor(
    og, cv2.COLOR_BGR2GRAY
)  #https://docs.opencv.org/3.4/d8/d01/group__imgproc__color__conversions.html
cv2.imshow("Gray-Scale", grey)
cv2.waitKey()

#blur the image
image = cv2.bilateralFilter(
    grey, 5, 60,
    60)  #https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html
cv2.imshow("Bilateral Filter", image)  # Numbers determined from docs above
cv2.waitKey()
#Efficiency Possibility: 5 is the recommended for real-time systems, but this could be reduced

#Perform edge detection
Example #20
from cv2 import cv2
import time  # `time` is a separate module, not part of cv2
import numpy as np

# OpenCV video I/O
capture = cv2.VideoCapture(0)  # open the default camera
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)  # width
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # height

while True:
    ret, frame = capture.read()  # read a camera frame
    cv2.imshow("VideoFrame", frame)  # show the frame
    if cv2.waitKey(1) > 0: break  # exit on any key press

capture.release()
cv2.destroyAllWindows()
Example #21
        # extract (crop out the required image) : region of interest
        offset = 10
        face_selection = frame[y-offset:y+h+offset, x-offset:x+w+offset]
        face_selection = cv2.resize(face_selection, (100, 100))

        # store every 10th face
        skip += 1
        if skip % 10 == 0:
            face_data.append(face_selection)
            print(len(face_data))

    cv2.imshow("Frame", frame)
    #cv2.imshow("face selection",face_selection)

    key_pressed = cv2.waitKey(1) & 0xFF
    if key_pressed == ord("s"):
        break
# convert face list in numpy array
face_data = numpy.asarray(face_data)
face_data = face_data.reshape((face_data.shape[0], -1))
print(face_data.shape)

# save into file system

numpy.save(dataset_path+file_name+".npy", face_data)
print("data succesfully saved")

cap.release()
cv2.destroyAllWindows()
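
A short sketch of reading the stored faces back later, assuming the same dataset_path and file_name:

import numpy as np

face_data = np.load(dataset_path + file_name + ".npy")
faces = face_data.reshape((-1, 100, 100, 3))  # undo the flattening done above
print(faces.shape)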
Example #22
import numpy as np
from cv2 import cv2
from matplotlib import pyplot as plt

img = cv2.imread("/home/vatsal/Downloads/numImage1.jpg")
cv2.imshow('image', img)
cv2.waitKey(6000)
cv2.destroyAllWindows()
Example #23
    sess)  # set this TensorFlow session as the default session for Keras (the call is truncated in the source)

#------------------------------
#tensorflow lite model
import numpy as np
import tensorflow as tf
from cv2 import cv2
import time

interpreter = tf.lite.Interpreter(model_path='l_detect.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

cap = cv2.VideoCapture("VID1_15fps.mp4")
while cv2.waitKey(1) < 0:
    times = time.time()
    hasFrame, frame = cap.read()
    if not hasFrame:
        print("Done processing !!!")
        break

    frame = cv2.resize(frame, (300, 300))
    frameo = frame.copy()
    frame = np.expand_dims(frame, axis=0)
    frame = (2.0 / 255.0) * frame - 1.0
    frame = frame.astype('float32')

    interpreter.set_tensor(input_details[0]['index'], frame)
    interpreter.invoke()
Example #24
# assumed setup for this snippet: the io/cv2 imports and the legacy (pre-2.0)
# google-cloud-vision client that provides the `types` and `client` names below
import io

import cv2
from google.cloud import vision
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()


def detect_text(path): # text detection function definition with "path" argument
    """Detects text in the file."""
    with io.open(path, 'rb') as image_file: # open and read image_file in binary
        content = image_file.read() # read and stores image_file content in content var

    image = types.Image(content=content) # image to be processed request
    response = client.text_detection(image=image) # if image is present, function returns detections
    texts = response.text_annotations # detections from image
    string = '' # initializes string variable

    for text in texts: # for all individual content within image detection
        string+=' ' + text.description # store new individual content with space 
    return string # return newest string

cap = cv2.VideoCapture(0) # stores captured webcam video, frame-by-frame

while(True):
    # for each captured frame
    ret, frame = cap.read() # ret = True if a frame is available; frame = image array
    file = 'live.png' # initializes file to store a .png image
    cv2.imwrite( file,frame) # write the captured frame to the file

    print(detect_text(file)) # calls detect text file defined above with new file path and prints string

    cv2.imshow('frame',frame) # names the display window and shows the captured frame
    if cv2.waitKey(1) & 0xFF == ord('q'): # wait 1 ms for the keystroke 'q'
        break # if q is pressed, break from the loop

cap.release() # releases the captured video
cv2.destroyAllWindows() # destroy the display windows
from cv2 import cv2
# THIS IS WEBCAM
cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, back = cap.read()  # back is the frame from the webcam; ret reports whether the read succeeded
    if ret:
        cv2.imshow("image", back)
        if cv2.waitKey(5) == ord('q'):  # poll every 5 ms; press 'q' to save the image
            cv2.imwrite('image.jpg', back)
            break  # after saving the image, leave the loop

cap.release()
cv2.destroyAllWindows()
Example #26
is_destroy = False
particle_list = []
if capture.isOpened():
    while True:
        ret, prev = capture.read()
        if ret:
            if not is_cropped:
                firstFrame = copy.deepcopy(prev)
                the_firstFrame = copy.deepcopy(prev)
                image_w, image_h = prev.shape[1], prev.shape[0]
            while not is_cropped:
                cv2.namedWindow("choose_image", flags=0)
                cv2.resizeWindow('choose_image', 1080, 800)
                cv2.setMouseCallback('choose_image', choose_frame)
                cv2.imshow('choose_image', firstFrame)
                cv2.waitKey(10) & 0xff
            if not is_destroy:
                cv2.destroyAllWindows()
                is_destroy = True
                particle_list = initial_particle()
                cv2.rectangle(prev, (int(ix - w * 0.5), int(iy - 0.5 * h)),
                              (int(ix + w * 0.5), int(iy + 0.5 * h)), (0, 255, 0), 1)
                cv2.namedWindow('video', flags=0)
                cv2.resizeWindow('video', 1080, 800)
                cv2.imshow('video', prev)
            else:
                particle_list, prev = particlefilter(particle_list, copy.deepcopy(prev))
                cv2.imshow('video', prev)
        else:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #27
import imutils
import numpy as np
from cv2 import cv2

image = cv2.imread("./resources/img/test0.jpg")  # Load image

(h, w, d) = image.shape  # Extract image dimensions to variables h, w, & d
size = image.size  # Extract image size (num. pixels) to variable size
dtype = image.dtype  # Extract image data type to variable dtype

#Print Useful Information to Console
print("width={}, height={}, depth={}".format(w, h, d))
print("size={}, data type={}".format(size, dtype))

oImage = image  # Keep a reference to the original image

# Define range of target colors in HSV
lowerColor = np.array([44, 63, 63])
upperColor = np.array([52, 255, 255])

hsv = cv2.cvtColor(
    image, cv2.COLOR_BGR2HSV)  # Convert image to HSV colorspace for processing
mask = cv2.inRange(hsv, lowerColor,
                   upperColor)  # Generate mask from color range
image = cv2.bitwise_and(image, image, mask=mask)  # Mask origional image

cv2.imshow("Origional", oImage)  # Display origional image
cv2.imshow("Mask", mask)  # Display image mask
cv2.imshow("Filtered", image)  # Display filtered image
cv2.waitKey(0)  # Wait for keypress to close
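
One way to arrive at such HSV bounds is to convert a sampled BGR colour and pad around its hue. A sketch with an illustrative sample colour:

import cv2
import numpy as np

bgr_sample = np.uint8([[[40, 120, 60]]])  # hypothetical target colour in BGR
h = int(cv2.cvtColor(bgr_sample, cv2.COLOR_BGR2HSV)[0][0][0])
lowerColor = np.array([max(h - 4, 0), 63, 63])
upperColor = np.array([min(h + 4, 179), 255, 255])  # OpenCV hue range is 0-179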
    # (the lines above are cut off in the source; `approx` comes from running
    # cv.approxPolyDP on each contour, e.g.
    # approx = cv.approxPolyDP(cnt, epsilon, True))
    cv.drawContours(img, [approx], 0, (0, 0, 0), 3)
    x = approx.ravel()[0]
    y = approx.ravel()[1] - 5

    if len(approx) == 3:
        cv.putText(img, "Triangle", (x, y), cv.FONT_HERSHEY_SIMPLEX, .5,
                   (0, 0, 255))
    elif len(approx) == 4:
        x, y, w, h = cv.boundingRect(approx)
        is_square = w / h
        if is_square >= 0.95 and is_square <= 1.05:
            cv.putText(img, "Square", (x, y), cv.FONT_HERSHEY_SIMPLEX, .5,
                       (0, 0, 255))
        else:
            cv.putText(img, "Rectangle", (x, y), cv.FONT_HERSHEY_SIMPLEX, .5,
                       (0, 0, 255))

    elif len(approx) == 5:
        cv.putText(img, "Pentagon", (x, y), cv.FONT_HERSHEY_SIMPLEX, .5,
                   (0, 0, 255))
    elif len(approx) == 10:
        cv.putText(img, "Star", (x, y), cv.FONT_HERSHEY_SIMPLEX, .5,
                   (0, 0, 255))
    else:
        cv.putText(img, "Circle", (x, y), cv.FONT_HERSHEY_SIMPLEX, .5,
                   (0, 0, 255))

cv.imshow("Shapes", img)
cv.waitKey(0)
cv.destroyAllWindows()
Example #29
from cv2 import cv2 as cv
import numpy as np
import sys, io
import picamera  # needed for the PiCamera capture below

stream = io.BytesIO()

with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    camera.framerate = 30
    while True:
        # Capture image from camera
        camera.capture(stream, format='jpeg', use_video_port=True)

        # Convert image from camera to a numpy array
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)  # np.fromstring is deprecated

        # Decode the numpy array image
        image = cv.imdecode(data, cv.IMREAD_COLOR)  # CV_LOAD_IMAGE_COLOR in pre-3.0 OpenCV

        # Empty and return the in-memory stream to beginning
        stream.seek(0)
        stream.truncate(0)

        # Display the image
        cv.imshow('image', image)

        # Wait for ESC to end program
        key = cv.waitKey(10)
        if key == 27:
            break
Example #30
# Trying out try/except statements
# try/except is used here to catch errors other than the ones I anticipated.

from cv2 import cv2 as cv

frameWidth = 640
frameHeight = 480
cap = cv.VideoCapture(2)

try:
    while (cap.isOpened()):
        results, frame = cap.read()
        cv.imshow('Display Result', frame)
        if cv.waitKey(
                1
        ) == 27:  # 27 is the ASCII code for ESC: waitKey(1) pauses 1 ms, then ESC exits via break
            break

except:
    pass

finally:
    cap.release()  # when everything is done, release the capture
    cv.destroyAllWindows()  # often unnecessary with recent versions
Example #31
                        '\n')  #write Green.txt
            Red.write(np.binary_repr(r[i][j], width=8) + '\n')  #write Red.txt
            Blue.write(np.binary_repr(b[i][j], width=8) +
                       '\n')  #write Blue.txt
            Gray.write(float_to_bin(gray[i][j]) + '\n')  #write Gray.txt
    print("frame" + str(number) + " --> done")


cap = cv2.VideoCapture("video/source/one_punch.mp4")
if not cap.isOpened():
    print("video open fail")
else:
    number = 0
    ret, frame = cap.read()
    y, x, _ = frame.shape
    while True:
        if ret:
            b, g, r = cv2.split(frame)
            grayscale_frame = rgb_to_grayscale(r, g, b)
            cv2.imshow('origin video', frame)
            cv2.imshow('grayscale video', grayscale_frame)
            save_img(x, y, number, r, g, b, grayscale_frame)
            number = number + 1
            ret, frame = cap.read()
            if cv2.waitKey(25) & 0xFF == ord('q'):  # press Q to exit
                break
        else:
            save_info(x, y, number)
            break
cap.release()
cv2.destroyAllWindows()