Example No. 1
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    cv2.imshow("Blur", gray)
    state = 0

    if frame_1 is None:
        frame_1 = gray
        continue
    delta_frame = cv2.absdiff(gray, frame_1)

    cv2.imshow("Delta", delta_frame)

    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=3)

    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 2000:
            continue
        state = 1
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)
    status.append(state)

    if status[-1] != status[-2]:  # motion state flipped (0 -> 1 or 1 -> 0)
        times.append(dt.now())

    cv2.imshow("Initial Frame", frame_1)
    cv2.imshow("Threshold", thresh_frame)
Example No. 2
# hull = cv2.convexHull(cnt,returnPoints = False)
# defects = cv2.convexityDefects(cnt,hull)
# Note

# Remember we have to pass returnPoints = False while finding the convex hull, in order to find convexity defects.
# It returns an array where each row contains [start point, end point, farthest point, approximate distance to the farthest point]. We can visualize this on an image: draw a line joining the start and end points, then a circle at the farthest point. Remember the first three values are indices into cnt, so we have to look the actual points up in cnt.

from cv2 import cv2
import numpy as np

img_star = cv2.imread('resource/star.png')
img = img_star.copy()
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]

hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)

for i in range(defects.shape[0]):
    s, e, f, d = defects[i, 0]
    start = tuple(cnt[s][0])
    end = tuple(cnt[e][0])
    far = tuple(cnt[f][0])
    cv2.line(img, start, end, [0, 255, 0], 2)
    cv2.circle(img, far, 5, [0, 0, 255], -1)

cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example No. 3
import numpy as np
from cv2 import cv2

img = cv2.imread('detect_blob.png', 1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # imread returns BGR, so convert from BGR

thresh_img = cv2.adaptiveThreshold(
    gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115,
    1)  # Gaussian adaptive threshold with a binary output type
# Make sure to binarize the image before using contour functions
cv2.imshow("Adaptive Thresh Image", thresh_img)

#Contours
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

img2 = img.copy()
index = -1  #if negative all contours are drawn, index of contour to be drawn
thickness = 4
color = (255, 0, 255)
cv2.drawContours(img2, contours, index, color, thickness)
cv2.imshow('Contours', img2)
print("Contours", contours)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example No. 4
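This fragment uses cap and kernel_dil without defining them; a hedged sketch of the assumed setup (file name and kernel size are guesses):

import cv2
import numpy as np

cap = cv2.VideoCapture("traffic.mp4")   # assumed video source
kernel_dil = np.ones((5, 5), np.uint8)  # assumed dilation kernel used below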
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

# GAUSSIAN MIXTURE BASED BACKGROUND AND FOREGROUND
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()

    if ret:
        # apply fgbg
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

        # find contours
        dilation = cv2.dilate(fgmask, kernel_dil, iterations=1)
        contours, _ = cv2.findContours(dilation, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

        # loop and draw box
        for con in contours:
            x, y, w, h = cv2.boundingRect(con)
            # if the contour area is < 1350, don't draw a box
            if cv2.contourArea(con) < 1350:
                continue
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # draw text on frame
            cv2.putText(frame, "Status : {}".format('Movement'), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)

            cv2.putText(frame, "w={},h={}".format(w, h), (x, y - 10),
                        cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
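The excerpt ends mid-loop; a hedged sketch of its assumed remainder (window name and wait time are guesses):

        cv2.imshow("frame", frame)
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
    else:  # no frame could be read
        break

cap.release()
cv2.destroyAllWindows()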
Example No. 5
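main() relies on module-level objects the excerpt omits (camera, face_cascade, args, Direction, WindowAction). A hedged sketch, with assumed argument defaults and stubbed window actions:

import logging
import time
from enum import Enum

import cv2
import imutils

class Direction(Enum):
    Null = 0
    Left = 1
    Right = 2
    Up = 3
    Down = 4

class WindowAction:
    # hypothetical stubs; the original presumably sends OS hotkeys
    @staticmethod
    def nextDesktop(): pass
    @staticmethod
    def previousDesktop(): pass
    @staticmethod
    def maximizeWindow(): pass
    @staticmethod
    def minimizeWindow(): pass

args = {"blur": 21, "threshold": 25, "dilation": 2, "min_area": 500,
        "max_area": 50000, "wait": 1000, "min_sample": 5}  # assumed defaults
camera = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")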
def main():
    previousFrame = None
    previous_x, previous_y = -1, -1
    previous_dx, previous_dy = 0, 0
    largest_area_x = 0
    largest_area_y = 0

    direction_log = []
    last_movement = time.time() * 1000
    while True:

        grabbed, frame = camera.read()
        frame = imutils.resize(frame, width=500)
        frame = cv2.flip(frame, 1)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 3, 0)
        gray = cv2.GaussianBlur(gray, (args["blur"], args["blur"]), 0)
        # if the first frame is None, initialize it
        if previousFrame is None:
            previousFrame = gray
            continue
        # compute the absolute difference between the current frame and
        # the previous frame (previousFrame is updated every iteration)
        frameDelta = cv2.absdiff(previousFrame, gray)
        thresh = cv2.threshold(frameDelta, args["threshold"], 255,
                               cv2.THRESH_BINARY)[1]
        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=args["dilation"])
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        previousFrame = gray
        frameDelta = cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2RGB)
        # loop over the contours
        largest_area = 0
        largest_area_x, largest_area_y = -1, -1
        for c in cnts:
            # if the contour is too small, ignore it
            area = cv2.contourArea(c)
            if area < args["min_area"]:
                continue
            if area > args["max_area"]:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(
                frame,
                str(area),
                (x, y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 255, 0),
                2,
            )
            if area > largest_area:
                largest_area_x, largest_area_y = x, y
                largest_area = area
            last_movement = time.time() * 1000

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("Frame", frame)
        cv2.imshow("Frame Delta", frameDelta)
        cv2.imshow("Thresh", thresh)

        dx, dy = largest_area_x - previous_x, largest_area_y - previous_y
        previous_x, previous_y = largest_area_x, largest_area_y

        # smooth with the previous deltas before overwriting them
        dx, dy = previous_dx * 0.3 + dx * 0.7, previous_dy * 0.3 + dy * 0.7
        previous_dx, previous_dy = dx, dy
        # dx, dy = dx/2, dy/2
        dx, dy = int(dx), int(dy)

        if dx != 0 or dy != 0:
            direction = Direction.Null
            if abs(dx) > abs(dy):
                direction = Direction.Left if dx < 0 else Direction.Right
            else:
                direction = Direction.Up if dy < 0 else Direction.Down
            logging.debug("{}, dx={}, dy={}".format(direction, dx, dy))
            direction_log.append(direction)

        if time.time() * 1000 - last_movement > args["wait"]:
            if len(direction_log) > args["min_sample"]:
                direction_result = max(direction_log, key=direction_log.count)
                logging.info("Result: {}".format(direction_result))
                if direction_result == Direction.Right:
                    WindowAction.previousDesktop()
                if direction_result == Direction.Left:
                    WindowAction.nextDesktop()
                if direction_result == Direction.Up:
                    WindowAction.maximizeWindow()
                if direction_result == Direction.Down:
                    WindowAction.minimizeWindow()
                # for e in Direction:
                #     logging.info("Result: {} {}x".format(e, direction_log.count(e)))
            direction_log.clear()

        k = cv2.waitKey(10) & 0xFF
        if k == 27:
            break
    camera.release()
    cv2.destroyAllWindows()
Example No. 6
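org1, show_process, and the imshow helper are not shown; a plausible setup:

import cv2
import numpy as np

def imshow(img):
    # hypothetical debug helper: show an image and wait for a key press
    cv2.imshow("debug", img)
    cv2.waitKey(0)

show_process = True
org1 = cv2.imread("paper.jpg")  # assumed input photo of the document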
paper1 = cv2.cvtColor(org1, cv2.COLOR_BGR2GRAY)

# Canny edge detection
paper1 = cv2.Canny(paper1, 80, 150)
if show_process:
    imshow(paper1)

# Dilation
kernel = np.ones((3, 3), np.uint8)
paper1 = cv2.dilate(paper1, kernel, iterations=1)
if show_process:
    imshow(paper1)

# Contour detection (OpenCV 3 signature: findContours returns image, contours, hierarchy)
cnts = cv2.findContours(paper1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]

# Sort the contours by area, largest first
cnts = sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True)

# Draw the largest contour
show = org1.copy()
show = cv2.drawContours(show, cnts, 0, (0, 255, 0), 1)

if show_process:
    imshow(show)

cnt = []

# The step is set to 0.0001 × the perimeter; in general epsilon = 0.001 × the perimeter is used
step = 0.0001 * cv2.arcLength(cnts[0], True)
Example No. 7
def gestos_mano():
    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture(1)

    bg = None
    # Colors for visualization
    color_start = (204, 204, 0)
    color_end = (204, 0, 204)
    color_far = (255, 0, 0)
    color_start_far = (204, 204, 0)
    color_far_end = (204, 0, 204)
    color_start_end = (0, 255, 255)
    color_contorno = (0, 255, 0)
    color_ymin = (0, 130, 255)  # Highest point of the contour
    # color_angulo = (0,255,255)
    # color_d = (0,255,255)
    color_fingers = (0, 255, 255)

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # Resize the image to a width of 640
        frame = imutils.resize(frame, width=640)
        frame = cv2.flip(frame, 1)
        frameAux = frame.copy()

        if bg is not None:

            # Define the region of interest
            ROI = frame[50:300, 380:600]
            cv2.rectangle(frame, (380 - 2, 50 - 2), (600 + 2, 300 + 2),
                          color_fingers, 1)
            grayROI = cv2.cvtColor(ROI, cv2.COLOR_BGR2GRAY)

            # Region of interest of the background image
            bgROI = bg[50:300, 380:600]

            # Compute the binary image (background vs foreground)
            dif = cv2.absdiff(grayROI, bgROI)
            _, th = cv2.threshold(dif, 30, 255, cv2.THRESH_BINARY)
            th = cv2.medianBlur(th, 7)

            # Find the contours of the binary image
            cnts, _ = cv2.findContours(th, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:1]
            for cnt in cnts:
                # Find the center of the contour
                M = cv2.moments(cnt)
                if M["m00"] == 0:  # avoid division by zero
                    M["m00"] = 1
                x = int(M["m10"] / M["m00"])
                y = int(M["m01"] / M["m00"])
                cv2.circle(ROI, (x, y), 5, (0, 255, 0), -1)

                # Highest point of the contour
                ymin = cnt.min(axis=1)
                cv2.circle(ROI, tuple(ymin[0]), 5, color_ymin, -1)

                # Hull found via cv2.convexHull
                hull1 = cv2.convexHull(cnt)
                cv2.drawContours(ROI, [hull1], 0, color_contorno, 2)

                # Convexity defects
                hull2 = cv2.convexHull(cnt, returnPoints=False)
                defects = cv2.convexityDefects(cnt, hull2)

                # Continue only if convexity defects exist
                if defects is not None:
                    inicio = []  # Stores the start points of the convexity defects
                    fin = []  # Stores the end points of the convexity defects
                    fingers = 0  # Counter for the number of raised fingers
                    for i in range(defects.shape[0]):

                        s, e, f, d = defects[i, 0]
                        start = cnt[s][0]
                        end = cnt[e][0]
                        far = cnt[f][0]
                        # Find the triangle associated with each convexity defect to determine its angle
                        a = np.linalg.norm(far - end)
                        b = np.linalg.norm(far - start)
                        c = np.linalg.norm(start - end)

                        angulo = np.arccos(
                            (np.power(a, 2) + np.power(b, 2) - np.power(c, 2))
                            / (2 * a * b))
                        angulo = np.degrees(angulo)
                        angulo = int(angulo)

                        # Discard convexity defects according to the distance
                        # between the start, end and farthest points, the angle, and d
                        if np.linalg.norm(start - end) > 20 and angulo < 90 and d > 12000:
                            # Store all the start and end points that were obtained
                            inicio.append(start)
                            fin.append(end)

                            # Visualization of the various data obtained
                            # cv2.putText(ROI,'{}'.format(angulo),tuple(far), 1, 1.5,color_angulo,2,cv2.LINE_AA)
                            # cv2.putText(ROI,'{}'.format(d),tuple(far), 1, 1.1,color_d,1,cv2.LINE_AA)
                            cv2.circle(ROI, tuple(start), 5, color_start, 2)
                            cv2.circle(ROI, tuple(end), 5, color_end, 2)
                            cv2.circle(ROI, tuple(far), 7, color_far, -1)
                            # cv2.line(ROI,tuple(start),tuple(far),color_start_far,2)
                            # cv2.line(ROI,tuple(far),tuple(end),color_far_end,2)
                            # cv2.line(ROI,tuple(start),tuple(end),color_start_end,2)
                    # If no start (or end) points were stored, there may be
                    # 0 raised fingers or 1 raised finger
                    if len(inicio) == 0:
                        minY = np.linalg.norm(ymin[0] - [x, y])
                        if minY >= 110:
                            fingers = fingers + 1
                            cv2.putText(ROI, '{}'.format(fingers),
                                        tuple(ymin[0]), 1, 1.7,
                                        (color_fingers), 1, cv2.LINE_AA)

                    # If start points were stored, count the number of raised fingers
                    for i in range(len(inicio)):
                        fingers = fingers + 1
                        cv2.putText(ROI, '{}'.format(fingers),
                                    tuple(inicio[i]), 1, 1.7, (color_fingers),
                                    1, cv2.LINE_AA)
                        if i == len(inicio) - 1:
                            fingers = fingers + 1
                            cv2.putText(ROI, '{}'.format(fingers),
                                        tuple(fin[i]), 1, 1.7, (color_fingers),
                                        1, cv2.LINE_AA)

                    # Show the number of raised fingers in the left rectangle
                    if fingers == 0:
                        cv2.putText(frame, 'Auxilio', (390, 45), 1, 2,
                                    color_fingers, 2, cv2.LINE_AA)
                    elif fingers == 1:
                        cv2.putText(frame, 'Estoy bien!', (390, 45), 1, 2,
                                    color_fingers, 2, cv2.LINE_AA)
                    elif fingers == 2:
                        if angulo > 60:
                            cv2.putText(frame, 'Tengo hambre', (390, 45), 1, 2,
                                        color_fingers, 2, cv2.LINE_AA)
                        elif angulo < 40:
                            cv2.putText(frame, 'Tengo sueño', (390, 45), 1, 2,
                                        color_fingers, 2, cv2.LINE_AA)
                    elif fingers == 3:
                        cv2.putText(frame, 'Te quiero', (390, 45), 1, 2,
                                    color_fingers, 2, cv2.LINE_AA)
                    elif fingers == 4:
                        cv2.putText(frame, 'Gesto desconocido', (300, 45), 1, 2,
                                    color_fingers, 2, cv2.LINE_AA)
                    else:
                        cv2.putText(frame, 'Hola mundo!', (300, 45), 1, 2,
                                    color_fingers, 2, cv2.LINE_AA)

            cv2.imshow('th', th)
        cv2.imshow('Frame', frame)
        k = cv2.waitKey(20)
        if k == ord('i'):
            bg = cv2.cvtColor(frameAux, cv2.COLOR_BGR2GRAY)
        if k == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
Example No. 8
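This snippet references module-level names that the excerpt omits (redLower, redUpper, pts, mybuffer, handler). A hedged sketch, assuming the usual HSV red range and a deque-based trail:

import sys
import time
import traceback
from collections import deque

import av
import cv2
import numpy as np
import tellopy

mybuffer = 64                 # assumed trail length
pts = deque(maxlen=mybuffer)  # tracked centers, newest first
redLower = (0, 100, 100)      # assumed HSV lower bound for red
redUpper = (10, 255, 255)     # assumed HSV upper bound for red

def handler(event, sender, data, **args):
    # minimal flight-data logger (the original handler is not shown)
    print(data)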
def main():
    drone = tellopy.Tello()
    try:
        drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
        drone.connect()
        drone.wait_for_connection(30.0)
        # set the connection wait time; an error is raised on timeout

        drone.takeoff()
        time.sleep(3)

        container = av.open(drone.get_video_stream())
        # skip first 300 frames
        frame_skip = 300
        while True:

            for frame in container.decode(video=0):

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                start_time = time.time()
                # decode the av frame into a BGR numpy array before using OpenCV on it
                img = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
                hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, redLower, redUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

                if len(cnts) > 0:
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    if radius > 10:
                        cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                        cv2.circle(img, center, 5, (0, 0, 255), -1)
                        pts.appendleft(center)
                else:
                    pts.clear()

                length = len(pts)
                for i in range(1, length):
                    if pts[i - 1] is None or pts[i] is None:
                        continue
                    thickness = int(np.sqrt(mybuffer / float(i + 1)) * 2.5)
                    cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), thickness)
                cv2.imshow('Frame', img)
                image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                cv2.imshow('Original', image)
                # cv2.imshow('Canny', cv2.Canny(image, 100, 200))

                interrupt = cv2.waitKey(10)
                if interrupt & 0xFF == ord('q'):
                    drone.down(30)
                    time.sleep(3)
                    drone.land()
                    time.sleep(3)
                    print('successfully land!')
                    break
                frame_skip = int((time.time() - start_time)/frame.time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example No. 9
# Convert the image to grayscale for edge detection
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# For edge detection it is recommended to smooth the image first for better results
blur = cv2.GaussianBlur(gray, (5, 5), 0)

# Fetch the edges of the image
# Canny keeps edges whose intensity is above the maximum threshold
# Pixels between the minimum and maximum thresholds are kept only if they connect to a strong edge

edge = cv2.Canny(blur, 50, 80)
cv2.imshow("Canny Edge", edge)
cv2.waitKey(0)

# Find the contours in the image
# CHAIN_APPROX_SIMPLE stores only the key points of each contour instead of the whole contour
contour, hierarchy = cv2.findContours(edge, cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
contour = sorted(contour, key=cv2.contourArea, reverse=True)

# Each contour is checked; the first closed contour forming a square or rectangle is taken as the target
for c in contour:
    p = cv2.arcLength(c, True)
    # Not all contours are perfect, so a little approximation is used
    approx = cv2.approxPolyDP(c, 0.02 * p, True)

    if len(approx) == 4:
        target = approx
        break

approx = mapp(target)

# We define the range of the window (Top left, Top Right, Bottom Right, Bottom Left)
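mapp is not defined in the excerpt; per the comment above, it orders the four corners as top left, top right, bottom right, bottom left. A plausible sketch:

import numpy as np

def mapp(pts):
    # order 4 points as: top-left, top-right, bottom-right, bottom-left
    pts = pts.reshape(4, 2)
    ordered = np.zeros((4, 2), dtype=np.float32)
    s = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(s)]  # top-left has the smallest x + y
    ordered[2] = pts[np.argmax(s)]  # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(d)]  # top-right has the smallest y - x
    ordered[3] = pts[np.argmax(d)]  # bottom-left has the largest y - x
    return ordered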
Example No. 10
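The fragment below is indented because it runs inside a camera loop; a hedged sketch of that assumed context (kamera and gelesen are hypothetical names):

import cv2
import numpy

kamera = cv2.VideoCapture(0)  # hypothetical camera handle
while True:
    gelesen, dieser_frame = kamera.read()
    if not gelesen:
        break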
        ####################################
        # Green detection with contours    #
        ####################################

        frame_als_hsv = cv2.cvtColor(dieser_frame, cv2.COLOR_BGR2HSV)
        #                                H  S  V
        untere_schwelle = numpy.uint8([40, 80, 20])
        obere_schwelle = numpy.uint8([80, 255, 255])
        gruen_maske = cv2.inRange(frame_als_hsv, untere_schwelle, obere_schwelle)

        BLAU = (255, 0, 0)
        ROT = (0, 0, 255)
        GRUEN = (0, 255, 0)
        # Extract the contours (OpenCV 3 signature: returns image, contours, hierarchy)
        _, konturen, _ = cv2.findContours(gruen_maske, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Draw the contours (polygons) in blue
        # ("-1" means all contours, "2" is the thickness of the line to draw)
        cv2.drawContours(dieser_frame, konturen, -1, BLAU, 2)
        for kontur in konturen:
            # filter out contours that are too small
            if cv2.contourArea(kontur) > 4000:
                # extract the dimensions of the rectangle around the contour
                x, y, w, h = cv2.boundingRect(kontur)
                # draw a rectangle around the contour
                cv2.rectangle(dieser_frame, (x, y), (x + w, y + h), ROT, 2)

        # Show the camera image
        cv2.imshow("Maze Runner", dieser_frame)

        # Drive the motors
Example No. 11
img = cv2.imread("fuzzy.png",1) 

hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)

hsv_split = np.concatenate((hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2]), axis=1)
cv2.imshow("HSV Split", hsv_split)

# Dilating the saturation channel and thresholding to get the segmentation
kernel = np.ones((3, 3), 'uint8')
dilate = cv2.dilate(hsv[:, :, 1], kernel, iterations=1)
cv2.imshow("S Dilation", dilate)
ret, dilate_thresh = cv2.threshold(dilate, 200, 255, cv2.THRESH_BINARY)
cv2.imshow("S Dilate Threshold", dilate_thresh)


# Contours
canvas = np.ones(img.shape, 'uint8') * 255
contours, hierarchy = cv2.findContours(dilate_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    area = cv2.contourArea(c)
    perim = cv2.arcLength(c, True)
    print("Perimeter: {}, Area: {}".format(perim, area))
    if area > 1000:
        cv2.drawContours(canvas, [c], -1, (rand.randint(0, 255), 0, rand.randint(0, 255)), -1)

cv2.imshow("FINAL RESULT",canvas)


cv2.waitKey(0)
cv2.destroyAllWindows()
Example No. 12
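video, image, and blackimage are used below without being defined; a plausible setup (file names are guesses):

import os
import cv2
import numpy as np

video = cv2.VideoCapture("input.mp4")                         # assumed source video
image = cv2.resize(cv2.imread("background.jpg"), (640, 480))  # assumed replacement background
blackimage = np.zeros((480, 640, 3), np.uint8)                # assumed black canvas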
path = 'C:/Users/Aldrin Gwapo/Documents/Thesis 1/processed_frames'
count = 0
while True:  # press CTRL+C at the console/terminal to force-stop the loop
    ret, frame = video.read()
    if not ret:  # stop once the video runs out of frames
        break

    frame = cv2.resize(frame, (640, 480))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    l_green = np.array([46, 34, 27])
    u_green = np.array([82, 255, 255])

    mask = cv2.inRange(hsv, l_green, u_green)
    edges = cv2.Canny(mask, 150, 200)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    f = frame - res
    f = np.where(f == 0, blackimage, image)
    cv2.drawContours(f, contours, -1, (255, 255, 255), 3)

    count += 1
    cv2.imwrite(os.path.join(path, "ALframe" + str(count) + ".jpg"), f)

    # cv2.imshow("video", f)      # uncomment to view the frames as they are written
    # cv2.imshow("edges", edges)

    # k = cv2.waitKey(1)
    # if cv2.waitKey(25) == 27:   # uncomment if viewing frames; press Escape to stop
Example No. 13
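Assumed imports for this example (not shown in the excerpt):

import sys
import time
import traceback

import av
import cv2
import numpy as np
import tellopy
from matplotlib import pyplot as plt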
def main():
  drone = tellopy.Tello()
   
  try:
    drone.connect()
    drone.wait_for_connection(60.0)
    
    retry = 3
    container = None
    while container is None and 0 < retry:
      retry -= 1
      try:
        # start receiving the video stream
        container = av.open(drone.get_video_stream())
      except av.AVError as ave:
        print(ave)
        print('retry...')
          
    frame_skip = 300
    while True:
      for frame in container.decode(video=0):
        if 0 < frame_skip:  # frame-skip handling
          frame_skip = frame_skip - 1
          continue
        start_time = time.time()
        #img_gray  = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        image_origin = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
        
        img_mask = cv2.GaussianBlur(image_origin, (15, 15), 0)  # filter kernel size and sigma
        canny = cv2.Canny(img_mask, 100, 150)

        # Color extraction
        def red_detect(img_bgr):
          # lower / upper bounds of the color to extract (BGR)
          RGBLower1 = np.array([62, 60, 100])
          RGBUpper1 = np.array([75, 80, 137])
          mask1 = cv2.inRange(img_bgr, RGBLower1, RGBUpper1)

          #RGBLower2 = np.array([202, 185, 115])
          #RGBUpper2 = np.array([255, 255, 148])
          #mask2 = cv2.inRange(image_origin, RGBLower2, RGBUpper2)

          return mask1

        mask = red_detect(img_mask)

        #img_mask = cv2.GaussianBlur(mask, (15, 15), 0)  #フィルタの中の引数
        #canny = cv2.Canny(img_mask, 200, 300)

        # NOTE: this creates a new matplotlib figure every frame without showing it
        fig = plt.figure(figsize=(15, 15))
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(mask)
        ax.axis("off")


        # Contour extraction (OpenCV 3 signature: returns image, contours, hierarchy)
        image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        for i, contour in enumerate(contours):
          cv2.drawContours(image_origin, contours, i, (255, 0, 0), 5)

          S = cv2.contourArea(contour)
          if S > 0:
            print(S)
      
        # for contour in contours:
        #   x, y, w, h = cv2.boundingRect(contour)
        #   cv2.rectangle(image_origin, (x, y), (x + w, y + h), (0, 255, 0), 3)
        #   print(x)
        #   print(y)
        #   print(w)
        #   print(h)

        # Harris corner detection
        # mask = np.uint16(mask)
        # gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        # mask = np.float32(mask)
        # dst = cv2.cornerHarris(mask, 2, 9, 0.16)

        # mark the corners with blue points
        # mask[dst > 0.01 * dst.max()] = [255, 0, 0]

        # find the blue pixels
        # coord = np.where(np.all(mask == (255, 0, 0), axis=-1))

        # print the coordinates
        # for i in range(len(coord[0])):
        #   print("X:%s Y:%s" % (coord[1][i], coord[0][i]))

        # for contour in contours:
        #   x, y, w, h = cv2.boundingRect(contour)
        #   cv2.rectangle(image_origin, (x, y), (x + w, y + h), (255, 0, 0), 3)
        #   S = w * h
        #   print("x: " + str(x))
        #   print("y: " + str(y))
        #   print("w: " + str(w))
        #   print("h: " + str(h))
        #   print("S: " + str(S))

        # GoodFeaturesToTrack
        # corners = cv2.goodFeaturesToTrack(canny, 100000000, 0.01, 10)
        # corners = np.int0(corners)
        # for i in corners:
        #   x, y = i.ravel()
        #   cv2.circle(img, (x, y), 3, 255, -1)
        # print(x, y)

        cv2.imshow('mask', mask) 
        cv2.imshow('image_origin', image_origin)
        cv2.waitKey(1)
        
        if frame.time_base < 1.0/60:
            time_base = 1.0/60
        else:
            time_base = frame.time_base
        # compute the frame-skip value
        frame_skip = int((time.time() - start_time) / time_base)

  except Exception as ex:
    exc_type, exc_value, exc_traceback = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_traceback)
    print(ex)
  finally:
    drone.quit()
    cv2.destroyAllWindows()
Example No. 14
def tracker(video_path,
            background_full,
            rois,
            threshold=5,
            display=True,
            area_size=0,
            split_range=False):
    """ Function that takes a video path, a background file, rois, threshold and display switch. This then uses
    background subtraction and centroid tracking to find the XZ coordinates of the largest contour. Saves out a csv file
     with frame #, X, Y, contour area"""

    print("tracking {}".format(video_path))

    # As the camera key is often excluded, check here and add a placeholder if missing
    if len(rois) == 1:
        rois['cam'] = 'unknown'

    # load video
    video = cv2.VideoCapture(video_path)

    if display:
        # create display window
        cv2.namedWindow("Live thresholded")
        cv2.namedWindow("Live")

    # as there can be multiple rois, the data for each roi are kept in lists
    data = list()
    frame_id = 0
    for roi in np.arange(0, len(rois) - 1):
        data.append(list())

    if split_range is False:
        total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        split_range = [0, total + 1]
        split_name = False
    else:
        split_name = True

    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            print("reached end of video")
            video.release()
            break
        if frame_id in np.arange(split_range[0], split_range[1]):
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frameDelta_full = cv2.absdiff(background_full, gray)

            # tracking
            cx = list()
            cy = list()
            contourOI = list()
            contourOI_ = list()
            for roi in range(0, len(rois) - 1):
                # for the frame define an ROI and crop image
                curr_roi = rois["roi_" + str(roi)]
                frameDelta = frameDelta_full[curr_roi[1]:curr_roi[1] +
                                             curr_roi[3],
                                             curr_roi[0]:curr_roi[0] +
                                             curr_roi[2]]
                image_thresholded = cv2.threshold(frameDelta, threshold, 255,
                                                  cv2.THRESH_TOZERO)[1]
                (contours, _) = cv2.findContours(image_thresholded,
                                                 cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)
                if len(contours) > 0:
                    contourOI_.append(max(contours, key=cv2.contourArea))
                    area = cv2.contourArea(contourOI_[roi])
                    if area > area_size:
                        contourOI.append(cv2.convexHull(contourOI_[roi]))
                        M = cv2.moments(contourOI[roi])
                        cx.append(int(M["m10"] / M["m00"]))
                        cy.append(int(M["m01"] / M["m00"]))
                        data[roi].append((frame_id, cx[roi], cy[roi], area))
                    else:
                        print(
                            "no large enough contour found for roi {}!".format(
                                roi))
                        data[roi].append((frame_id, np.nan, np.nan, np.nan))
                        contourOI_[-1] = False
                        contourOI.append(False)
                        cx.append(np.nan)
                        cy.append(np.nan)
                else:
                    print("no contour found for roi {}!".format(roi))
                    data[roi].append((frame_id, np.nan, np.nan, np.nan))
                    contourOI_.append(False)
                    contourOI.append(False)
                    cx.append(np.nan)
                    cy.append(np.nan)

            if frame_id % 500 == 0:
                print("Frame {}".format(frame_id))
            if display:
                full_image_thresholded = (cv2.threshold(
                    frameDelta_full, threshold, 255, cv2.THRESH_TOZERO)[1])
                # Live display of full resolution and ROIs
                cv2.putText(full_image_thresholded,
                            "Framenum: {}".format(frame_id),
                            (30, full_image_thresholded.shape[0] - 30),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.5,
                            color=255)

                for roi in range(0, len(rois) - 1):
                    if np.all(contourOI_[roi] != False):
                        curr_roi = rois["roi_" + str(roi)]
                        # add in contours
                        corrected_contour = np.empty(contourOI_[roi].shape)
                        corrected_contour[:, 0,
                                          0] = contourOI_[roi][:, 0,
                                                               0] + curr_roi[0]
                        corrected_contour[:, 0,
                                          1] = contourOI_[roi][:, 0,
                                                               1] + curr_roi[1]
                        cv2.drawContours(full_image_thresholded,
                                         corrected_contour.astype(int), -1,
                                         255, 1)

                        # add in centroid
                        cv2.circle(
                            full_image_thresholded,
                            (cx[roi] + curr_roi[0], cy[roi] + curr_roi[1]), 8,
                            255, 1)

                cv2.imshow("Live thresholded", full_image_thresholded)
                cv2.imshow("Live", gray)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        frame_id += 1

    # saving data
    print("Saving data output")
    date = datetime.datetime.now().strftime("%Y%m%d")

    for roi in range(0, len(rois) - 1):
        datanp = np.array(data[roi])
        if split_name is False:
            filename = video_path[
                0:-4] + "_tracks_{}_Thresh_{}_Area_{}_roi-{}.csv".format(
                    date, threshold, area_size, roi)
        else:
            range_s = str(split_range[0]).zfill(5)
            range_e = str(split_range[1]).zfill(5)
            filename = video_path[
                0:-4] + "_tracks_{}_Thresh_{}_Area_{}_Range{}-{}_.csv".format(
                    date, threshold, area_size, range_s, range_e)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        np.savetxt(filename, datanp, delimiter=",")

    print("Tracking finished on video cleaning up")
    cv2.destroyAllWindows()
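A hedged usage sketch (file names and roi values are illustrative; tracker itself additionally needs cv2, numpy, os and datetime imported at module level):

import cv2

background = cv2.imread("background.png", cv2.IMREAD_GRAYSCALE)  # assumed background
rois = {"roi_0": (100, 50, 400, 300)}  # (x, y, w, h); 'cam' is added automatically if missing
tracker("fish_video.mp4", background, rois,
        threshold=5, display=False, area_size=100)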
Example No. 15
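This fragment appears to be the body of a ROS publisher loop; hsv_img, frame, msg, pub, and r are assumed to be set up along these lines (topic and node names are hypothetical):

import cv2
import numpy as np
import rospy
from geometry_msgs.msg import Point

rospy.init_node("ball_tracker")                             # hypothetical node
pub = rospy.Publisher("/ball_center", Point, queue_size=1)  # hypothetical topic
msg = Point()
r = rospy.Rate(10)
cap = cv2.VideoCapture(0)
# per iteration: ret, frame = cap.read()
#                hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)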
    mask = cv2.inRange(hsv_img, (29, 102, 0), (48, 255, 255))

    ## Visualize the mask
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow("result", res)
    cv2.imshow("mask", mask)

    ## erosion and dilation (optional)
    ## Processing to get better contours
    kernel = np.ones((3, 3))
    erosion = cv2.erode(mask, kernel, iterations=1)
    dilation = cv2.dilate(erosion, kernel, iterations=1)
    mask = dilation.copy()

    ## Finding contours
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    if cnts:
        # Find the maximum area contour and drawing bounding rectangle
        C_max = max(cnts, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(C_max)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
        mid_x = int(x + (w / 2))
        mid_y = int(y + (h / 2))
        frame = cv2.circle(frame, (mid_x, mid_y),
                           radius=5,
                           color=(0, 0, 255),
                           thickness=-1)
        msg.x = mid_x
        msg.y = mid_y
        pub.publish(msg)
        r.sleep()
Example No. 16
import cv2 as cv
import numpy as np

img = cv.imread('imagen/wppstalkerlarge.jpg')

cv.imshow('stalker', img)

blank = np.zeros(img.shape, dtype='uint8')
cv.imshow('blank', blank)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

grayblurred = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)

# edges
canny = cv.Canny(grayblurred, 125, 175)
cv.imshow('CannyEdges', canny)
# edges can also be found by thresholding

# threshold
#ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
#cv.imshow('Thresh', thresh)

#contours
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contour(s) found!')

cv.drawContours(blank, contours, -1, (0,255,0), 1)
cv.imshow('contoursDrawn', blank)

cv.waitKey(0)
Example No. 17
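The fragment starts mid-pipeline; image, gray, gradX, and squrKernel are assumed to come from a setup like the following (kernel sizes are typical MRZ-detection values, not taken from the original):

import cv2
import imutils
import numpy as np

image = cv2.imread("passport.jpg")  # assumed input
image = imutils.resize(image, height=600)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)

rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
squrKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 21))

blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
gradX = (255 * ((gradX - gradX.min()) / (gradX.max() - gradX.min()))).astype("uint8")
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)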
    thresh = cv2.threshold(gradX, 0 , 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    # apply the close operation, this time using squrKernel, to close the gaps
    # between the MRZ lines, then erode to break apart connected components
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, squrKernel)
    thresh = cv2.erode(thresh, None, iterations=4)

    # during thresholding, it's possible that border pixels were
    # included in the thresholding, so let's set 5% of the left and
    # right borders to zero
    reducePercentage = int(image.shape[1] * 0.05)
    thresh[:, 0:reducePercentage] = 0
    thresh[:, image.shape[1] - reducePercentage:] = 0

    # find contours in the image and sort them by their size
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    ROI = []
    for contour in contours:
        # compute the bounding box, the aspect ratio, and the width relative to the image width
        (x, y, w, h) = cv2.boundingRect(contour)
        aspratio = w / float(h)
        crWidth = w / float(gray.shape[1])
        print(aspratio, crWidth)
        # check the acceptance criteria
        if aspratio > 5 and crWidth > 0.7 :
            # apply padding to re-grow the region, since erosions were used earlier
            pX = int((x + w) * 0.03)
            pY = int((y + h) * 0.03)
Example No. 18
def gameControllerFunction(topLeft, centerLeft, bottomLeft, topCenter, middle, bottomCenter, topRight, centerRight, bottomRight):

    print("Booting the Video stream,")
    vs = cv2.VideoCapture(0)  # start the video stream.
    time.sleep(2.0)  # set sleep time to 2.0 seconds

    while True:
        ret, frame = vs.read()  # Read off the frame from the video stream
        output = "None"
        key = None

        if frame is None:  # If there is no frame, save my pc from going through any stress at all
            break
        # otherwise, if we have a frame, we proceed with the following code
        # so much easier than open cv, keeping aspect ratio intact
        frame = imutils.resize(frame, width=700)
        # i want the mirror view, it's very helpful especially if i'm streaming
        frame = cv2.flip(frame, 1)

        windowDetails = cv2.getWindowImageRect('frame')
        # print(windowDetails)
        totalWidth = windowDetails[2]
        totalHeight = windowDetails[3]
        verLine1 = {
            'start': (totalWidth//3, 0),
            'end': (totalWidth//3, totalHeight)
        }
        verLine2 = {
            'start': (totalWidth//3 * 2, 0),
            'end': (totalWidth//3 * 2, totalHeight)
        }
        horLine1 = {
            'start': (0, totalHeight//3),
            'end': (totalWidth, totalHeight//3)
        }
        horLine2 = {
            'start': (0, totalHeight//3 * 2),
            'end': (totalWidth, totalHeight//3 * 2)
        }

        # processing the frame
        # blur helps to reduce high-frequency noise and definitely helps the model
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        # convert my color to the HSV format
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # Create a mask
        # mask other regions except colors in range of upper to lower (thresholding)
        mask = cv2.inRange(hsv, lower_color_boundary, upper_color_boundary)
        # Reduce noise caused by thresholding
        mask = cv2.erode(mask, None, iterations=2)
        # grow the found object back, i.e. further reduce noise
        mask = cv2.dilate(mask, None, iterations=2)

        contours = cv2.findContours(
            mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # find contours
        # Grab the contours using imutils
        contours = imutils.grab_contours(contours)
        center = None  # center is initially set to none
        if len(contours) > 0:  # if the contours list is not empty proceed
            # select contour with maximum Area, most likely our object
            contour = max(contours, key=cv2.contourArea)
            # pick up co-ordinates for drawing a circle around the object
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            M = cv2.moments(contour)  # Extract moments from the contour.
            if M["m00"] == 0:  # guard against a degenerate contour
                M["m00"] = 1
            # Obtain the centre of mass of the object.
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            if radius > 10:  # if we have a reasonable radius for the proposed object detected
                # Draw a circle to bound the Object
                cv2.circle(frame, (int(x), int(y)),
                           int(radius), (0, 255, 255), 2)
                # Draw a filled-in dot at the centre of the circle
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

        if center:
            if center[0] <= totalWidth//3:
                if center[1] <= totalHeight//3:
                    output = "Top Left"
                    key = topLeft
                elif center[1] >= totalHeight//3*2:
                    output = "Bottom Left"
                    key = bottomLeft
                else:
                    output = "Center Left"
                    key = centerLeft
            elif center[0] >= totalWidth//3*2:
                if center[1] <= totalHeight//3:
                    output = "Top Right"
                    key = topRight
                elif center[1] >= totalHeight//3*2:
                    output = "Bottom Right"
                    key = bottomRight
                else:
                    output = "Center Right"
                    key = centerRight
            else:
                if center[1] <= totalHeight//3:
                    output = "Top Center"
                    key = topCenter
                elif center[1] >= totalHeight//3*2:
                    output = "Bottom Center"
                    key = bottomCenter
                else:
                    output = "Center"
                    key = middle

        if key:
            key_arr = key.split('+')

            # Key Presses
            for k in key_arr:
                pyautogui.keyDown(k.strip())

            time.sleep(0.08)

            for k in key_arr:
                pyautogui.keyUp(k.strip())

        # Drawing the grid
        cv2.putText(frame,  output,  (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1,  (0, 0, 0),  2,  cv2.LINE_4)

        cv2.line(frame, verLine1['start'], verLine1['end'], (255, 255, 255), 5)
        cv2.line(frame, verLine2['start'], verLine2['end'], (255, 255, 255), 5)
        cv2.line(frame, horLine1['start'], horLine1['end'], (255, 255, 255), 5)
        cv2.line(frame, horLine2['start'], horLine2['end'], (255, 255, 255), 5)
        cv2.imshow("frame", frame)  # let's see the frame X frame

        # Closing a video frame
        key = cv2.waitKey(1)  # wait for the cv key
        if key == ord("q"):  # If the x button is pressed
            break  # Break from the loop

    vs.release()  # Let opencv release the video loader
    cv2.destroyAllWindows()  # Destroy all windows to close it
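The function references lower_color_boundary and upper_color_boundary as module-level globals; a hedged setup and usage sketch (HSV bounds and key combos are illustrative only):

import time

import cv2
import imutils
import numpy as np
import pyautogui

# assumed HSV bounds for the tracked object (here: a green ball)
lower_color_boundary = np.array([29, 86, 6])
upper_color_boundary = np.array([64, 255, 255])

gameControllerFunction(topLeft="up+left", centerLeft="left", bottomLeft="down+left",
                       topCenter="up", middle="space", bottomCenter="down",
                       topRight="up+right", centerRight="right", bottomRight="down+right")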
Example No. 19
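reorderPts is not shown; it must order the four detected corners to match dstQuad below (bottom-left, bottom-right, top-right, top-left). A plausible sketch, together with the imports the snippet needs:

import os
import sys

import numpy as np
import cv2 as cv

def reorderPts(pts):
    # order the corners to match dstQuad: bottom-left, bottom-right, top-right, top-left
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1).ravel()  # y - x
    tl = pts[np.argmin(s)]            # top-left: smallest x + y
    br = pts[np.argmax(s)]            # bottom-right: largest x + y
    tr = pts[np.argmin(d)]            # top-right: smallest y - x
    bl = pts[np.argmax(d)]            # bottom-left: largest y - x
    return np.array([bl, br, tr, tl], dtype=np.float32)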

root_path = os.getcwd()

img = cv.imread(root_path + '/image_processing/datas/images/namecard.png')

if img is None:
    print("Error opening video stream or file")
    sys.exit()

img = cv.resize(img, (0, 0), fx=0.3, fy=0.3)
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

_, img_binary = cv.threshold(img_gray, 0, 255,
                             cv.THRESH_BINARY | cv.THRESH_OTSU)
contours, _ = cv.findContours(img_binary, cv.RETR_EXTERNAL,
                              cv.CHAIN_APPROX_NONE)

dw, dh = 900, 500
import pytesseract
for pts in contours:
    if cv.contourArea(pts) > 5000:
        approx = cv.approxPolyDP(pts, cv.arcLength(pts, True) * 0.02, True)
        if len(approx) == 4 and cv.isContourConvex(approx):
            cv.polylines(img, [approx], True, (0, 0, 255), 2)
            srcQuad = reorderPts(approx.reshape(4, 2))
            dstQuad = np.array([[0, dh], [dw, dh], [dw, 0], [0, 0]],
                               dtype=np.float32)

            pers = cv.getPerspectiveTransform(srcQuad, dstQuad)
            img_dst = cv.warpPerspective(img, pers, (dw, dh))
Example No. 20
# Load image
img = cv2.imread('test2.jpg', cv2.IMREAD_COLOR)

# Resize image
#img = cv2.resize(img, (700, 580)) #Done with img = test5
img = cv2.resize(img, (620, 480)) #Done with img = test2

# Edge detection
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grey scale
gray = cv2.bilateralFilter(gray, 11, 17, 17)  # Blur to reduce noise
edged = cv2.Canny(gray, 30, 200)  # Perform Edge detection

# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
screenCnt = None
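# min_size and max_size are used below but never defined in this snippet;
# assumed plate-area bounds (hypothetical values):
min_size, max_size = 1000, 50000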

# loop over our contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.05 * peri, True)

    # if our approximated contour has four points, then
    # we can assume that we have found our screen
    if len(approx) == 4 and max_size > cv2.contourArea(c) > min_size:
        screenCnt = approx
        break
Example No. 21
img = cv2.GaussianBlur(bla, (3, 3), 0)
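# bla, blur, image, and images_dir are carried over from earlier (unshown) cells;
# plot_images is not defined either -- a plausible matplotlib helper:
import matplotlib.pyplot as plt

def plot_images(img1, img2):
    # show two images side by side
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.imshow(img1, cmap='gray')
    ax2.imshow(img2, cmap='gray')
    ax1.axis('off')
    ax2.axis('off')
    plt.show()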

laplacian = cv2.Laplacian(img, cv2.CV_8UC1)

edges = cv2.Canny(img, 100, 100)

plot_images(blur, edges)
cv2.imwrite("data/images/blr.png", laplacian)


image_pat = "{}/{}".format(images_dir, "blr.png")

edg = cv2.imread(image_pat)

# OpenCV 3 signature: findContours returns (image, contours, hierarchy)
_, cnts, new = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

image_copy = image.copy()

_ = cv2.drawContours(image_copy, cnts, -1, (106,0,255),2)

plot_images(image, image_copy)

cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:1000]

image_copy = image.copy()
_ = cv2.drawContours(image_copy, cnts, -1, (255,0,255),2)


plot_images(image, image_copy)
plate = None
Example No. 22
from cv2 import cv2
from PIL import Image
from pylab import *
from scipy.ndimage import filters
import pytesseract

pytesseract.pytesseract.tesseract_cmd = '/usr/local/bin/tesseract'
image = cv2.imread('ind_1.jpg')
cv2.imshow("Original", image)
im2 = filters.gaussian_filter(image, 0.2)

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

edged = cv2.Canny(im2, 170, 200)

contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]

numberplate = None  # initialized first, so it is defined even if nothing matches
for c in contours:
    length = cv2.arcLength(c, closed=True)
    approx = cv2.approxPolyDP(c, 0.02 * length, closed=True)
    if len(approx) == 4:  # Select the first contour with 4 corners
        numberplate = approx  # This is our approximate number plate contour
        break

# Drawing the selected contour on the original image
#cv2.drawContours(image, [numberplate], -1, (0,255,0), 3)
x = numberplate
a = min(x[0][0][1], x[2][0][1])