Example #1
import av
import cv2
import numpy
import tellopy


def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        while True:
            frame = next(container.decode(video=0))

            image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
            cv2.imshow('Original', image)
            cv2.imshow('Canny', cv2.Canny(image, 100, 200))
            cv2.waitKey(1)

    except Exception as ex:
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #2
while True:
    ret, frame_vga = videoIn.read()
    #ret2, frame2 = camera2.read()
    frame1 = frame_vga
    frame2 = frame_vga

    if not ret:
        break

    # Rectify each image using the rectification maps
    img1_rectified = cv2.remap(frame1, left_map1, left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(frame2, right_map1, right_map2,
                               cv2.INTER_LINEAR)

    # Convert the images to grayscale in preparation for StereoBM
    imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
    imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)
    #imgGrayL = cv2.equalizeHist(imgL)
    #imgGrayR = cv2.equalizeHist(imgR)

    # Run the grayscale images through a Gaussian filter
    imgGrayL = cv2.GaussianBlur(imgL, (5, 5), 0)  # Gaussian blur
    imgGrayR = cv2.GaussianBlur(imgR, (5, 5), 0)

    # Two trackbars for tuning different parameters and inspecting the effect
    # num = cv2.getTrackbarPos("num", "depth")
    # blockSize = cv2.getTrackbarPos("blockSize", "depth")
    # if blockSize % 2 == 0:
    #     blockSize += 1
    # if blockSize < 5:
    #     blockSize = 5
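The example is cut off before any disparity map is computed; as a minimal sketch of the step that would typically follow inside the loop (numDisparities and blockSize below are assumed values, not taken from the original):

    # Assumed continuation: compute and display a disparity map from the rectified pair
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(imgGrayL, imgGrayR)  # int16, fixed-point (disparity * 16)
    disp_view = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    cv2.imshow("depth", disp_view)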
Example #3
font = cv2.FONT_HERSHEY_SIMPLEX

# initiate the id counter
id = 0
# names related to IDs
names = ['None', 'Nirmal', 'Raj', 'Lochan', 'Gita', 'Ramesh']

webcam = cv2.VideoCapture(0)

while True:

    #Read current frame/picture
    successful_frame_read, frame = webcam.read()

    # Must convert images to grey-scale
    grayscaled_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces
    face_coordinates = trained_face_data.detectMultiScale(grayscaled_img,
                                                          scaleFactor=1.2,
                                                          minNeighbors=5,
                                                          minSize=(20, 20))
    for (x, y, w, h) in face_coordinates:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 2)
        id, confidence = face_recognizer.predict(grayscaled_img[y:y + h,
                                                                x:x + w])

        if (confidence < 100):
            id = names[id]
            confidence = "  {0}%".format(round(100 - confidence))
        else:
Example #4
smile_detector = cv2.CascadeClassifier('data/haarcascade_smile.xml')


def detect(gray, frame):
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        smiles = smile_detector.detectMultiScale(roi_gray, 1.8, 20)

        for (sx, sy, sw, sh) in smiles:
            cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)),
                          (0, 0, 255), 2)
    return frame


video_capture = cv2.VideoCapture(0)

while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('video', canvas)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
    if cv2.getWindowProperty('video', 1) == -1:
        break

video_capture.release()
cv2.destroyAllWindows()
Example #5
import numpy as np
from cv2 import cv2
from matplotlib import pyplot as plt

imgL = cv2.imread('resource/scene_l.jpg')
imgR = cv2.imread('resource/scene_r.jpg')
imgL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
imgR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL, imgR)
plt.imshow(disparity, 'gray')
plt.show()
Example #6
        print(e)

        continue

global keep_going
keep_going = True

time_of_last_detect = time.time()
time_before_search = 1000
while keep_going:
    for frame in container.decode(video=0):
        if frame_skip > 0:
            frame_skip -= 1
            continue
        start_time = time.time()
        image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)

        # resizing
        height, width, _ = image.shape
        image = cv2.resize(image, (int(720 * width / height), 720))
        height, width, _ = image.shape

        # human detector part
        boxes, scores, classes, num = odapi.processFrame(image)

        # display bounding boxes and the midpoint dot
        for i in range(len(boxes)):
            # Class 1 represents human
            if classes[i] == 1 and scores[i] > threshold:
                drone.clockwise(0)
                time_of_last_detect = time.time()
Example #7
 def ChangeImage(self, image):
     res = cv2.resize(image, (640, 480))
     img = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
     img = Image.fromarray(img)
     self.src = ImageTk.PhotoImage(image=img)
     self.frame['image'] = self.src
Example #8
def PILToCV2(image):
    """
    :param image: PIL image object
    :return: numpy array (cv2 accepts)
    """
    return cv.cvtColor(np.asarray(image), cv.COLOR_LAB2BGR)
Example #9
def _extract_hsl_value(image_path):
    image = cv2.imread(image_path)
    hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
    h, l, s = cv2.split(hls)
    return np.mean(h), np.mean(s), np.mean(l)
Example #10
def _extract_hsv_value(image_path):
    image = cv2.imread(image_path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    return np.mean(h), np.mean(s), np.mean(v)
Example #11
def process(im):
    global _old_x
    global _old_y
    global _old_w
    global _old_h
    global _old_lostsight
    #
    # Convert the image to HSV and then apply Thresholding - OpenCV standard is Blue-Green-Red
    #
    out = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    #
    out = cv2.inRange(
        out,
        (_hsv_threshold_hue[0], _hsv_threshold_sat[0], _hsv_threshold_val[0]),
        (_hsv_threshold_hue[1], _hsv_threshold_sat[1], _hsv_threshold_val[1]))
    #
    # mask out the area we want by slicing it out of the 'out' image
    out = out[_y1:_y2, _x1:_x2]
    # also draw this slice on the original image in Red (BGR)
    cv2.rectangle(im, _mask_p1, _mask_p2, (0, 0, 255), 1)

    # Now, let's find the contour - we only keeping/using contours for this time
    contours, _ = cv2.findContours(out,
                                   mode=cv2.RETR_EXTERNAL,
                                   method=cv2.CHAIN_APPROX_SIMPLE)
    # still need to do a 'STOP' condition check before going into this loop...but for now, skip that
    if len(contours) > 0:
        # Line Merging Test

        # locate the largest contour - for now we only use/deal with the largest one, which is a fairly big assumption
        c = max(contours, key=cv2.contourArea)
        # Then find the bounding rectangle that fits this contour - it encloses everything inside a rect.
        # - results go into x, y, width and height [_x, _y, _w and _h]
        cb_x, cb_y, cb_w, cb_h = cv2.boundingRect(c)
        # put this into the 'current or old' variables
        _old_x = cb_x
        _old_y = cb_y
        _old_w = cb_w
        _old_h = cb_h
        _old_lostsight = 0
    else:
        # no contours found; for now, just reuse the last bounding box - note: the stop condition still needs handling
        cb_x = _old_x
        cb_y = _old_y
        cb_w = _old_w
        cb_h = _old_h
        _old_lostsight = _old_lostsight + 1

    # the new bounding box contains the min/max of the targeted/tracked line points
    # cbbox = out[cb_y:cb_y+cb_h, cb_x:cb_x+cb_w]
    # Now, draw this new bounding box on the original image in GREEN
    cv2.rectangle(im, (cb_x, cb_y + _y1), (cb_x + cb_w, cb_y + cb_h + _y1),
                  (0, 255, 0), 1)

    sp = genMask(out, (cb_x, cb_y), (cb_x + cb_w, cb_y + cb_h))
    #cv2.rectangle(out, (cb_x, cb_y), (cb_x + cb_w, cb_y + cb_h), (0, 255, 255), 1)
    #print(contours)

    # debugging use - comment out for production
    # cv2.imshow("Filtered", out)
    # return out
    ## return cbbox
    return sp
Example #12
import sys
import numpy as np

from cv_helpers import show_compared_imgs, plt_show_img
from binarization import otsu_binarization, mask_binarization
from edge_detection import apply_prewitt
from cv2 import cv2

if __name__ == '__main__':
    try:
        img_path = sys.argv[1]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)

        binarized_img = otsu_binarization(img)
        edges_img = apply_prewitt(img)

        normalized_mask = mask_binarization(
            cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
        binary_mask = edges_img * normalized_mask

        show_compared_imgs(edges_img, binarized_img, binary_mask,
                           'Edges, Binary, Mask')

    except Exception as error:
        print(error)
Example #13
    k = cv.waitKey(0)
    if k == 27:
        cv.destroyAllWindows()


def nothing(x):
    pass


cv.namedWindow('win', cv.WINDOW_NORMAL)
# Create a trackbar
bar = cv.createTrackbar('equ', 'win', 0, 10, nothing)
img = cv.imread('image/hist.jpg', 0)

while True:
    x = cv.getTrackbarPos('equ', 'win')
    # Apply CLAHE equalization
    clahe = cv.createCLAHE(clipLimit=x, tileGridSize=(8, 8))
    cl = clahe.apply(img)
    cv.imshow('win', cl)
    key = cv.waitKey(10)
    if (key == 27):
        break

# Show a 2D histogram
img = cv.imread('image/hist.jpg')
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
hist = cv.calcHist([hsv], [0, 1], None, [150, 256], [0, 180, 0, 256])
plt.imshow(hist, interpolation='nearest')
plt.show()
Example #14
    result_image = res.reshape((img.shape))

    figure_size = 15
    plt.figure(figsize=(figure_size / 2, figure_size / 2))
    plt.subplot(1, 2, 1), plt.imshow(img)
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 2, 2), plt.imshow(result_image)
    plt.title('Segmented Image when K = %i' % K), plt.xticks([]), plt.yticks(
        [])
    plt.show()


original_image = cv2.imread(
    "2-venice-landmark-burano-island-canal-colorful-houses-and-boats-stevanzz-photography.jpg"
)
orig = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)

coord = crop_half(orig)
start_row = coord[0]
end_row = coord[1]
start_col = coord[2]
end_col = coord[3]

# Training image
img = orig[start_row:end_row, start_col:end_col]

# Testing image
greyimg = orig[coord[4]:coord[5], coord[6]:coord[7]]
greyimg = cv2.cvtColor(greyimg, cv2.COLOR_BGR2GRAY)
greyimg = cv2.cvtColor(greyimg, cv2.COLOR_GRAY2BGR)
Example #15
from __future__ import print_function
import cv2.cv2 as cv
import argparse

valorMedio = 127
valorMaximo = 255
window_name = 'Saida Limiar'

src = cv.imread('ancora.jpg')

if src is None:
    print('Erro ao abrir o arquivo!')
    exit(0)

src_cinza = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

dst = cv.adaptiveThreshold(src_cinza, valorMaximo,
                           cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11,
                           2)

for i in range(0, 9):
    dst = cv.adaptiveThreshold(dst, valorMaximo, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv.THRESH_BINARY, 11, 2)

cv.imshow(window_name, dst)
cv.waitKey()
'''
max_value = 255
max_type = 4
max_binary_value = 255
trackbar_type = 'Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted'
Example #16
def CV2ToPIL(image):
    """
    :param image: numpy array (cv2 accept)
    :return: PIL image object
    """
    return Image.fromarray(cv.cvtColor(image, cv.COLOR_BGR2LAB))
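Taken together with Example #8 above, CV2ToPIL stores the PIL image in Lab, and PILToCV2 undoes that with COLOR_LAB2BGR. A minimal round-trip sketch under that assumption (the file name is a placeholder):

import cv2 as cv
import numpy as np
from PIL import Image

bgr = cv.imread("sample.jpg")  # placeholder path
pil_lab = Image.fromarray(cv.cvtColor(bgr, cv.COLOR_BGR2LAB))    # CV2ToPIL
bgr_again = cv.cvtColor(np.asarray(pil_lab), cv.COLOR_LAB2BGR)   # PILToCV2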
Example #17
def _extract_Lab_value(image_path):
    image = cv2.imread(image_path)
    Lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    L, a, b = cv2.split(Lab)
    return np.mean(L), np.mean(a), np.mean(b)
Example #18
 def MakeImg(self, path):
     src = cv2.imread(path)
     src = cv2.resize(src, (640, 480))
     img = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
     img = Image.fromarray(img)
     self.src = ImageTk.PhotoImage(image=img)
Example #19
 def convert_bga_to_rgb(self, input_image):
     return cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
Example #20
def main():
    global star
    global cascPath
    posx = 0
    counter = 0
    drone = tellopy.Tello()
    counter1 = 0
    step = 200  # seed value, don't care :3
    step1 = 200
    stop = 0
    flag1 = True

    while star == 1:
        cc = str(ser.readline())
        if cc[2:][:-5] == "Calling Drone":
            print(cc[2:][:-5])
            star = 2
            break

    try:
        #Start Protocol
        drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
        drone.connect()
        drone.wait_for_connection(60.0)
        container = av.open(drone.get_video_stream())
        frame_skip = 300
        xdis = 200
        ydis = 150
        drone.takeoff()
        sleep(5)
        drone.up(18)
        sleep(5)
        drone.up(0)
        sleep(1)
        countface = 0
        # End Start Protocol

        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                #imagep= cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_BGR2GRAY)

                faces = faceCascade.detectMultiScale(
                    image,
                    scaleFactor=1.7,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE)

                if (len(faces) == 0):
                    countface = 0
                    if (step != 0):
                        if (step == 1):
                            drone.backward(0)
                        elif (step == 3):
                            drone.up(0)
                        elif (step == 4):
                            drone.down(0)
                        elif (step == 5):
                            drone.left(0)
                        elif (step == 6):
                            drone.right(0)
                        elif (step == 7):
                            drone.up(0)
                            drone.right(0)
                        elif (step == 8):
                            drone.up(0)
                            drone.left(0)
                        elif (step == 9):
                            drone.down(0)
                            drone.right(0)
                        elif (step == 10):
                            drone.down(0)
                            drone.left(0)
                        elif (step == 11):
                            drone.forward(0)
                        else:
                            ...  #Nothing
                    step = 0
                    counter1 += 1
                    if counter1 >= 10 and flag1 == True:
                        flag1 = False
                        if posx < 430:
                            drone.counter_clockwise(0)
                        else:
                            drone.clockwise(0)
                else:
                    flag1 = True
                    counter1 = 0
                    for (x, y, w, h) in faces:
                        cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                        place = image[y:y + h, x:x + w]

                        #Save last X position in Memory
                        if (countface > 3):
                            posx = (x + (w / 2))
                        else:
                            countface += 1

                        signal = signalCascade.detectMultiScale(
                            image[y:y + h, x:x + w],
                            scaleFactor=3.5,
                            minNeighbors=20,
                            minSize=(20, 20),
                            flags=cv2.CASCADE_SCALE_IMAGE)

                        for (x, y, w, h) in signal:
                            cv2.rectangle(place, (x, y), (x + w, y + h),
                                          (255, 255, 0), 2)

                            if (counter == 10):
                                ...
                                #raise ValueError('Close Connection')
                                #break
                            elif (len(signal) != 0):
                                counter += 1
                            else:
                                counter = 0

                        if (w * h > 60000 and step != 1):
                            if (step == 0):
                                drone.clockwise(0)
                            elif (step == 1):
                                ...
                                #drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                                drone.counter_clockwise(10)
                                cascPath = cascPathStop
                                stop = 1
                            step = 1

                        elif (w * h > 40000 and step != 2):

                            if (step != 2):
                                if (step == 0):
                                    drone.counter_clockwise(0)
                                    drone.clockwise(0)
                                elif (step == 1):
                                    drone.backward(0)
                                elif (step == 3):
                                    drone.up(0)
                                elif (step == 4):
                                    drone.down(0)
                                elif (step == 5):
                                    drone.left(0)
                                elif (step == 6):
                                    drone.right(0)
                                elif (step == 7):
                                    drone.down(0)
                                    drone.right(0)
                                elif (step == 8):
                                    drone.down(0)
                                    drone.left(0)
                                elif (step == 9):
                                    drone.up(0)
                                    drone.right(0)
                                elif (step == 10):
                                    drone.up(0)
                                    drone.left(0)
                                elif (step == 11):
                                    drone.forward(0)
                                else:
                                    if (stop == 1):
                                        raise ValueError('Close Connection')
                                        break

                            if (y < ydis and step1 != 1 and x > xdis
                                    and (960 - x - w) > xdis):
                                step1 = 1
                                drone.up(12)
                                print("Up Stable")
                            elif ((720 - y - h) < ydis and step1 != 2
                                  and x > xdis and (960 - x - w) > xdis):
                                step1 = 2
                                drone.down(12)
                                print("Down Stable")
                            elif (x < xdis and step1 != 3
                                  and (720 - y - h) > ydis and y > ydis):
                                step1 = 3
                                drone.left(12)
                                print("Left Stable")
                            elif ((960 - x - w) < xdis and step1 != 4
                                  and (720 - y - h) > ydis and y > ydis):
                                step1 = 4
                                drone.right(12)
                                print("Right Stable")
                            elif (y < ydis and step1 != 5 and x > xdis
                                  and (960 - x - w) < xdis):
                                step1 = 5
                                drone.right(12)
                                drone.up(12)
                                print("Right Up Stable")
                            elif ((720 - y - h) < ydis and step1 != 6
                                  and x > xdis and (960 - x - w) < xdis):
                                step1 = 6
                                drone.right(12)
                                drone.down(12)
                                print("Right Down Stable")
                            elif (y < ydis and step1 != 7 and x < xdis
                                  and (960 - x - w) > xdis):
                                step1 = 7
                                drone.left(12)
                                drone.up(12)
                                print("Left Up Stable")
                            elif ((720 - y - h) < ydis and step1 != 8
                                  and x < xdis and (960 - x - w) > xdis):
                                step1 = 8
                                drone.left(12)
                                drone.down(12)
                                print("Left Down Stable")
                            elif (step1 != 9 and (720 - y - h) > ydis
                                  and y > ydis and x > xdis
                                  and (960 - x - w) > xdis):
                                if (step1 == 1):
                                    drone.up(0)
                                elif (step1 == 2):
                                    drone.down(0)
                                elif (step1 == 3):
                                    drone.left(0)
                                elif (step1 == 4):
                                    drone.right(0)
                                elif (step1 == 5):
                                    drone.up(0)
                                    drone.right(0)
                                elif (step1 == 6):
                                    drone.right(0)
                                    drone.down(0)
                                elif (step1 == 7):
                                    drone.left(0)
                                    drone.up(0)
                                elif (step1 == 8):
                                    drone.left(0)
                                    drone.down(0)
                                else:
                                    ...  #Nothing
                                step1 = 9
                                print("Stable")
                            else:
                                ...  #Nothing
                            step = 2
                        elif (y < ydis and step != 3 and x > xdis
                              and (960 - x - w) > xdis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 3
                            drone.up(12)
                            print("Up")

                        elif ((720 - y - h) < ydis and step != 4 and x > xdis
                              and (960 - x - w) > xdis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                ...
                                #drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 4
                            drone.down(12)
                            print("Down")

                        elif (x < xdis and step != 5 and (720 - y - h) > ydis
                              and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                ...
                                #drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            print(step)
                            step = 5
                            drone.left(12)
                            print("Left")
                        elif ((960 - x - w) < xdis and step != 6
                              and (720 - y - h) > ydis and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                ...
                                #drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 6
                            drone.right(12)
                            print("Right")
                        elif ((960 - x - w) < xdis and step != 7
                              and (720 - y - h) < ydis and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                ...
                                #drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                ...
                                #drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 7
                            drone.down(12)
                            drone.right(12)
                            print("Right Down")
                        elif (x < xdis and step != 8 and (720 - y - h) < ydis
                              and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                ...
                                #drone.down(0)
                            elif (step == 5):
                                ...
                                #drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 8
                            drone.down(12)
                            drone.left(12)
                            print("Left Down")
                        elif ((960 - x - w) < xdis and step != 9
                              and (720 - y - h) > ydis and y < ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                ...
                                #drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                ...
                                #drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                ...
                                #drone.up(0)
                                #drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 9
                            drone.up(12)
                            drone.right(12)
                            print("Right Up")
                        elif (x < xdis and step != 10 and (720 - y - h) > ydis
                              and y < ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                ...
                                #drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                ...
                                #drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                ...
                                #drone.up(0)
                                #drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 10
                            drone.up(12)
                            drone.left(12)
                            print("Left Up")
                        elif (step != 11 and (720 - y - h) > ydis and y > ydis
                              and x > xdis and (960 - x - w) > xdis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                ...
                                #drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 11
                            drone.forward(12)
                            print("Adelante")
                        else:
                            ...  #Nothing

                # Display the resulting frame
                cv2.putText(image, "Battery %:" + str(battery), (10, 30), font,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.imshow('Original', image)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    raise ValueError('Close Connection')
                    break
                elif frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)

    except:
        drone.forward(0)
        drone.backward(0)
        drone.right(0)
        drone.left(0)
        drone.down(0)
        drone.up(0)
        drone.counter_clockwise(100)
        drone.land()
        sleep(5)
        drone.quit()
        cv2.destroyAllWindows()
        exit()
Example #21
                vid = cv2.VideoCapture(
                    filename)  #get video by input's filename.
                total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT)
                            )  #counts the total number of video's frames.
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                outputfile = input("\nGive the file name of the output video: "
                                   )  #name the output video.
                out = cv2.VideoWriter(outputfile + '.avi', fourcc, vid.get(5),
                                      (int(vid.get(3)), int(vid.get(4))),
                                      False)
                start = time.time()  #time calculation

                print("\nCreating output video with name %s.avi..." %
                      outputfile)  #information message.
                playing, ref = vid.read()
                ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)  #grayscale
                # pad with black pixels so the frame dimensions become multiples of 16.
                ref = np.pad(
                    ref,
                    ((0, int((np.ceil(vid.get(4) / 16) * 16) - vid.get(4))),
                     (0, int((np.ceil(vid.get(3) / 16) * 16) - vid.get(3)))),
                    'constant',
                    constant_values=0)
                array = []  #entropy array.

                print("Frame #1 of " + str(total) +
                      " Completed.")  #information message.

                # while loop for the remaining frames.
                while vid.isOpened():
                    playing, frame = vid.read()
Example #22
        cnt = x[0]

    return cnt


cap = cv2.VideoCapture(0)

while 1:
    _, frame = cap.read()
    frame = cv2.flip(frame, 1)

    roi = frame[50:350, 200:400]  #[y1:y2,x1:x2]

    cv2.rectangle(frame, (200, 50), (400, 350), (0, 0, 255), 0)

    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    lower_color = np.array([0, 45, 79], dtype=np.uint8)
    upper_color = np.array([17, 255, 255], dtype=np.uint8)

    mask = cv2.inRange(hsv, lower_color, upper_color)
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.dilate(mask, kernel, iterations=1)
    mask = cv2.medianBlur(mask, 15)

    contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        try:
            c = findMaxContour(contours)
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
Example #23
def OpenCV():

    retry = 3
    container = None
    while container is None and 0 < retry:
        retry -= 1
        try:
            # Start receiving the video stream
            container = av.open(drone.get_video_stream()
                                )  # container = Tello video; unpack the compressed video data
        except av.AVError as ave:
            print(ave)
            print('retry...')

    frame_skip = 300  # skip frames buffered from before the video connection

    while True:
        for frame in container.decode(video=0):  # .decode: bytes (the video contents) -> decoded frames
            if 0 < frame_skip:  # frame-skip handling
                frame_skip = frame_skip - 1
                continue

            start_time = time.time()

            image_origin = cv2.cvtColor(np.array(frame.to_image()),
                                        cv2.COLOR_RGB2BGR)  #RGB convert

            h, w, c = image_origin.shape

            image = cv2.cvtColor(image_origin, cv2.COLOR_BGR2RGB)

            r = image[:, :, 2]
            g = image[:, :, 1]
            b = image[:, :, 0]

            R = np.array(r).flatten()
            G = np.array(g).flatten()
            B = np.array(b).flatten()

            #R = [x for x in R if x > 15]
            #G = [x for x in G if x > 15]
            #B = [x for x in B if x > 15]

            V1 = np.std(R)
            V2 = np.std(G)
            V3 = np.std(B)

            mode = sstats.mode(R)[0]
            mode1 = sstats.mode(G)[0]
            mode2 = sstats.mode(B)[0]

            threshold_img = image.copy()

            threshold_img[r < mode1 - 3.7 * V1] = 0

            threshold_img[r >= mode1 - 3.7 * V1] = 255

            threshold_img[r > mode1 + 3.7 * V1] = 0

            threshold_img[g > mode2 - 3.7 * V2] = 0

            #feature_params = {"maxCorners": 4,  "qualityLevel": 0.3,  "minDistance": 30,  "blockSize": 12}

            #feature_params = {"maxCorners": 8,  "qualityLevel": 0.3,  "minDistance": 10,  "blockSize": 12}

            feature_params = {
                "maxCorners": 12,
                "qualityLevel": 0.3,
                "minDistance": 5,
                "blockSize": 9
            }

            #feature_params = {"maxCorners": 4,  "qualityLevel": 0.3,  "minDistance": 5,  "blockSize": 9}
            # max number of feature points  # threshold (higher -> fewer feature points)  # minimum distance between feature points (points too close together are excluded)

            image2 = cv2.blur(image, (3, 3))
            A = np.uint8(image[:, :, 2])

            image2 = cv2.blur(threshold_img, (3, 3))
            A = np.uint8(image2[:, :, 2])

            p0 = cv2.goodFeaturesToTrack(A, mask=None, **feature_params)

            for p in p0:  # p0 holds the x, y coordinates of the feature points
                x, y = p.ravel()  # unpack each element of p0
                cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), -1)
            """
        cv2.imshow("image", image)
        cv2.imshow("image1", image1)
        cv2.imshow("image2", image2)
        cv2.imshow("image_thresh", threshold_img)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
        """

            x0 = p0[:, :, 0].ravel()  # x coordinates
            y0 = p0[:, :, 1].ravel()  # y coordinates

            MX = max(x0)
            mx = min(x0)

            MY = max(y0)
            my = min(y0)

            avex = (MX + mx) / 2
            avey = (MY + my) / 2
            """
        print(x0)
        print(y0)

        print(MX)
        print(mx)

        print(MY)
        print(my)

        print(avex)
        print(avey)

        print(type(avex))
        print(type(x0[0]))
        """

            MX = int(MX)
            mx = int(mx)

            MY = int(MY)
            my = int(my)

            avex = int(avex)
            avey = int(avey)

            a = [0] * 12
            b = [0] * 12

            x1 = []
            y1 = []
            for i in range(len(x0)):
                if y0[i] < avey:
                    x1.append(x0[i])
                    y1.append(y0[i])

            l11 = np.sqrt((x1[0])**2 + (y1[0])**2)
            l21 = np.sqrt((x1[1])**2 + (y1[1])**2)
            l31 = np.sqrt((x1[2])**2 + (y1[2])**2)
            l41 = np.sqrt((x1[3])**2 + (y1[3])**2)

            l1 = [l11, l21, l31, l41]

            print(l1)

            c = [0, 1, 2, 3]

            for i in range(len(l1)):
                if l1[i] == min(l1):
                    a[0] = x1[i]
                    b[0] = y1[i]
                    s = i
            c.remove(s)
            j = 0
            for j in c:
                n = c.copy()
                A = (b[0] - y1[j]) / (a[0] - x1[j])
                B = b[0] - A * a[0]
                n.remove(j)
                C = A * x1[n[0]] + B

                D = A * x1[n[1]] + B
                if C - y1[n[0]] > 0 and D - y1[n[1]] < 0:
                    a[1] = x1[n[0]]
                    b[1] = y1[n[0]]
                    a[3] = x1[n[1]]
                    b[3] = y1[n[1]]
                    a[2] = x1[j]
                    b[2] = y1[j]
                    break
                elif C - y1[n[0]] < 0 and D - y1[n[1]] > 0:
                    a[3] = x1[n[0]]
                    b[3] = y1[n[0]]
                    a[1] = x1[n[1]]
                    b[1] = y1[n[1]]
                    a[2] = x1[j]
                    b[2] = y1[j]
                    break

            d1 = np.sqrt((a[0] - a[1])**2 + (b[0] - b[1])**2)
            d2 = np.sqrt((a[1] - a[2])**2 + (b[1] - b[2])**2)
            d3 = np.sqrt((a[2] - a[3])**2 + (b[2] - b[3])**2)
            d4 = np.sqrt((a[3] - a[0])**2 + (b[3] - b[0])**2)

            line1 = cv2.line(image, (a[0], b[0]), (a[1], b[1]), 100)
            line2 = cv2.line(image, (a[1], b[1]), (a[2], b[2]), 100)
            line3 = cv2.line(image, (a[2], b[2]), (a[3], b[3]), 100)
            line4 = cv2.line(image, (a[3], b[3]), (a[0], b[0]), 100)

            x2 = []
            y2 = []
            for i in range(len(x0)):
                if y0[i] > avey and x0[i] < avex:
                    x2.append(x0[i])
                    y2.append(y0[i])

            l12 = np.sqrt((x2[0])**2 + (y2[0])**2)
            l22 = np.sqrt((x2[1])**2 + (y2[1])**2)
            l32 = np.sqrt((x2[2])**2 + (y2[2])**2)
            l42 = np.sqrt((x2[3])**2 + (y2[3])**2)

            l2 = [l12, l22, l32, l42]

            print(l2)

            d = [0, 1, 2, 3]

            for i in range(len(l2)):
                if l2[i] == min(l2):
                    a[4] = x2[i]
                    b[4] = y2[i]
                    s = i
            d.remove(s)
            k = 0
            for k in d:
                n = d.copy()
                A = (b[4] - y2[k]) / (a[4] - x2[k])
                B = b[4] - A * a[4]
                n.remove(k)
                C = A * x2[n[0]] + B

                D = A * x2[n[1]] + B
                if C - y2[n[0]] > 0 and D - y2[n[1]] < 0:
                    a[5] = x2[n[0]]
                    b[5] = y2[n[0]]
                    a[7] = x2[n[1]]
                    b[7] = y2[n[1]]
                    a[6] = x2[k]
                    b[6] = y2[k]
                    break
                elif C - y2[n[0]] < 0 and D - y2[n[1]] > 0:
                    a[5] = x2[n[0]]
                    b[5] = y2[n[0]]
                    a[6] = x2[n[1]]
                    b[6] = y2[n[1]]
                    a[7] = x2[k]
                    b[7] = y2[k]
                    break

            d4 = np.sqrt((a[4] - a[5])**2 + (b[4] - b[5])**2)
            d5 = np.sqrt((a[5] - a[6])**2 + (b[5] - b[6])**2)
            d6 = np.sqrt((a[6] - a[7])**2 + (b[6] - b[7])**2)
            d7 = np.sqrt((a[7] - a[4])**2 + (b[7] - b[4])**2)

            line4 = cv2.line(image, (a[4], b[4]), (a[5], b[5]), 100)
            line5 = cv2.line(image, (a[5], b[5]), (a[6], b[6]), 100)
            line6 = cv2.line(image, (a[6], b[6]), (a[7], b[7]), 100)
            line7 = cv2.line(image, (a[7], b[7]), (a[4], b[4]), 100)

            x3 = []
            y3 = []
            for i in range(len(x0)):
                if y0[i] > avey and x0[i] > avex:
                    x3.append(x0[i])
                    y3.append(y0[i])

            l13 = np.sqrt((x3[0])**2 + (y3[0])**2)
            l23 = np.sqrt((x3[1])**2 + (y3[1])**2)
            l33 = np.sqrt((x3[2])**2 + (y3[2])**2)
            l43 = np.sqrt((x3[3])**2 + (y3[3])**2)

            l3 = [l13, l23, l33, l43]

            print(l3)

            e = [0, 1, 2, 3]

            for i in range(len(l3)):
                if l3[i] == min(l3):
                    a[8] = x3[i]
                    b[8] = y3[i]
                    s = i
            e.remove(s)
            z = 0
            for z in e:
                n = e.copy()
                A = (b[8] - y3[z]) / (a[8] - x3[z])
                B = b[8] - A * a[8]
                n.remove(z)
                C = A * x3[n[0]] + B

                D = A * x3[n[1]] + B

                #if C - y3[n[0]] > 0 and D - y3[n[1]] < 0:
                if C - y3[n[0]] < 0 and D - y3[n[1]] > 0:
                    a[9] = x3[n[0]]
                    b[9] = y3[n[0]]
                    a[11] = x3[n[1]]
                    b[11] = y3[n[1]]
                    a[10] = x3[z]
                    b[10] = y3[z]
                    break

                #elif C - y3[n[0]] < 0 and D - y3[n[1]] > 0:
                elif C - y3[n[0]] > 0 and D - y3[n[1]] < 0:
                    a[9] = x3[n[0]]
                    b[9] = y3[n[0]]
                    a[10] = x3[n[1]]
                    b[10] = y3[n[1]]
                    a[11] = x3[z]
                    b[11] = y3[z]
                    break

            d8 = np.sqrt((a[8] - a[9])**2 + (b[8] - b[9])**2)
            d9 = np.sqrt((a[9] - a[10])**2 + (b[9] - b[10])**2)
            d10 = np.sqrt((a[10] - a[11])**2 + (b[10] - b[11])**2)
            d11 = np.sqrt((a[11] - a[8])**2 + (b[11] - b[8])**2)

            line8 = cv2.line(image, (a[8], b[8]), (a[9], b[9]), 100)
            line9 = cv2.line(image, (a[9], b[9]), (a[10], b[10]), 100)
            line10 = cv2.line(image, (a[10], b[10]), (a[11], b[11]), 100)
            line11 = cv2.line(image, (a[11], b[11]), (a[8], b[8]), 100)
            """
        print(avex)
        print(avey)

        print(x0)
        print(y0)
        print('x1[0]:%d' % x1[0])
        print('y1[0]:%d' % y1[0])
        print('x1[1]:%d' % x1[1])
        print('y1[1]:%d' % y1[1])
        print('x1[2]:%d' % x1[2])
        print('y1[2]:%d' % y1[2])
        print('x1[3]:%d' % x1[3])
        print('y1[3]:%d' % y1[3])

        print('a[0]:%d' % a[0])
        print('b[0]:%d' % b[0])
        print('a[1]:%d' % a[1])
        print('b[1]:%d' % b[1])
        print('a[2]:%d' % a[2])
        print('b[2]:%d' % b[2])
        print('a[3]:%d' % a[3])
        print('b[3]:%d' % b[3])

        print('a[4]:%d' % a[4])
        print('b[4]:%d' % b[4])
        print('a[5]:%d' % a[5])
        print('b[5]:%d' % b[5])
        print('a[6]:%d' % a[6])
        print('b[6]:%d' % b[6])
        print('a[7]:%d' % a[7])
        print('b[7]:%d' % b[7])


        print('a[8]:%d' % a[8])
        print('b[8]:%d' % b[8])
        print('a[9]:%d' % a[9])
        print('b[9]:%d' % b[9])
        print('a[10]:%d' % a[10])
        print('b[10]:%d' % b[10])
        print('a[11]:%d' % a[11])
        print('b[11]:%d' % b[11])

        cv2.imshow("image", image)
        cv2.imshow("thresh", threshold_img)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
        """

            c1 = (a[0] + a[2]) / 2
            c2 = (b[0] + b[2]) / 2
            c3 = (a[4] + a[6]) / 2
            c4 = (b[4] + b[6]) / 2
            c5 = (a[8] + a[10]) / 2
            c6 = (b[8] + b[10]) / 2

            print(x0)
            print(y0)

            c11 = int(c1)
            c12 = int(c2)
            c13 = int(c3)
            c14 = int(c4)
            c15 = int(c5)
            c16 = int(c6)

            #line1 = cv2.line(image,(c11,c12),(c13,c14),100)
            line = cv2.line(image, (c13, c14), (c15, c16), 100)
            #line3 = cv2.line(image,(c15,c16),(c11,c12),100)

            D = np.sqrt((c3 - c5)**2 + (c4 - c6)**2)

            Tx = (c3 + c5) / 2
            Ty = (c4 + c6) / 2

            Da = (c4 - c6) / (c3 - c5)
            DDa = -1 / Da

            Dx = (c6 - Da * c4 + DDa * c1 - c2) / (DDa - Da)
            Dy = Da * Dx + c6 - Da * c4

            #lineT = cv2.line(image,(int(Tx),int(Ty)),(c11,c12),100)

            lineDT = cv2.line(image, (int(Dx), int(Dy)), (c11, c12), 100)

            T = np.sqrt((Tx - c1)**2 + (Ty - c2)**2)
            DD = np.sqrt((Dx - c1)**2 + (Dy - c2)**2)

            X = (T * 0.6) / D

            XD = (DD * 0.6) / D

            print(DD)
            print(XD)

            print(T)
            print(D)
            print(X)
            """
        l1 = np.sqrt((c11-c13)**2 + (c12-c14)**2)
        l2 = np.sqrt((c13-c14)**2 + (c15-c16)**2)
        l3 = np.sqrt((c15-c16)**2 + (c11-c13)**2)

        s = (l1 + l2 + l3) / 2
        Sh = np.sqrt(s*(s-l1)*(s-l2)*(s-l3))

        sin1 = Sh / (l1*l3) 
        theta1 = math.degrees(math.asin(sin1))

        sin2 = Sh / (l2*l3)
        theta2 = math.degrees(math.asin(sin2))

        sin3 = Sh / (l2*l1)
        theta3 = math.degrees(math.asin(sin3))

        print(Sh)
        print(l1)
        print(l2)
        print(l3)
        print(theta1)
        print(theta2)
        print(theta3)
        """

            S = abs((1 / 2) * ((a[3] - a[0]) * (b[1] - b[0]) - (a[1] - a[0]) *
                               (b[3] - b[0]))) + abs(
                                   (1 / 2) * ((a[1] - a[2]) * (b[3] - b[2]) -
                                              (a[3] - a[2]) * (b[1] - b[2])))

            filename = 'telloimage' + str(frame) + '.jpg'
            cv2.imwrite(filename, image_origin)

            with open("S1 2021.3.10 8:19.txt", "a") as f:
                result = "{:.7f}\n".format(S)
                f.write(result)

            with open("d1 2021.3.10 8:19..txt", "a") as f:
                result = "{:.7f}\n".format(S)
                f.write(result)

            with open("d2 2021.3.10 8:19..txt", "a") as f:
                result = "{:.7f}\n".format(d2)
                f.write(result)

            print(S)
            print(d1)
            print(d2)

            cy = h / 2
            cx = w / 2

            data = [S, c1, c2, p0, cx, cy]
            return data

            if frame.time_base < 1.0 / 60:
                time_base = 1.0 / 60  # baseline used to detect decoder/timing errors
            else:
                time_base = frame.time_base
                # compute the frame-skip value
                frame_skip = int((time.time() - start_time) / time_base)
Esempio n. 24
0
import numpy as np
from cv2 import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread('image2.jpg')
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(grayImage, 1.1, 7)
if len(faces) == 0:
    print("no faces found")
else:
    print(faces)
    print(faces.shape)
    print("Number of faces detected: " + str(faces.shape[0]))
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
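        # eye_cascade is loaded above but otherwise unused; a minimal sketch of
        # applying it to each detected face region could look like this:
        roi_gray = grayImage[y:y + h, x:x + w]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray, 1.1, 7):
            cv2.rectangle(img, (x + ex, y + ey), (x + ex + ew, y + ey + eh),
                          (255, 0, 0), 1)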
    cv2.imshow('Image with faces', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Esempio n. 25
0
    def thread(self):
        self.last_time = None
        with self.playing_cv:
            while self.running:
                self.playing_cv.wait_for(lambda: self.playing)
                if not self.running:
                    break
                with self.capture_mutex:
                    rate = 1 / self.capture.get(cv2.CAP_PROP_FPS)
                    frame_index = round(
                        self.capture.get(cv2.CAP_PROP_POS_FRAMES))
                    remaining, frame = self.capture.read()
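                    # capture.read() returns (ret, frame); `remaining` is the usual
                    # success flag and becomes False once the video is exhausted.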
                    data = {
                        VideoInfoRow.Frame:
                        frame_index,
                        VideoInfoRow.Time:
                        self.capture.get(cv2.CAP_PROP_POS_MSEC),
                        VideoInfoRow.Progress:
                        self.capture.get(cv2.CAP_PROP_POS_AVI_RATIO),
                    }
                    width = round(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
                    height = round(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
                if not remaining:
                    self.finished.emit(data)
                    break
                rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgb_image.shape
                bytes_per_line = ch * w
                image = QImage(rgb_image.data, w, h, bytes_per_line,
                               QImage.Format_RGB888)
                if not self.detector_enabled:
                    results = []
                else:
                    with self.results_mutex:
                        if frame_index not in self.frames:

                            def valid_bbox(bbox):
                                return bbox[0] >= 0 and bbox[1] >= 0 and bbox[
                                    2] < width and bbox[3] < height

                            # Get all instances
                            instances = [{
                                'instance_feature': instance['reid'],
                                'instance_bbox': instance['bbox'],
                            } for instance in self.reid_model.predict(frame)
                                         if valid_bbox(instance['bbox'])]
                            if instances:
                                features = np.stack([
                                    instance['instance_feature']
                                    for instance in instances
                                ])
                                similarity = cosine(features,
                                                    self.instance_features)
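                                # Greedy matching: repeatedly pick the pair with the
                                # smallest value in the similarity matrix, stop once the
                                # best remaining value reaches the threshold, and fill
                                # matched rows/columns with 2 so they cannot be reused.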
                                while similarity.size:
                                    matched = np.unravel_index(
                                        np.argmin(similarity),
                                        similarity.shape)
                                    if similarity[
                                            matched] >= INSTANCE_SIMILARITY_THRESHOLD:
                                        break
                                    uuid = list(self.instances)[matched[1]]
                                    instances[matched[0]]['instance'] = uuid
                                    self.instance_features[matched[1]] = \
                                        FEATURE_DECAY * self.instance_features[matched[1]] + \
                                        (1 - FEATURE_DECAY) * features[matched[0]]
                                    # self.instance_features[matched[1]] = np.average(np.stack(
                                    #     [instance['instance_feature'] for frame in self.frames.values()
                                    #      for instance in frame if instance['instance'] == uuid] +
                                    #     [features[matched[0]]]),
                                    #     axis=0)
                                    similarity[matched[0], :] = np.full(
                                        similarity.shape[1],
                                        2)  # 2 is largest similarity
                                    similarity[:, matched[1]] = np.full(
                                        similarity.shape[0], 2)
                            for instance in instances:
                                if 'instance' not in instance:
                                    new_uuid = uuid4()
                                    new_instance = {
                                        'color': random_color(),
                                        'name': None,
                                    }
                                    self.instances[new_uuid] = new_instance
                                    instance['instance'] = new_uuid
                                    self.instance_features = np.concatenate(
                                        (self.instance_features,
                                         instance['instance_feature'][
                                             np.newaxis, :]))
                            faces_instance_index = []
                            for i, instance in enumerate(instances):
                                bbox = instance['instance_bbox']
                                instance_image = frame[bbox[1]:bbox[3],
                                                       bbox[0]:bbox[2]]
                                instance_image_rgb = cv2.cvtColor(
                                    instance_image, cv2.COLOR_BGR2RGB)
                                attributes = self.attribute_model.predict(
                                    Image.fromarray(
                                        instance_image_rgb).convert('RGB'))
                                instance['attributes'] = attributes
                                faces = self.detect_model.predict(
                                    instance_image)
                                if faces:
                                    face_bbox = max(faces, key=lambda x: x[4])
                                    face_bbox = [
                                        round(face_bbox[0]),
                                        round(face_bbox[1]),
                                        round(face_bbox[2]),
                                        round(face_bbox[3])
                                    ]
                                    face_image = instance_image[
                                        face_bbox[1]:face_bbox[3],
                                        face_bbox[0]:face_bbox[2]]
                                    face_image = cv2.cvtColor(
                                        face_image, cv2.COLOR_BGR2RGB)
                                    feature = self.recognize_model.predict_raw(
                                        Image.fromarray(face_image)).squeeze(0)
                                    faces_instance_index.append(i)
                                    instance['face_feature'] = feature
                                    instance['face_bbox'] = [
                                        bbox[0] + face_bbox[0],
                                        bbox[1] + face_bbox[1],
                                        bbox[0] + face_bbox[2],
                                        bbox[1] + face_bbox[3],
                                    ]
                                else:
                                    instance['face'] = None
                                    instance['face_feature'] = None
                                    instance['face_bbox'] = None
                            if faces_instance_index:
                                features = np.stack([
                                    instances[idx]['face_feature']
                                    for idx in faces_instance_index
                                ])
                                similarity = cosine(features,
                                                    self.face_features)
                                while similarity.size:
                                    matched = np.unravel_index(
                                        np.argmin(similarity),
                                        similarity.shape)
                                    if similarity[
                                            matched] >= FACE_SIMILARITY_THRESHOLD:
                                        break
                                    uuid = list(self.faces)[matched[1]]
                                    instances[faces_instance_index[
                                        matched[0]]]['face'] = uuid
                                    self.face_features[matched[1]] = FEATURE_DECAY * self.face_features[matched[1]] + \
                                                                     (1 - FEATURE_DECAY) * features[matched[0]]
                                    # self.face_features[matched[1]] = np.average(np.stack(
                                    #     [instance['face_feature'] for frame in self.frames.values()
                                    #      for instance in frame if instance['face'] == uuid] +
                                    #     [features[matched[0]]]),
                                    #     axis=0)
                                    similarity[matched[0], :] = np.full(
                                        similarity.shape[1],
                                        2)  # 2 is largest similarity
                                    similarity[:, matched[1]] = np.full(
                                        similarity.shape[0], 2)
                            for instance in instances:
                                if 'face' in instance or 'face_feature' not in instance:
                                    continue
                                new_uuid = uuid4()
                                new_face = {
                                    'color': random_color(),
                                    'name': None,
                                }
                                self.faces[new_uuid] = new_face
                                instance['face'] = new_uuid
                                self.face_features = np.concatenate(
                                    (self.face_features,
                                     instance['face_feature'][np.newaxis, :]))
                            self.frames[frame_index] = instances
                        results = self.generate_results(frame_index)
                new_time = time.time()
                if self.last_time is not None and new_time < self.last_time + rate:
                    time.sleep(
                        self.last_time + rate - new_time
                    )  # it is hard to sleep with a condition variable
                self.last_time = new_time
                self.frameReady.emit(image, data, results)
        self.running = False
Esempio n. 26
0
import cv2.cv2 as cv
import matplotlib.pyplot as plt

img = cv.imread("imgs/lena.jpg", -1)
img1 = cv.imread("imgs/orange.jpg", -1)

cv.imshow("original image", img)
# converting bgr 2 rgb
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
img1 = cv.cvtColor(img1, cv.COLOR_BGR2RGB)
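# matplotlib expects RGB channel order while cv.imread returns BGR, so without
# this conversion the red and blue channels would appear swapped in the plots.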

# single image
plt.imshow(img)
# remove the axis tick marks
plt.xticks([]), plt.yticks([])

plt.show()

# multiple image
plt.subplot(1, 2, 1), plt.imshow(img)
plt.title("first image")
plt.xticks([]), plt.yticks([])
plt.subplot(1, 2, 2), plt.imshow(img1)
plt.title("second image")
plt.xticks([]), plt.yticks([])
plt.show()

cv.waitKey(0)
cv.destroyAllWindows()
Esempio n. 27
0
    kamera = simulator

    # will be replaced by the AFMotorShield when testing with the real robot
    motoren = simulator

    # keep the CPU busy:
    while True:
        # read a frame
        dieser_frame = kamera.get_frame()

        ####################################
        # Summation                         #
        ####################################

        # convert a copy of dieser_frame to grayscale
        frame_graustufen = cv2.cvtColor(dieser_frame, cv2.COLOR_BGR2GRAY)

        schwelle = 120
        # split all values at threshold schwelle into 0 or 255 and invert the image
        _, linien_maske = cv2.threshold(frame_graustufen, schwelle, 255,
                                        cv2.THRESH_BINARY_INV)
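        # THRESH_BINARY_INV maps pixels brighter than schwelle to 0 and darker
        # pixels to 255, so a dark line on a light floor shows up white in the mask.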

        h = 50  # height
        b = 50  # width
        start1 = (int(100), int(100))
        ende1 = (int(start1[0] + b), int(start1[1] + h))
        start2 = (int(200), int(100))
        ende2 = (int(start2[0] + b), int(start2[1] + h))

        # crop the rectangle from top-left (x=100, y=100) to bottom-right (x=150, y=150):
        roi_links = linien_maske[start1[1]:ende1[1], start1[0]:ende1[0]]
Esempio n. 28
0
def process_wheel():
    global frame
    wheel_frame = frame.copy()

    hsv = cv2.cvtColor(wheel_frame, cv2.COLOR_BGR2HSV)

    lower_blue = np.array(lb.copy())  # [35, 100, 0]
    upper_blue = np.array(rb.copy())  # [255, 255, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
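    # mask is 255 where the HSV pixel falls inside [lower_blue, upper_blue], 0 elsewhere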

    anded_res = cv2.bitwise_and(wheel_frame, wheel_frame, mask=mask)
    contours, _ = cv2.findContours(cv2.Canny(anded_res, 255 / 3, 255),
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    area_threshold = 400
    inds = []
    for i, c in enumerate(contours):
        a = cv2.contourArea(c)
        if a > area_threshold and len(inds) < 2:
            inds.append(i)

    if not inds or len(inds) != 2:
        cv2.imshow('wheel', wheel_frame)  # [165:200, 326:500])
        # cv2.imshow('mask', mask)  # [165:200, 326:500])
        return

    if cv2.contourArea(contours[inds[0]]) < cv2.contourArea(contours[inds[1]]):
        inds[0], inds[1] = inds[1], inds[0]

    moments1 = cv2.moments(contours[inds[0]])
    moments2 = cv2.moments(contours[inds[1]])
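    # Contour centroids from image moments: cx = m10 / m00, cy = m01 / m00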

    p1 = [
        int(moments1["m10"] / moments1["m00"]),
        int(moments1["m01"] / moments1["m00"])
    ]
    p2 = [
        int(moments2["m10"] / moments2["m00"]),
        int(moments2["m01"] / moments2["m00"])
    ]

    cv2.circle(wheel_frame, (p1[0], p1[1]), 3, (255, 255, 255), -1)
    cv2.circle(wheel_frame, (p2[0], p2[1]), 3, (255, 255, 255), -1)

    cv2.line(wheel_frame, (p1[0], p1[1]), (p2[0], p2[1]), (0, 255, 0), 2)
    if p2[0] - p1[0] == 0:
        slope = 90
    else:
        slope = -np.rad2deg(np.arctan2((p2[1] - p1[1]), (p2[0] - p1[0]))) % 360

    cv2.drawContours(wheel_frame, contours, inds[0], (0, 0, 255), 2)
    cv2.drawContours(wheel_frame, contours, inds[1], (0, 255, 0), 2)

    cv2.putText(wheel_frame, "Steering angle: {}".format(np.round(slope)),
                (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)
    cv2.putText(wheel_frame, "{}".format(get_action()), (10, 140),
                cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)

    cv2.line(wheel_frame, (0, 200), (600, 200), (255, 255, 255), 1)
    cv2.line(wheel_frame, (0, 250), (600, 250), (255, 255, 255), 1)
    steer(slope)
    gas(p1[1])
    cv2.imshow('wheel', wheel_frame)  # [165:200, 326:500])
Esempio n. 29
0
import numpy as np
from cv2 import cv2 as cv
from color_grids_detection import DetectColorGrids
import os

TOTAL_COLUMNS = 6
TOTAL_ROWS = 4

CURRENT_DIR = os.getcwd()

img = cv.imread(CURRENT_DIR + '\\image\\image.jpg')
output_path = CURRENT_DIR + '\\image\\'
cascade_path = CURRENT_DIR + '\\cascade.xml'

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.equalizeHist(gray)
cascade = cv.CascadeClassifier(cascade_path)
objects = cascade.detectMultiScale(gray, 1.1, 6, 1, (129, 90))
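# Positional arguments here: scaleFactor=1.1, minNeighbors=6, flags=1, minSize=(129, 90)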
for single_obj in objects:
    cv.rectangle(img, single_obj, (255, 255, 255))
    x, y, w, h = single_obj
    crop_img = img[y:y + h, x:x + w]

    grids_position = np.zeros((TOTAL_ROWS, TOTAL_COLUMNS, 2), dtype=int)

    grids_position = DetectColorGrids(crop_img)

    row_count = 0
    col_count = 0
    while (row_count < TOTAL_ROWS):
        col_count = 0
Esempio n. 30
0
def get_sym_score(img):

    height = int(img.shape[0])
    width = int(img.shape[1])

    cell_x = height // opt.cell_size[0]
    cell_y = width // opt.cell_size[1]

    # left-right
    img_left = img[:, :width // 2]
    img_right = img[:, width // 2:]

    imgl_rs = np.split(img_left, cell_x, axis=0)
    imgr_rs = np.split(img_right[:, ::-1], cell_x, axis=0)
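    # The right half is flipped left-right so that cell (r, c) of both halves
    # covers mirror-symmetric image regions before their histograms are compared.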
    histl = np.ndarray((cell_x, cell_y // 2, opt.rgbbins * 3))
    histr = np.ndarray((cell_x, cell_y // 2, opt.rgbbins * 3))
    for r in range(cell_x):
        imgl_rc = np.split(imgl_rs[r], cell_y // 2, axis=1)
        imgr_rc = np.split(imgr_rs[r], cell_y // 2, axis=1)
        for c in range(cell_y // 2):
            histl_r = np.histogram(imgl_rc[c][:, :, 0], opt.rgbbins,
                                   (0, 255))[0]
            histl_g = np.histogram(imgl_rc[c][:, :, 1], opt.rgbbins,
                                   (0, 255))[0]
            histl_b = np.histogram(imgl_rc[c][:, :, 2], opt.rgbbins,
                                   (0, 255))[0]

            histr_r = np.histogram(imgr_rc[c][:, :, 0], opt.rgbbins,
                                   (0, 255))[0]
            histr_g = np.histogram(imgr_rc[c][:, :, 1], opt.rgbbins,
                                   (0, 255))[0]
            histr_b = np.histogram(imgr_rc[c][:, :, 2], opt.rgbbins,
                                   (0, 255))[0]

            histl[r, c] = np.concatenate(
                (histl_r, histl_g, histl_b)) / np.sum(histl_r) / 3
            histr[r, c] = np.concatenate(
                (histr_r, histr_g, histr_b)) / np.sum(histr_r) / 3

    binsl = myHOG(cv.cvtColor(img_left, cv.COLOR_BGR2GRAY), cell_x,
                  cell_y // 2, opt.cell_size)
    binsr = myHOG(cv.cvtColor(img_right[:, ::-1], cv.COLOR_BGR2GRAY), cell_x,
                  cell_y // 2, opt.cell_size)

    cost_lr = np.sum(np.abs(histl - histr)) / cell_x / cell_y * 2
    dist_hoglr = np.sum(np.abs(binsl - binsr)) / cell_x / cell_y * 2

    # top-bottom
    img_top = img[:height // 2]
    img_bottom = img[height // 2:]

    imgt_rs = np.split(img_top, cell_x // 2, axis=0)
    imgb_rs = np.split(img_bottom[::-1], cell_x // 2, axis=0)
    histt = np.ndarray((cell_x // 2, cell_y, opt.rgbbins * 3))
    histb = np.ndarray((cell_x // 2, cell_y, opt.rgbbins * 3))
    for r in range(cell_x // 2):
        imgt_rc = np.split(imgt_rs[r], cell_y, axis=1)
        imgb_rc = np.split(imgb_rs[r], cell_y, axis=1)
        for c in range(cell_y):
            histt_r = np.histogram(imgt_rc[c][:, :, 0], opt.rgbbins,
                                   (0, 255))[0]
            histt_g = np.histogram(imgt_rc[c][:, :, 1], opt.rgbbins,
                                   (0, 255))[0]
            histt_b = np.histogram(imgt_rc[c][:, :, 2], opt.rgbbins,
                                   (0, 255))[0]

            histb_r = np.histogram(imgb_rc[c][:, :, 0], opt.rgbbins,
                                   (0, 255))[0]
            histb_g = np.histogram(imgb_rc[c][:, :, 1], opt.rgbbins,
                                   (0, 255))[0]
            histb_b = np.histogram(imgb_rc[c][:, :, 2], opt.rgbbins,
                                   (0, 255))[0]

            histt[r, c] = np.concatenate(
                (histt_r, histt_g, histt_b)) / np.sum(histt_r) / 3
            histb[r, c] = np.concatenate(
                (histb_r, histb_g, histb_b)) / np.sum(histb_r) / 3

    binst = myHOG(cv.cvtColor(img_top, cv.COLOR_BGR2GRAY), cell_x // 2, cell_y,
                  opt.cell_size)
    binsb = myHOG(cv.cvtColor(img_bottom[::-1], cv.COLOR_BGR2GRAY),
                  cell_x // 2, cell_y, opt.cell_size)

    cost_tb = np.sum(np.abs(histt - histb)) / cell_x / cell_y * 2
    dist_hogtb = np.sum(np.abs(binst - binsb)) / cell_x / cell_y * 2

    return np.array([cost_lr, dist_hoglr, cost_tb, dist_hogtb])
Esempio n. 31
0
#--Trackbars for Changing HSV Range
cv2.createTrackbar('H Lower', 'HSV Range', 0, 255, nothing)
cv2.createTrackbar('S Lower', 'HSV Range', 0, 255, nothing)
cv2.createTrackbar('V Lower', 'HSV Range', 0, 255, nothing)
cv2.createTrackbar('H Upper', 'HSV Range', 0, 255, nothing)
cv2.createTrackbar('S Upper', 'HSV Range', 0, 255, nothing)
cv2.createTrackbar('V Upper', 'HSV Range', 0, 255, nothing)

#--Starting the Program
while True:

    # Read each video frame, store it in `frame`,
    # then convert it from BGR to HSV.
    # Note: '_' discards the boolean success flag returned by cap.read().
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Define lower and upper element as a numpy array
    lower_element = np.array([0, 0, 0])
    upper_element = np.array([0, 0, 0])
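    # Presumably these zero arrays are later filled from the trackbar positions
    # and passed to cv2.inRange to build a mask (not shown in this fragment).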

    #-----------------Read the text below
    """
    Track The Current Position of All Trackbars
    and Switch.
    """
    r = cv2.getTrackbarPos('R', 'HSV BGR')
    g = cv2.getTrackbarPos('G', 'HSV BGR')
    b = cv2.getTrackbarPos('B', 'HSV BGR')
    h = cv2.getTrackbarPos('H', 'HSV BGR')
    s = cv2.getTrackbarPos('S', 'HSV BGR')