Example 1
import av
import cv2
import numpy
import tellopy


def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        # decode(video=0) yields frames; iterate instead of the old .next() call
        for frame in container.decode(video=0):
            image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
            cv2.imshow('Original', image)
            cv2.imshow('Canny', cv2.Canny(image, 100, 200))
            cv2.waitKey(1)

    except Exception as ex:
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
Example 2
import cv2
import winsound  # note: winsound is Windows-only
cam = cv2.VideoCapture(0)
while cam.isOpened():
    ret, frame1 = cam.read()
    ret, frame2 = cam.read()
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)  # capture frames are BGR
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(
        dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
    for c in contours:
        if cv2.contourArea(c) < 5000:
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)
        winsound.PlaySound('alert.wav', winsound.SND_ASYNC)
    cv2.imshow('Papa Cam', frame1)
    if cv2.waitKey(10) == ord('q'):  # quit on 'q'
        break

cam.release()
cv2.destroyAllWindows()
Example 3
import cv2

# Assumed setup for this snippet: a full-body Haar cascade (adjust the path
# to wherever your cascade files live)
cascade = cv2.CascadeClassifier("haarcascades/haarcascade_fullbody.xml")
# img = cv2.imread("media/tokyo_snapshot.png")
# imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# faces = cascade.detectMultiScale(imgGray, 1.05, 1)

# for (x, y, w, h) in faces:
#     cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

# img = cv2.resize(img, (1280, 720))
# cv2.imshow("Result", img)
# cv2.waitKey(0)


# SAMPLE VIDEO
cap = cv2.VideoCapture("media/nyc.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        imgGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        bodies = cascade.detectMultiScale(imgGray, scaleFactor=1.05, maxSize=(70, 100))
        for (x, y, w, h) in bodies:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        frame = cv2.resize(frame, (1280, 720))
        cv2.imshow("Result", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
cv2.destroyAllWindows() 

Example 4
import cv2

capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
out = cv2.VideoWriter('Output.avi', fourcc, 20.0, (640, 480))

while capture.isOpened():
    ret, frame = capture.read()
    if ret:
        # print(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        # print(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out.write(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow("Ninjavin", gray)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

capture.release()
out.release()
cv2.destroyAllWindows()
Example 5
# (fragment: assumes args, a video flag and the selection-state variables
# -- selecting_points, selected_points, get_frame, all_points -- are defined
# earlier in the original program)
        if args.video == 'CAM':
            cam = cv2.VideoCapture(0)
        else:
            cam = cv2.VideoCapture(args.video)
    else:
        img_path = args.image_path
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        img_copy = img.copy()
        img_to_cut = img.copy()

    while True:
        if video:
            _, img = cam.read()
            img_to_cut = img.copy()

        cv2.imshow('image', img)
        key = cv2.waitKey(100)

        if key == ord('s'):
            cv2.namedWindow('select points')
            cv2.setMouseCallback('select points', get_mouse_points)
            selecting_points = True

        if selecting_points:
            if get_frame and video:
                img_copy = img.copy()
                get_frame = False
            cv2.imshow('select points', img_copy)

        if key == ord('c') and len(all_points) > 2 and selecting_points:
            selected_points = True
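
# Note: the fragment above assumes a get_mouse_points callback and an
# all_points list defined elsewhere; a minimal sketch of what they might look
# like (an assumption, not part of the original):
all_points = []

def get_mouse_points(event, x, y, flags, param):
    # record each left-button click as a selected point
    if event == cv2.EVENT_LBUTTONDOWN:
        all_points.append((x, y))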
Example 6
import cv2
import numpy as np

image = cv2.imread("./image.jpg")

# Store height and width of the image
height, width = image.shape[:2]

quarter_height, quarter_width = height / 4, width / 4

#     | 1 0 Tx |
# T = | 0 1 Ty |

# T is our translation matrix
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])

# We use warpAffine to transform the image using the matrix, T
img_translation = cv2.warpAffine(image, T, (width, height))
cv2.imshow("Translation", img_translation)
cv2.waitKey()
cv2.destroyAllWindows()
Example 7
# (fragment: the head of get_label and the imports -- numpy as np, PIL's
# Image, cv2 -- are not included in the original snippet)
    gt_bbox = get_gt_box(gt_box_file)
    for i in range(gt_bbox.shape[0]):
        label[gt_bbox[i, 2]:gt_bbox[i, 4], gt_bbox[i, 1]:gt_bbox[i, 3], 0] = gt_bbox[i, 0]
    return label

def get_image(image_file):
    image = np.array(Image.open(image_file))
    height, width, channels = image.shape
    image = image.astype(np.int32)
    return image, height, width

main_box_dir = 'E:/02竞赛/水下目标检测/water_optical_comp/train/train/box/'
main_image_dir = 'E:/02竞赛/水下目标检测/water_optical_comp/train/train/image/'

for i in range(1, 1000):
    image_file = main_image_dir + str("%06d" % i) + '.jpg'
    gt_box_file = main_box_dir + str("%06d" % i) + '.xml'
    image, height, width = get_image(image_file)
    label = get_label(gt_box_file, height, width) * 60
    label = np.concatenate((label, label, label), axis=-1).astype(np.uint8)
    image = image.astype(np.uint8)
    cv2.namedWindow("image", 0)
    cv2.resizeWindow("image", 640, 480)
    cv2.imshow("image", image)
    cv2.namedWindow("label", 0)
    cv2.resizeWindow("label", 640, 480)
    cv2.imshow('label', label)
    cv2.waitKey(0)
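
# Note: get_gt_box is not shown in this snippet. Judging from how its columns
# are indexed above (class id in column 0, x1/y1/x2/y2 in columns 1-4), a
# VOC-style XML parser along these lines could stand in (the per-object class
# numbering here is an assumption):
import xml.etree.ElementTree as ET

def get_gt_box(gt_box_file):
    boxes = []
    root = ET.parse(gt_box_file).getroot()
    for class_id, obj in enumerate(root.iter('object'), start=1):
        bb = obj.find('bndbox')
        boxes.append([class_id] + [int(float(bb.find(tag).text))
                                   for tag in ('xmin', 'ymin', 'xmax', 'ymax')])
    return np.array(boxes, dtype=np.int32)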


Example 8
# (fragment: runs inside a capture loop; camera, frame, resultadoPink and
# acabar are set up earlier in the original program)
    # convert the image to grayscale
    frameGrayPink = cv2.cvtColor(resultadoPink, cv2.COLOR_BGR2GRAY)
    # clean the image up with a binary threshold
    _, threshPink = cv2.threshold(frameGrayPink, 3, 255, cv2.THRESH_BINARY)
    # find the contours of the pink object
    contornosPink, _ = cv2.findContours(threshPink, cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for contornoPink in contornosPink:
        # boundingRect returns the top-left x, y plus the width and height
        (xPink, yPink, wPink, hPink) = cv2.boundingRect(contornoPink)
        # compute the contour area
        areaPink = cv2.contourArea(contornoPink)
        # if enough pink is visible, set acabar ("finish") to 1 to end the program
        if areaPink > 1000:
            acabar = 1

    # show the frame
    cv2.imshow("Camera", frame)
    # wait 60 ms between frames
    key = cv2.waitKey(60)
    # if acabar is 1, end the program
    if acabar == 1:
        break

# close all windows
cv2.destroyAllWindows()
# release the camera
camera.release()
Example 9
# (fragment: runs inside a capture loop; faces, faceClosest, the constraint
# bounds and controller are defined earlier in the original program)
            faceClosest["xy2"] = (x2, y2)
            faceClosest["wh"] = (w, h)

    # If faces do exist
    if len(faces) > 0:
        # Draw red (signal for priority face) rectangle for priority target face
        frame = cv2.rectangle(frame, faceClosest["xy"], faceClosest["xy2"],
                              (0, 0, 255), 1)

        centerX = faceClosest["xy"][0] + (faceClosest["wh"][0] // 2)
        centerY = faceClosest["xy"][1] + (faceClosest["wh"][1] // 2)

        # Draw center of target face
        frame = cv2.line(frame, (centerX, centerY), (centerX, centerY),
                         (0, 0, 255), 2)

        # Detect if face is within constraints
        if centerX < constraintLeft:
            controller.left()
        elif centerX > constraintRight:
            controller.right()
        else:
            controller.fire()

    cv2.imshow('FaceNerf', frame)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
cap = cv.VideoCapture("datas/videos/licenseplate_01.mp4")
cap.set(3, frameWidth)   # property 3: frame width
cap.set(4, frameHeight)  # property 4: frame height
cap.set(10, 150)         # property 10: brightness
count = 0

while True:
    success, img = cap.read()
    if not success:  # stop when the video ends
        break
    imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 10)
    for (x, y, w, h) in numberPlates:
        area = w * h
        if area > minArea:
            cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
            cv.putText(img, "Number Plate", (x, y - 5),
                       cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 2)
            imgRoi = img[y:y + h, x:x + w]
            cv.imshow("ROI", imgRoi)

    cv.imshow("Result", img)

    if cv.waitKey(1) == ord('s'):
        cv.imwrite("datas/output/NoPlate_" + str(count) + ".jpg", imgRoi)
        cv.rectangle(img, (0, 200), (640, 300), (0, 255, 0), cv.FILLED)
        cv.putText(img, "Scan Saved", (150, 265), cv.FONT_HERSHEY_DUPLEX, 2,
                   (0, 0, 255), 2)
        cv.imshow("Result", img)
        cv.waitKey(500)
        count += 1

cv.destroyAllWindows()
Example 11
# (fragment: part of a voice-assistant command handler; speak, query and the
# os / cv2 / random imports come from the surrounding program)
            speak("Here you go with music")
            music_dir = "C:\\Users\\ANUSHA\\Music"
            songs = os.listdir(music_dir)
            print(songs)
            # don't bind the result to "random": that would shadow the
            # random module used below
            os.startfile(os.path.join(music_dir, songs[1]))

        elif 'take a photo' in query:
            cam = cv2.VideoCapture(0)
            #establishes  the camera --laptop camera port=0
            print("capturing face...")
            count = random.randint(0, 1000)

            while True:
                ret, img = cam.read()

                cv2.imshow("Test", img)

                if not ret:
                    break

                k = cv2.waitKey(1)

                if k % 256 == 27:
                    #For Esc key
                    print("Close")
                    print("photo taken")
                    break
                elif k % 256 == 32:
                    #For Space key
                    # the save step is assumed from the "saved" message below
                    cv2.imwrite("photo_" + str(count) + ".png", img)
                    print("Image " + str(count) + " saved")
Example 12
import cv2
import numpy as np
#dulieu=str(input("Enter link image:"))
img = cv2.imread('beans.jpg')
kernel = np.ones((5, 5), np.uint8)

# Step 1: Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Step 2: Blur the image
blur = cv2.GaussianBlur(gray, (9, 9), 1)
cv2.imshow('new', blur)

# Step 3: Adaptive threshold to filter out noise
new = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 9, -5)

# Step 4: Morphological opening
opening = cv2.morphologyEx(new, cv2.MORPH_OPEN, kernel)

# Step 5: Count the objects
contours = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]

# Check the result
cv2.drawContours(img, contours, -1, (255, 0, 0), 3)
cv2.imshow('opening', opening)
print("Count: " + str(len(contours)))
cv2.waitKey(0)
cv2.destroyAllWindows()
Example 13
# (fragment: the top of the color-conversion helper and its imports are not
# included in the original snippet)
        exit()
    else:
        cvt = img

    return cvt


print('Enter the image path:')
imgAddress = input()

# img = cv2.imread("/Users/rodrigo/Pictures/missao.jpg")
img = cv2.imread(imgAddress)

cv2.imshow("Original", img)
cv2.waitKey(20)


while True:
    print("1: XYZ\n"
          "2: YUV\n"
          "3: Gray\n"
          "4: RGB\n"
          "5: HSV\n"
          "6: Lab\n"
          "9: Sair\n"
          "\n")

    print('select an option')
    chosen = input()
Example 14
# (fragment: runs inside a loop over calibration images; objp, objpoints,
# imgpoints, criteria, cbcol, cbrow and WAIT_TIME are defined earlier in the
# original program)
    img = cv2.imread('calib_images/imgS' + str(i) + '.png')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)

    # If found, add object points, image points (after refining them)
    if ret:
        objpoints.append(objp)

        corners2 = cv2.cornerSubPix(
            gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (cbcol, cbrow), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(WAIT_TIME)

cv2.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)

# ---------- Saving the calibration -----------------
cv_file = cv2.FileStorage("calib_images/test.yaml", cv2.FILE_STORAGE_WRITE)
cv_file.write("camera_matrix", mtx)
cv_file.write("dist_coeff", dist)

# note that you release(), not close(), a FileStorage object
cv_file.release()
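
# For the reverse direction, the saved calibration can be read back with
# FileStorage in READ mode (a sketch assuming the same file and key names):
cv_file = cv2.FileStorage("calib_images/test.yaml", cv2.FILE_STORAGE_READ)
camera_matrix = cv_file.getNode("camera_matrix").mat()
dist_coeff = cv_file.getNode("dist_coeff").mat()
cv_file.release()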
Example 15
def cv_show(name, img):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 16
import os

import cv2 as cv
import numpy as np

# Assumed output file for the threshold statistics written below
arq = open('thresholds.tsv', 'w')

for imgArq in os.listdir():
    if imgArq.endswith(".jpg"):
        print(imgArq)
        img = cv.imread(imgArq)

        # mean (box) blur
        blur = cv.blur(img, (5, 5))
        # Gaussian blur
        gaus = cv.GaussianBlur(img, (5, 5), 0)
        # median blur
        median = cv.medianBlur(img, 5)
        # bilateral filter
        bilat = cv.bilateralFilter(img, 9, 75, 75)

        imgCinza = cv.cvtColor(blur, cv.COLOR_BGR2GRAY)
        cv.imshow("Thresh1", imgCinza)



        for i in range(200, 255, 10):
            nomeArq = imgArq + '_threshold' + str(i) + 'Blur.png'
            imgCinza = cv.cvtColor(blur, cv.COLOR_BGR2GRAY)
            ret, thresh = cv.threshold(imgCinza, i, 255, cv.THRESH_BINARY)
            cv.imwrite(nomeArq, thresh)
            pretos = np.count_nonzero(thresh == 0)
            brancos = np.count_nonzero(thresh == 255)
            total = pretos + brancos
            percPretos = 100 * pretos / total
            percBrancos = 100 * brancos / total
            arq.write(nomeArq + "\t" + str(brancos) + "\t" + str(pretos) + "\t" +
                      str(total) + "\t" + str(percBrancos).replace('.', ',') + "\t" +
                      str(percPretos).replace('.', ',') + "\n")
Example 17
import cv2

cascade = cv2.CascadeClassifier("haarcascades/haarcascade_frontalface_default.xml")


# SNAPSHOT
img = cv2.imread("media/tokyo_snapshot.png")
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(imgGray, 1.1, 4)

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)


img = cv2.resize(img, (1280, 720))
cv2.imshow("Result", img)
cv2.waitKey(0)
Example 18
import pickle

import cv2

# Assumed setup for this snippet: an OpenCV face cascade plus the LBPH
# recognizer from opencv-contrib, loaded from the trained model below
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")
labels = {"person_name": 1}
with open("labels.pickle", 'rb') as f:
    og_labels = pickle.load(f)
    labels = {v:k for k, v in og_labels.items()}
# print(labels) # {0: 'emilia-clarke', 1: 'peter-dinklage'}
cap = cv2.VideoCapture(0+cv2.CAP_DSHOW)
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)  # flip horizontally for a mirror image
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    for x, y, w, h in faces:
        roi_gray = gray[y:y+h, x:x+w]

        # recognizer
        id_, conf = recognizer.predict(roi_gray)
        if conf >= 80:
            font = cv2.FONT_HERSHEY_SIMPLEX
            name = labels[id_]
            color = (255, 0, 255)
            stroke = 2
            cv2.putText(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)
    cv2.imshow('face recognition', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example 19
import numpy as np
import cv2


def generate_home(paths):
    back_img = np.zeros((300, 800, 3), np.uint8)
    count = 0
    for path in paths:
        img = cv2.imread(path)
        img = cv2.resize(img, (200, 200), interpolation=cv2.INTER_AREA)
        back_img[50:250, 50 + 250 * count:250 + 250 * count] = img
        name = path.split('/')[-1]
        cv2.putText(back_img, name, (120 + 250 * count, 270),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)
        count += 1
        if count >= 3:
            break
    return back_img


paths = [
    'images/path/path1-1.png', 'images/path/path1.png',
    'images/path/path2.png', 'images/path/path3.png'
]
back_img = generate_home(paths)
cv2.imshow("back", back_img)
cv2.waitKey()
cv2.destroyAllWindows()
Example 20
import cv2
import numpy as np

img = cv2.imread("Resources/lambo.png")

cv2.imshow("Image", img)

imgResize = cv2.resize(img, (1000, 500))
print(imgResize.shape)

imgCropped = img[0:200, 200:500]
print(type(imgCropped))

cv2.imshow("Resized Image", imgResize)
cv2.imshow("Cropped Image", imgCropped)

cv2.waitKey(0)
Example 21
# (fragment: relies on cv2, numpy as np, datetime, time and a project-local
# constants module imported elsewhere in the original program)
def run(drone, args, lerp, ddir, face_cascade):

    if not drone.tello.connect():
        print("Tello not connected")
        return

    if not drone.tello.set_speed(drone.speed):
        print("Not set speed to lowest possible")
        return

    # In case streaming is already on; this happens when a previous run quit without the escape key.
    if not drone.tello.streamoff():
        print("Could not stop video stream")
        return

    if not drone.tello.streamon():
        print("Could not start video stream")
        return

    frame_read = drone.tello.get_frame_read()
    drone.tello.get_battery()

    imgCount = 0
    scan = 0
    frame_idx = 0
    OVERRIDE = False
    should_stop = False
    override_speed = args.override_speed
    target_distance = args.distance
    action_str = 'Searching For Target'

    safety_zone_x = args.saftey_x
    safety_zone_y = args.saftey_y

    if args.debug:
        print("DEBUG MODE ENABLED!")

    while not should_stop:
        drone.update()

        if frame_read.stopped:
            frame_read.stop()
            break

        current_time = str(datetime.datetime.now()).replace(':', '-').replace(
            '.', '_')

        frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
        drone_frame = frame_read.frame

        vid = drone.tello.get_video_capture()

        if args.save_session:
            cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount),
                        drone_frame)

        frame = np.rot90(frame)
        imgCount += 1

        time.sleep(1 / constants.FPS)

        # Listen for key presses
        keyboard = cv2.waitKey(20)
        if keyboard == ord('t'):
            if not args.debug:
                print("Lifting Off")
                drone.tello.takeoff()
                drone.tello.get_battery()
            drone.send_rc_control = True

        if keyboard == ord('l'):
            if not args.debug:
                print("Landing")
                drone.tello.land()
            drone.send_rc_control = False

        if keyboard == 8:
            if not OVERRIDE:
                OVERRIDE = True
                print("OVERRIDE ENABLED")
            else:
                OVERRIDE = False
                print("OVERRIDE DISABLED")

        if keyboard == 27:
            should_stop = True
            break
        gray = cv2.cvtColor(drone_frame, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(
            gray, scaleFactor=1.05,
            minNeighbors=3)  # Detects face returns an array

        # scaleFactor - how much the image size is reduced at each image scale.
        # 1.05 is a good value: a small resize step (5% per scale) increases the
        # chance that some scale matches the model size, at the cost of speed.
        # Raising it toward 1.4 is faster but risks missing faces altogether.
        #
        # minNeighbors - how many neighbors each candidate rectangle needs in
        # order to be retained. Higher values give fewer but higher-quality
        # detections; 3-6 is a good range.
        #
        # minSize - minimum possible object size; smaller objects are ignored.
        # (30, 30) is a common starting point for face detection.
        #
        # maxSize - maximum possible object size; larger objects are ignored.
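        #
        # An illustrative call using the minSize/maxSize parameters described
        # above (the values are assumptions, not taken from the original):
        # faces = face_cascade.detectMultiScale(gray, scaleFactor=1.05,
        #                                       minNeighbors=3, minSize=(30, 30),
        #                                       maxSize=(300, 300))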

        target_face_size = constants.OPENCV_FACE_SIZES[target_distance]

        # These are our center drone_window_dimensions
        noFaces = len(faces) == 0
        bounding_box_size = 0
        drone_window_center_width = int(
            (constants.DRONE_OBERSERVATION_WINDOW_DIMENSIONS[0] / 2) - 20)
        drone_window_center_height = int(
            (constants.DRONE_OBERSERVATION_WINDOW_DIMENSIONS[1] / 2) - 20)
        drone_window_center_x = drone_window_center_width
        drone_window_center_y = drone_window_center_height

        if drone.send_rc_control and not OVERRIDE:
            frame_idx += 1

            for (x, y, w, h) in faces:

                roi_gray = gray[y:y + h, x:x + w]
                roi_color = drone_frame[y:y + h, x:x + w]
                action_str = "TARGET FOUND"

                face_box_col = (255, 0, 0)
                face_box_stroke = 2

                bounding_box_x = x + w
                bounding_box_y = y + h
                bounding_box_size = w * 2

                target_x = int((bounding_box_x + x) / 2)
                target_y = int((bounding_box_y + y) / 2) + constants.UDOFFSET

                true_center_vector = np.array(
                    (drone_window_center_width, drone_window_center_height,
                     target_face_size))
                true_target_vector = np.array(
                    (target_x, target_y, bounding_box_size))
                distance_vector = true_center_vector - true_target_vector

                dist_error = target_face_size - w
                dist_control = drone.dist_pid.control(dist_error)

                if not args.debug:
                    offset_x = target_x - drone_window_center_x
                    h_control = drone.h_pid.control(offset_x)
                    drone.yaw_velocity = h_control
                    scan = h_control

                    offset_y = target_y - drone_window_center_y
                    v_control = drone.v_pid.control(-offset_y)
                    drone.up_down_velocity = v_control

                    drone.for_back_velocity = dist_control
                    print('-----dist_control', dist_control)
                    print('-----dist_error', dist_error)
                    print(
                        "offset=(%d,%d), cur_size=%d, size_error=%d, h_control=%f"
                        % (offset_x, offset_y, w, dist_error, h_control))

                cv2.rectangle(drone_frame, (x, y),
                              (bounding_box_x, bounding_box_y), face_box_col,
                              face_box_stroke)
                cv2.circle(drone_frame, (target_x, target_y), 10, (0, 255, 0),
                           2)

                # Draw the safety zone
                # cv2.rectangle(drone_frame, (target_x - safety_zone_x, target_y - safety_zone_y),
                #               (target_x + safety_zone_x, target_y + safety_zone_y), (0, 255, 0), face_box_stroke)

                cv2.putText(drone_frame, str(distance_vector), (0, 64),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

            if noFaces:
                print(bounding_box_size, target_distance)
                drone.h_pid.reset()
                drone.v_pid.reset()
                drone.dist_pid.reset()
                drone.yaw_velocity = scan
                drone.up_down_velocity = 0
                drone.for_back_velocity = 0
                action_str = "No Target"
                print("NO TARGET")

        # Draw the center of screen circle, this is what the drone tries to match with the target coords
        cv2.circle(drone_frame,
                   (drone_window_center_width, drone_window_center_height), 10,
                   (0, 0, 255), 2)
        get_hud(drone_frame, idx=frame_idx, action=action_str)
        # parenthesized so the lerp fraction stays within [0, 1]
        dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)),
                    (target_distance + 1) / 7)

        if OVERRIDE:
            text = "User Control: {}".format(override_speed)
            dCol = (255, 255, 255)
        else:
            text = "AI Control: {}".format(str(target_distance))

        cv2.putText(drone_frame, text, (31, 665), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    dCol, 2)
        cv2.imshow('Drone Tracking...', drone_frame)

    drone.tello.get_battery()
    cv2.destroyAllWindows()
    drone.tello.end()
Example 22
# Bitwise Operations
#
# This includes bitwise AND, OR, NOT and XOR operations. They are highly
# useful for extracting any part of an image, and for defining and working
# with non-rectangular ROIs. Below is an example of changing a particular
# region of an image.
#
# The goal is to put the OpenCV logo above an image. Adding the two images
# would change the colors, and blending would give a transparent effect, but
# the logo should be opaque. For a rectangular region a plain ROI would do,
# but the OpenCV logo is not a rectangular shape, so bitwise operations are
# used instead:
import cv2

mainImg = cv2.imread('resource/lena.jpg')
img2 = cv2.imread('resource/opencv_logo.png')

# I want to put logo on top-left corner, So I create a ROI
rows, cols, channels = img2.shape
roi = mainImg[0:rows, 0:cols]

# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 0, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)

# Now black-out the area of logo in ROI
bg_roi = cv2.bitwise_and(roi, roi, mask=mask_inv)

# Take only region of logo from logo image.
fg_img2 = cv2.bitwise_and(img2, img2, mask=mask)

# Put logo in ROI and modify the main image
dst = cv2.add(bg_roi, fg_img2)
mainImg[0:rows, 0:cols] = dst

cv2.imshow('res', mainImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example 23
# library imports
import cv2 as cv

# Saving an Image on a key press
img = cv.imread('../images/image_test.jpg')
cv.imshow('Option to Save image', img)
print("press 's' to save the image as 'image_test_2.jpg\n")
# NOTE: on a 64-bit machine this needs to be: key = cv.waitKey(0) & 0xFF
key = cv.waitKey(0)

if key == 27:  # wait for the ESC key to exit
    cv.destroyAllWindows()
elif key == ord('s'):  # wait for 's' key to save and exit
    cv.imwrite('../images/image_test_2.jpg', img)
    cv.destroyAllWindows()

# write an image with imwrite
image_to_save = '../images/image_test_3.jpg'
cv.imwrite(image_to_save, img)

print('Image saved as {}'.format(image_to_save))
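
# Per the NOTE above, on 64-bit machines the key code from waitKey is masked
# down to its low byte before comparing; the pattern looks like:
# key = cv.waitKey(0) & 0xFF
# if key == ord('s'):
#     ...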
Example 24
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('build/asaki.mp4')
while cap.isOpened():
    ret, frame = cap.read()
    # ret is True if the frame was read correctly
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # cv.imshow('frame', gray)      # grayscale output

    # frames are read as BGR and displayed as BGR by default, so there is no
    # need to convert to RGB:
    # color = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
    # cv.imshow('frame', color)

    cv.imshow('frame', frame)  # color output
    if cv.waitKey(1) == ord('q'):
        break
        break
cap.release()
cv.destroyAllWindows()
Example 25
# (fragment: edges, the *_img canvases and the draw_circle helper are defined
# earlier in the original program)
circles = cv2.HoughCircles(edges,
                           cv2.HOUGH_GRADIENT,
                           dp=1,
                           minDist=280,
                           param1=255,
                           param2=97,
                           minRadius=0,
                           maxRadius=0)
print("Circles: ", circles)

# Coordinates and radius of the largest circle
x_max, y_max, r_max = 0, 0, 0
# Outline every detected circle (HoughCircles returns None when nothing is
# found, so guard before iterating)
if circles is not None:
    for circle in circles[0]:
        x0, y0, r = circle
        print(x0, y0, r)

        if r > r_max:
            x_max, y_max, r_max = x0, y0, r

        draw_circle(circles_img, (x0, y0), r)
# Outline the largest circle
draw_circle(biggestCircle_img, (x_max, y_max), r_max)

cv2.imshow('All lines', lines_img)
cv2.imshow('Longest line', longestLine_img)
cv2.imshow('Circles', circles_img)
cv2.imshow('Biggest circle', biggestCircle_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example 26
import cv2
import face_recognition

# Assumed setup for this snippet (the reference file name is an assumption):
imgJohnny = face_recognition.load_image_file('assets/johnny.jpg')
imgJohnny = cv2.cvtColor(imgJohnny, cv2.COLOR_BGR2RGB)
# FINDING THE FACE IN THE IMAGE
# THIS GIVES US THE LOCATION OF FACE IN FORMAT --> TOP - RIGHT - BOTTOM - LEFT
johnnyLocation = face_recognition.face_locations(imgJohnny)[0]

# GETTING 128 MEASUREMENTS OF THE FACE
johnnyEncode = face_recognition.face_encodings(imgJohnny)[0]

# REPEATING THE SAME STEP FOR A TEST IMAGE
imgJohnnytest = face_recognition.load_image_file('assets/hardik.jpg')
imgJohnnytest = cv2.cvtColor(imgJohnnytest, cv2.COLOR_BGR2RGB)
johnnytestLocation = face_recognition.face_locations(imgJohnnytest)[0]
johnnytestEncode = face_recognition.face_encodings(imgJohnnytest)[0]

# COMPARING BOTH FACES AND PRINTING THE RESULT
result = face_recognition.compare_faces([johnnyEncode],johnnytestEncode)
print(result)

# IF FACES MATCH --> BOTH IMAGES WILL HAVE GREEN RECTANGLE
# ELSE TEST IMAGE WILL HAVE RED RECTANGLE
if result[0]:
    cv2.rectangle(imgJohnny, (johnnyLocation[3], johnnyLocation[0]), (johnnyLocation[1], johnnyLocation[2]), (0, 255, 0), 2)
    cv2.rectangle(imgJohnnytest, (johnnytestLocation[3], johnnytestLocation[0]), (johnnytestLocation[1], johnnytestLocation[2]), (0, 255, 0), 2)
else:
    cv2.rectangle(imgJohnny, (johnnyLocation[3], johnnyLocation[0]), (johnnyLocation[1], johnnyLocation[2]), (0, 255, 0), 2)
    cv2.rectangle(imgJohnnytest, (johnnytestLocation[3], johnnytestLocation[0]), (johnnytestLocation[1], johnnytestLocation[2]), (0, 0, 255), 2)

# SHOWING THE FINAL IMAGE WITH GREEN RECTANGLE
cv2.imshow('Johnny Depp', imgJohnny)
cv2.imshow('Johnny Depp Test', imgJohnnytest)
cv2.waitKey(0)
Example 27
import cv2
import numpy as np
from matplotlib import pyplot as plt


def main():
    median = cv2.imread(r'median.jpg', cv2.IMREAD_GRAYSCALE)
    src = median

    hist, bins = np.histogram(src.ravel(), 256, [0, 256])
    threshold = bins[hist.argmax()] * 1.1
    th = demo_thersholding(src, threshold=threshold)

    # cv2.imshow('threshed',th)

    erosion_size = 5
    erosion_type = cv2.MORPH_ELLIPSE
    element = cv2.getStructuringElement(
        erosion_type, (2 * erosion_size + 1, 2 * erosion_size + 1),
        (erosion_size, erosion_size))
    erosion_dst = cv2.erode(th, element, iterations=1)
    cv2.imshow('erosion', erosion_dst)

    dialation_dst = cv2.dilate(erosion_dst, element, iterations=2)
    cv2.imshow('dialation', dialation_dst)
    # =====================================================================
    mask = cv2.inRange(median, 0, threshold * 1.3)
    result = cv2.bitwise_and(median, median, mask=mask)
    result2 = cv2.bitwise_and(median, mask, mask=mask)

    dialation_size = 2
    erosion_size = 1

    def a(img, dialation_size, erosion_size, dialation_iter=2, erosion_iter=1):
        mask = cv2.inRange(img, 0, threshold * 1.3)

        def get_element(size, type=cv2.MORPH_ELLIPSE):
            ksize = (2 * size + 1, 2 * size + 1)
            anchor = (size, size)
            element = cv2.getStructuringElement(type,
                                                ksize=ksize,
                                                anchor=anchor)
            return element

        dialation_element, erose_element = [
            get_element(size) for size in [dialation_size, erosion_size]
        ]
        dialated_mask = cv2.dilate(mask,
                                   dialation_element,
                                   iterations=dialation_iter)

        dialate_and_erosed = cv2.erode(dialated_mask,
                                       erose_element,
                                       iterations=erosion_iter)
        erosed_mask = cv2.erode(mask, erose_element, iterations=erosion_iter)

        items = [
            img,
            mask,
            # result,
            # result2,
            dialated_mask,
            dialate_and_erosed,
            # erosed_mask,
        ]
        cols = 2
        rows = (len(items) + 1) // cols
        plt.subplot(rows, cols, 1)
        for i, img in enumerate(items, start=1):
            plt.subplot(rows, cols, i)
            plt.imshow(img, cmap="gray")
        plt.show()

    a(median,
      dialation_size=2,
      erosion_size=2,
      dialation_iter=2,
      erosion_iter=4)
    blure_size = 45
    blured = cv2.GaussianBlur(median, (blure_size, blure_size), 0)
    a(blured,
      dialation_size=2,
      erosion_size=2,
      dialation_iter=2,
      erosion_iter=4)
    blure_size = 7
    blured = cv2.medianBlur(median, blure_size)
    a(blured,
      dialation_size=3,
      erosion_size=2,
      dialation_iter=2,
      erosion_iter=4)

    cv2.waitKey(0)
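
# Note: demo_thersholding is not defined in this snippet; a stand-in
# consistent with how it is used above might be (an assumption):
def demo_thersholding(src, threshold):
    # plain binary threshold at the requested level
    _, th = cv2.threshold(src, threshold, 255, cv2.THRESH_BINARY)
    return th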
Example 28
# (fragment: runs inside a face-recognition loop; cap, img, imgS, k,
# faceCutFrame, encodeListKnow, NAME and detected_count come from earlier in
# the original program)
            encodeCutFrame = face_recognition.face_encodings(
                imgS, faceCutFrame)

            for encodeFace, faceLoc in zip(encodeCutFrame, faceCutFrame):
                matches = face_recognition.compare_faces(
                    encodeListKnow, encodeFace)
                faceDis = face_recognition.face_distance(
                    encodeListKnow, encodeFace)
                #print(faceDis)
                matchIndex = np.argmin(faceDis)  # index of the smallest distance

                if matches[matchIndex]:
                    detected_count += 1
                    if detected_count == 20:
                        print(True)
                    y1, x2, y2, x1 = faceLoc
                    #cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
                    #cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
                    cv2.putText(img, NAME, (x1 + 6, y2 - 6),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 1)

        cv2.imshow('Reconocimiento Facial', img)

        if k == 27:  # on [ESC]
            print('[FINISH]')
            break
    cap.release()
    cv2.destroyAllWindows()
else:
    print('[CAMERA ERROR]')
Example 29
import cv2
img = cv2.imread('1.jpg', 0)  # flag 0 loads the image in grayscale
cv2.imshow('image1', img)
cv2.waitKey(0)
Example 30
# (fragment: run_inference_for_single_image, detection_model, vis_util and
# category_index come from the TensorFlow Object Detection API setup earlier
# in the original program)
import cv2

cap = cv2.VideoCapture(0)

# for image_path in TEST_IMAGE_PATHS:
while True:

    ret, image_np = cap.read()

    # image_np = np.array(Image.open(image_path))

    output_dict = run_inference_for_single_image(detection_model, image_np)

    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)

    #Image.fromarray(image_np).show()

    cv2.imshow("object detection", cv2.resize(image_np, (800, 600)))
    if cv2.waitKey(25) & 0xFF == ord("q"):
        cv2.destroyAllWindows()
        break
Example 31
import base64

import cv2
import numpy as np
import zmq

context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
footage_socket.bind('tcp://*:5555')
footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')

while True:
    try:
        frame = footage_socket.recv_string()
        img = base64.b64decode(frame)
        npimg = np.frombuffer(img, dtype=np.uint8)  # fromstring is deprecated
        source = cv2.imdecode(npimg, 1)
        cv2.imshow("Stream", source)
        cv2.waitKey(1)

    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        break
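
# For context, a matching publisher could look like this sketch (assumed, not
# part of the original): it JPEG-encodes webcam frames, base64-encodes them,
# and PUBs them to the SUB socket bound above.
#
# import base64
# import cv2
# import zmq
#
# context = zmq.Context()
# footage_socket = context.socket(zmq.PUB)
# footage_socket.connect('tcp://localhost:5555')
# camera = cv2.VideoCapture(0)
# while True:
#     ret, frame = camera.read()
#     if not ret:
#         break
#     _, buffer = cv2.imencode('.jpg', frame)
#     footage_socket.send_string(base64.b64encode(buffer).decode('ascii'))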