Code Example #1
import av
import cv2
import numpy
import tellopy


def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        frames = container.decode(video=0)  # PyAV yields frames from a generator
        while True:
            frame = next(frames)

            image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
            cv2.imshow('Original', image)
            cv2.imshow('Canny', cv2.Canny(image, 100, 200))
            cv2.waitKey(1)

    except Exception as ex:
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Code Example #2
import os
import sys
from os import path

import cv2 as cv

import image_processor  # project-specific module providing process()

# Usage (assumed): python <script>.py <images_dir>
images_path = sys.argv[1]
image_files = [path.join(images_path, file)
               for file in os.listdir(images_path)
               if path.isfile(path.join(images_path, file))]
image_count = len(image_files)
print(image_files)

i = 0
while True:
    i %= image_count
    image_file = image_files[i]
    print(image_file)

    img = cv.imread(image_file, cv.IMREAD_COLOR)
    (blue_points, red_points) = image_processor.process(img)
    print("BLUE points: " + str(blue_points))
    print("RED points: " + str(red_points))

    key = cv.waitKey()
    print("Key pressed: {}".format(key))
    if key == 81 or key == 8:
        # 81: left arrow (on Linux), 8: backspace
        i -= 1
    elif key == 27:
        # ESC
        cv.destroyAllWindows()
        break
    else:
        i += 1
Code Example #3
File: main.py  Project: iGiraffeBlack/yolo
def main(queue,camID,initial_id,r_id,cam_id,unique_id):
    '''
    Detect, track and save information about persons.

    Objectives:

    1. Use YOLO to detect persons and store coordinates
    2. Use DeepSORT to track detected persons across video frames
    3. Save detected persons for re-identification
    4. If re-identified, replace the local camID with the global camID across camera views
    '''
    # Init YOLO model and load to memory
    yolo = YOLO()
    start = time.time()

    counter = []

    #deep_sort
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1)
    w = 650
    h = 576
    writeVideo_flag = True # Set to False to not save videos and detections
    if writeVideo_flag:
    # Define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('./output/'+str(camID)+'_output.avi', fourcc, 10, (w, h))
        list_file = open('logs/detection_camera'+str(camID)+'.txt', 'w')
        frame_index = -1
    
    fps = 0.0

    frame_counter = 0

    #Directory creation
    if not os.path.isdir('./images/frames/'+str(camID)):
        os.mkdir('./images/frames/'+str(camID))
    if not os.path.isdir('./images/detections/'+str(camID)):
        os.mkdir('./images/detections/'+str(camID))

    # Empty folders
    for file in (sorted(os.listdir('./images/detections/'+str(camID)))):
        os.remove('./images/detections/'+str(camID)+'/'+file)
    for file in (sorted(os.listdir('./images/frames/'+str(camID)))):
        os.remove('./images/frames/'+str(camID)+'/'+file)
    

    while not queue.empty():
        # Retrieve a frame from the queue
        frame = queue.get()
        cv2.imwrite('./images/frames/'+str(camID)+'/'+str(frame_counter)+'.jpg',frame)
        frame_counter+=1
        t1 = time.time()
        # frame_copy --> to be cropped according to detected person and saved
        # frame_save --> Frame to be saved with video only showing unique camID
        frame_copy = frame.copy()
        frame_save = frame.copy()

        image = Image.fromarray(frame[...,::-1]) #bgr to rgb
        # Perform YOLO detection (Objective 1)
        boxs,class_names = yolo.detect_image(image)
        backend.clear_session()
        features = encoder(frame,boxs)
        # Detection confidences are not available here, so every score is set to 1.0.
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # Call the tracker and update with current detections
        tracker.predict()
        tracker.update(detections)

        i = 0
        indexIDs = []
        boxes = []
        for det in detections:
            bbox = det.to_tlbr()
            #cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            #boxes.append([track[0], track[1], track[2], track[3]])
            # Store tracking_id as a separate variable for replacement
            tracking_id = track.track_id
            indexIDs.append(int(tracking_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            if int(bbox[0])<0 or int(bbox[1])<0 or int(bbox[2])<0 or int(bbox[3])<0:
                continue

            # Change camIDs of re-identified targets.
            # Camera 1 is currently used as the base of reference for comparing other cameras.
            if not camID == 1:
                for a in range(len(initial_id)):
                    if int(track.track_id) == initial_id[a]:
                        tracking_id = int(r_id[a]) # r_id is for only CAM 1
                        # Prevent the camID donated by CAM 1 from clashing with a camID issued by CAM 2 (which may coincidentally be the same number)
                    elif int(track.track_id) == r_id[a]: # Prevent identical camIDs on one source
                        tracking_id = int(initial_id[a])
                    else:
                        tracking_id = track.track_id 
            else:
                tracking_id = track.track_id #Deepsort id
            color = [int(c) for c in COLORS[tracking_id]]
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3) #bbox[0] and [1] is startpoint [2] [3] is endpoint

            # Select which camID to be displayed (Local or Global if re-identified)
            display_id = tracking_id
            for b in range(len(unique_id)):
                if tracking_id == r_id[b]:
                    display_id = unique_id[b]
                    
            cv2.putText(frame,str(display_id),(int(bbox[0]), int(bbox[1] -10)),0, 5e-3 * 150, (color),2)

            if len(class_names) > 0:
               class_name = class_names[0]
               #cv2.putText(frame, str(class_names[0]),(int(bbox[0]), int(bbox[1] -20)),0, 5e-3 * 150, (color),2)
               
               # Save bounding box data for re-identification (Objective 3)
               frame1 = frame_copy[int(bbox[1]):int(bbox[3]),int(bbox[0]):int(bbox[2])]#create instance of cropped frame using current frame, crop according to bounding box coordinates
               query_path = image_path+'/query'
               gallery_path = image_path+'/gallery'
            
               #Perform resizing to ensure equal size of features extracted from images
               frame2 = cv2.resize(frame1,(46,133),interpolation = cv2.INTER_AREA) #resize cropped image
               cv2.imwrite('./images/detections/'+str(camID)+'/frame'+str(frame_counter)+'_'+str(tracking_id)+'.jpg',frame2)


               if not camID == 1:
                   dst_path = gallery_path
                   #if file does not exist --> save
                   file_path = dst_path+'/'+str(tracking_id)+'_'+str(camID)+'.png' 
                   if frame_counter % 10 == 0 or not os.path.isfile(file_path):
                       cv2.imwrite(file_path,frame2)#save cropped frame


               if camID == 1:
                   dst_path = query_path
                   #if file does not exist --> save
                   file_path = dst_path+'/'+str(tracking_id)+'.png'
                   if frame_counter % 10 == 0 or not os.path.isfile(file_path):
                       cv2.imwrite(file_path,frame2)#save cropped frame

            # Draw bounding boxes and Unique IDs for video to be saved
            if tracking_id in initial_id or tracking_id in r_id:
                
                if tracking_id in initial_id and not camID == 1:
                    index = initial_id.index(tracking_id)
                    color = [int(c) for c in COLORS[int(unique_id[index].split('P')[1])]] #Determine color of bounding box
                    # Draw box and label with unique ID
                    cv2.rectangle(frame_save, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3) #bbox[0] and [1] is startpoint [2] [3] is endpoint
                    cv2.putText(frame_save,str(unique_id[index]),(int(bbox[0]), int(bbox[1] -10)),0, 5e-3 * 150, (color),2)

                elif tracking_id in r_id and camID == 1:
                    index = r_id.index(tracking_id)
                    color = [int(c) for c in COLORS[int(unique_id[index].split('P')[1])]]
                    cv2.rectangle(frame_save, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3) #bbox[0] and [1] is startpoint [2] [3] is endpoint
                    cv2.putText(frame_save,str(unique_id[index]),(int(bbox[0]), int(bbox[1] -10)),0, 5e-3 * 150, (color),2)
            
            i += 1
            #bbox_center_point(x,y)
            center = (int(((bbox[0])+(bbox[2]))/2),int(((bbox[1])+(bbox[3]))/2))
            #track_id[center]
            pts[track.track_id].append(center)
            #center point
            #cv2.circle(frame,  (center), 1, color, 5)
            '''
	    #draw motion path
            for j in range(1, len(pts[track.track_id])):
                if pts[track.track_id][j - 1] is None or pts[track.track_id][j] is None:
                   continue
                thickness = int(np.sqrt(64 / float(j + 1)) * 2)
                cv2.line(frame,(pts[track.track_id][j-1]), (pts[track.track_id][j]),(color),thickness)
                #cv2.putText(frame, str(class_names[j]),(int(bbox[0]), int(bbox[1] -20)),0, 5e-3 * 150, (255,255,255),2)
            '''
        count = len(set(counter))
        # Visualize result

        cv2.putText(frame, "FPS: %f"%(fps),(int(20), int(40)),0, 5e-3 * 200, (0,255,0),3)
        cv2.namedWindow('Camera '+str(camID), 0)
        cv2.resizeWindow('Camera '+str(camID), w ,h)
        cv2.imshow('Camera '+str(camID), frame)

        if writeVideo_flag:
            #save a frame
            frame_save = cv2.resize(frame_save,(w,h)) # Resize frame to fit video
            out.write(frame_save)
            frame_index = frame_index + 1
            # Write detections onto file
            list_file.write('./images/frames/'+str(camID)+'/'+str(frame_counter)+'.jpg'+' | ')
            if len(boxs) != 0:
                for i in range(0,len(boxs)):
                    list_file.write(str(boxs[i][0]) + ' '+str(boxs[i][1]) + ' '+str(boxs[i][2]) + ' '+str(boxs[i][3]) + ' ' + str(class_names[i][0])+', ')
            list_file.write('\n')

        fps  = ( fps + (1./(time.time()-t1)) ) / 2 

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print(" ")
    print("[Finish]")
    end = time.time()

    if len(pts[track.track_id]) != 0:
       print(source_names[camID-1]+": "+ str(count) + " " + str(class_name) +' Found')

    else:
       print("[None Found]")

    if writeVideo_flag:
        out.release()
        list_file.close()
    
    print('Time taken: '+str(round(end-start))+' seconds')

    cv2.destroyAllWindows()
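
The detection-to-tracking core of Code Example #3 reduces to the short sketch below. This is a minimal sketch assuming the same deep_sort package layout the example imports (nn_matching, preprocessing, Detection, Tracker, gdet); the helper name track_frame is hypothetical.

import numpy as np
from deep_sort import nn_matching, preprocessing
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet

metric = nn_matching.NearestNeighborDistanceMetric("cosine", 0.3, None)
tracker = Tracker(metric)
encoder = gdet.create_box_encoder('model_data/market1501.pb', batch_size=1)

def track_frame(frame, boxs, nms_max_overlap=1.0):
    # Attach appearance features to the raw detector boxes
    features = encoder(frame, boxs)
    detections = [Detection(bbox, 1.0, f) for bbox, f in zip(boxs, features)]
    # Non-maxima suppression on the wrapped detections
    boxes = np.array([d.tlwh for d in detections])
    scores = np.array([d.confidence for d in detections])
    keep = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
    detections = [detections[i] for i in keep]
    # Advance the Kalman filter and association step
    tracker.predict()
    tracker.update(detections)
    # Return confirmed, recently updated tracks as (id, top-left/bottom-right box)
    return [(t.track_id, t.to_tlbr()) for t in tracker.tracks
            if t.is_confirmed() and t.time_since_update <= 1]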
Code Example #4
def destroy(self):
    self.cap.release()
    cv2.destroyAllWindows()
Code Example #5
File: detect.py  Project: Akuseru1/sudoku-detection
filtered = apply_filters(original)
big_square = find_big_square(filtered)
corners = find_corners(big_square)
corners_image = original.copy()
cv2.drawContours(corners_image, [big_square], -1, (0, 255, 0),
                 3)  # draw the outer contour
draw_corners(corners_image, corners)  # draw the corners
originalT = transform(original, corners)
filteredT = transform(filtered, corners)
squares = find_all_squares(originalT)
originalS = draw_squares_to_image(originalT, squares)

cv2.imshow('original', original)
cv2.waitKey(0)
cv2.imshow('filtro', filtered)
cv2.waitKey(0)
cv2.imshow('contorno y esquinas', corners_image)
cv2.waitKey(0)
cv2.imshow('transformada', filteredT)
cv2.waitKey(0)
cv2.imshow('originalT', originalT)
cv2.waitKey(0)
cv2.imshow('subcuadros', originalS)
box_array = get_numbers(originalT, squares)
cv2.waitKey(0)
# get_numbers(originalT, squares)
cv2.destroyAllWindows()  #Close all windows
# cv2.imwrite('filtro.jpg',filtered)
# cv2.imwrite('esquinas.jpg',corners_image)
# cv2.imwrite('transformada.jpg',originalT)
# cv2.imwrite('squares.jpg',originalS)
Code Example #6
def clean_down(capture):
    capture.release()
    cv2.destroyAllWindows()
Code Example #7
def im_show(img):
    cv2.imshow('', img)
    if cv2.waitKey(0) == 115:  # 115 == ord('s'): save the image
        cv2.imwrite(input('filename: '), img)
    cv2.destroyAllWindows()
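
# Usage sketch (hypothetical image path): im_show(cv2.imread('photo.jpg'))
# Pressing 's' (ASCII 115) in the window then prompts for a filename to save a copy.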
Code Example #8
# Trying out try/except statements.
# try/except is used here to catch errors other than the ones I anticipated.

import cv2 as cv

frameWidth = 640
frameHeight = 480
cap = cv.VideoCapture(2)

try:
    while cap.isOpened():
        results, frame = cap.read()
        if not results:  # stop if no frame could be read
            break
        cv.imshow('Display Result', frame)
        if cv.waitKey(1) == 27:
            # ASCII code 27 is the 'ESC' key: waitKey(1) pauses about 1 ms, and pressing ESC exits the loop
            break

except Exception as ex:
    print(ex)

finally:
    cap.release()  # when everything is done, release the capture
    cv.destroyAllWindows()  # with recent OpenCV versions this is often unnecessary
Code Example #9
def test_age(partition="test", debug_samples=None):
    print("Partition", partition,
          debug_samples if debug_samples is not None else '')
    dataset = IMDBWIKIAge(partition=partition,
                          target_shape=(224, 224, 3),
                          preprocessing='vggface2',
                          augment=False,
                          debug_max_num_samples=debug_samples)
    print("Samples in dataset partition", dataset.get_num_samples())

    if people_by_identity_imdb:
        print("IMDB statistics...")
        samples_stats = defaultdict(list)
        for _, identity_data in people_by_identity_imdb.items():
            if identity_data[1] == PARTITION_TRAIN:
                samples_stats["train"].append(identity_data[0])
            elif identity_data[1] == PARTITION_VAL:
                samples_stats["val"].append(identity_data[0])
            elif identity_data[1] == PARTITION_TEST:
                samples_stats["test"].append(identity_data[0])
            else:
                print("Error loading partition", identity_data[0])
        print("Total train {} of different samples {}".format(
            sum(samples_stats["train"]), len(samples_stats["train"])))
        print("Total val {} of different samples {}".format(
            sum(samples_stats["val"]), len(samples_stats["val"])))
        print("Total test {} of different samples {}".format(
            sum(samples_stats["test"]), len(samples_stats["test"])))
        print()
    else:
        print("IMDB loaded from cache...")

    if people_by_identity_wiki:
        print("WIKI statistics...")
        samples_stats = defaultdict(list)
        for _, identity_data in people_by_identity_wiki.items():
            if identity_data[1] == PARTITION_TRAIN:
                samples_stats["train"].append(identity_data[0])
            elif identity_data[1] == PARTITION_VAL:
                samples_stats["val"].append(identity_data[0])
            elif identity_data[1] == PARTITION_TEST:
                samples_stats["test"].append(identity_data[0])
            else:
                print("Error loading partition", identity_data[0])
        print("Total train {} of different samples {}".format(
            sum(samples_stats["train"]), len(samples_stats["train"])))
        print("Total val {} of different samples {}".format(
            sum(samples_stats["val"]), len(samples_stats["val"])))
        print("Total test {} of different samples {}".format(
            sum(samples_stats["test"]), len(samples_stats["test"])))
        print()
    else:
        print("WIKI loaded from cache...")

    gen = dataset.get_generator(fullinfo=True)

    for batch in tqdm(gen):
        for im, age, path, roi in zip(batch[0], batch[1], batch[2], batch[3]):
            print("Path:", path)
            print("Roi:", roi)
            print("Age:", age, type(age))
            print("Shape:", im.shape)
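            # Undo the dataset preprocessing for display: min-max rescale to 0..255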
            facemax = np.max(im)
            facemin = np.min(im)
            im = (255 * ((im - facemin) / (facemax - facemin))).astype(
                np.uint8)
            cv2.putText(
                im, "{}".format(np.argmax(age) if NUM_CLASSES > 1 else age),
                (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                (255, 255, 255))
            cv2.imshow('image', im)
            if cv2.waitKey(0) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                return
Code Example #10
File: trackMoving.py  Project: juanrios1307/RecPerson
def capturarFoto():
	execution_path = os.getcwd()
	cap = cv2.VideoCapture(0)

	img_in = "image.jpg"
	img_out = "imageo.jpg"

	detector = ObjectDetection()
	detector.setModelTypeAsRetinaNet()
	detector.setModelPath(os.path.join(
		execution_path, "/home/juan-rios/Documentos/python/trackMove/resnet50_coco_best_v2.0.1.h5"))
	detector.loadModel(detection_speed='fast')
	ti = time.time()

	# Create the background subtractor
	fgbg = cv2.createBackgroundSubtractorKNN(
		history=500, dist2Threshold=400, detectShadows=False)

	# Disable OpenCL; if we don't do this it does not work
	cv2.ocl.setUseOpenCL(False)
	cont = 0
	while True:
		# Read the next frame
		ret, frame = cap.read()
		ret, var = cap.read()

		# If we have reached the end of the video, exit
		if not ret:
			break

		# Apply the background subtraction algorithm
		fgmask = fgbg.apply(frame)

		# Copy the thresholded mask in order to find the contours
		contornosimg = fgmask.copy()

		# Find contours in the image
		contornos, hierarchy = cv2.findContours(
			contornosimg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

		# Iterate over all the contours found
		for c in contornos:
			# recObject()
			# Discard the smallest contours
			if cv2.contourArea(c) < 500:
				continue

			# Get the contour's bounding rect, the smallest upright rectangle that encloses it
			(x, y, w, h) = cv2.boundingRect(c)
			# Draw the bounding rectangle
			cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

			return_value, image = cap.read()
			cv2.imwrite(img_in, image)  # save image
			detections = detector.detectObjectsFromImage(input_image=os.path.join(
				execution_path, img_in), output_image_path=os.path.join(execution_path, img_out))
			to = time.time()
			#print(to-ti)
			image = cv2.imread(img_out)
			image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			cv2.imshow('image', image)
			cv2.imshow('Camara', frame)
			for eachObject in detections:
				print(eachObject["name"], " : ", eachObject["percentage_probability"])
				if(eachObject["name"] == "person" and eachObject["percentage_probability"] > 55):
					print("person suspect")
		

		#cv2.imshow('Camara', frame)
		#cv2.imshow('gg', var)
		#cv2.imshow('Umbral', fgmask)
		#cv2.imshow('Contornos', contornosimg)

		if cv2.waitKey(1) & 0xFF == ord('s'):
			break
		
		if cv2.contourArea(c) > 20:
		
			cont = cont +1
			#time.sleep(3)
			cv2.imwrite("/home/juan-rios/Documentos/python/trackMove/original/foto.png", var)
			print(cont)
			print("photo taken successfully")
			time.sleep(1)
		else:
			print("could not access the camera")

	cap.release()
	cv2.destroyAllWindows()

Code Example #11
import sys
import time
import traceback
from time import sleep

import av
import cv2
import numpy
import tellopy


def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        # drone.takeoff()
        # sleep(3)
        # drone.land()
        # sleep(3)
        container = av.open(drone.get_video_stream())
        # skip first 300 frames
        frame_skip = 300
        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                cv2.imshow('Original', image)
                # cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                # Read the key once per frame; repeated waitKey calls would
                # each block briefly and could swallow key presses.
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    cv2.destroyWindow('Original')  # destroyWindow takes the window name
                    drone.land()
                    sleep(1)
                elif key == ord('l'):
                    drone.land()
                    sleep(2)
                elif key == ord('w'):
                    drone.forward(10)
                    sleep(1)
                elif key == ord('s'):
                    drone.backward(10)
                    sleep(1)
                elif key == ord('a'):
                    drone.left(10)
                    sleep(1)
                elif key == ord('d'):
                    drone.right(10)
                    sleep(1)
                elif key == ord('z'):
                    drone.clockwise(10)
                    sleep(1)
                elif key == ord('c'):
                    drone.flip_right()
                    sleep(1)
                elif key == ord('t'):
                    drone.takeoff()
                    sleep(2)
                elif key == ord('u'):
                    drone.up(10)
                    sleep(1)
                elif key == ord('n'):
                    drone.down(10)
                    sleep(1)
                frame_skip = int((time.time() - start_time) / frame.time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Code Example #12
def run(drone, args, lerp, ddir, face_cascade):

    if not drone.tello.connect():
        print("Tello not connected")
        return

    if not drone.tello.set_speed(drone.speed):
        print("Not set speed to lowest possible")
        return

    # In case streaming is on. This happens when we quit this program without the escape key.
    if not drone.tello.streamoff():
        print("Could not stop video stream")
        return

    if not drone.tello.streamon():
        print("Could not start video stream")
        return

    frame_read = drone.tello.get_frame_read()
    drone.tello.get_battery()

    imgCount = 0
    scan = 0
    frame_idx = 0
    OVERRIDE = False
    should_stop = False
    override_speed = args.override_speed
    target_distance = args.distance
    action_str = 'Searching For Target'

    safety_zone_x = args.saftey_x
    safety_zone_y = args.saftey_y

    if args.debug:
        print("DEBUG MODE ENABLED!")

    while not should_stop:
        drone.update()

        if frame_read.stopped:
            frame_read.stop()
            break

        current_time = str(datetime.datetime.now()).replace(':', '-').replace(
            '.', '_')

        frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
        drone_frame = frame_read.frame

        vid = drone.tello.get_video_capture()

        if args.save_session:
            cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount),
                        drone_frame)

        frame = np.rot90(frame)
        imgCount += 1

        time.sleep(1 / constants.FPS)

        # Listen for key presses
        keyboard = cv2.waitKey(20)
        if keyboard == ord('t'):
            if not args.debug:
                print("Lifting Off")
                drone.tello.takeoff()
                drone.tello.get_battery()
            drone.send_rc_control = True

        if keyboard == ord('l'):
            if not args.debug:
                print("Landing")
                drone.tello.land()
            drone.send_rc_control = False

        if keyboard == 8:
            if not OVERRIDE:
                OVERRIDE = True
                print("OVERRIDE ENABLED")
            else:
                OVERRIDE = False
                print("OVERRIDE DISABLED")

        if keyboard == 27:
            should_stop = True
            break
        gray = cv2.cvtColor(drone_frame, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(
            gray, scaleFactor=1.05,
            minNeighbors=3)  # Detects face returns an array

        # scaleFactor – Specifies how much the image size is reduced at each image scale.
        # 1.05 is a good value: a small resizing step (reduce size by 5%) increases the chance
        # that some scale matches the model's detection size, but the algorithm runs slower
        # since it is more thorough. You may increase it to as much as 1.4 for faster
        # detection, at the risk of missing some faces altogether.
        #
        # minNeighbors – Specifies how many neighbors each candidate rectangle must have to be retained.
        # This affects the quality of the detected faces: a higher value gives fewer detections
        # of higher quality. 3 to 6 is a good range.
        #
        # minSize – Minimum possible object size; smaller objects are ignored.
        # This determines how small an object you want to detect. Usually (30, 30) is a good
        # start for face detection.
        #
        # maxSize – Maximum possible object size; bigger objects are ignored.
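        # Illustrative only (not part of this project): a stricter, faster
        # configuration using the parameters described above might look like
        #   faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2,
        #                                         minNeighbors=5,
        #                                         minSize=(30, 30))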

        target_face_size = constants.OPENCV_FACE_SIZES[target_distance]

        # These are our center drone_window_dimensions
        noFaces = len(faces) == 0
        bounding_box_size = 0
        drone_window_center_width = int(
            (constants.DRONE_OBERSERVATION_WINDOW_DIMENSIONS[0] / 2) - 20)
        drone_window_center_height = int(
            (constants.DRONE_OBERSERVATION_WINDOW_DIMENSIONS[1] / 2) - 20)
        drone_window_center_x = drone_window_center_width
        drone_window_center_y = drone_window_center_height

        if drone.send_rc_control and not OVERRIDE:
            frame_idx += 1

            for (x, y, w, h) in faces:

                roi_gray = gray[y:y + h, x:x + w]
                roi_color = drone_frame[y:y + h, x:x + w]
                action_str = "TARGET FOUND"

                face_box_col = (255, 0, 0)
                face_box_stroke = 2

                bounding_box_x = x + w
                bounding_box_y = y + h
                bounding_box_size = w * 2

                target_x = int((bounding_box_x + x) / 2)
                target_y = int((bounding_box_y + y) / 2) + constants.UDOFFSET

                true_center_vector = np.array(
                    (drone_window_center_width, drone_window_center_height,
                     target_face_size))
                true_target_vector = np.array(
                    (target_x, target_y, bounding_box_size))
                distance_vector = true_center_vector - true_target_vector

                dist_error = target_face_size - w
                dist_control = drone.dist_pid.control(dist_error)

                if not args.debug:
                    offset_x = target_x - drone_window_center_x
                    h_control = drone.h_pid.control(offset_x)
                    drone.yaw_velocity = h_control
                    scan = h_control

                    offset_y = target_y - drone_window_center_y
                    v_control = drone.v_pid.control(-offset_y)
                    drone.up_down_velocity = v_control

                    drone.for_back_velocity = dist_control
                    print('-----dist_control', dist_control)
                    print('-----dist_error', dist_error)
                    print(
                        "offset=(%d,%d), cur_size=%d, size_error=%d, h_control=%f"
                        % (offset_x, offset_y, w, dist_error, h_control))

                cv2.rectangle(drone_frame, (x, y),
                              (bounding_box_x, bounding_box_y), face_box_col,
                              face_box_stroke)
                cv2.circle(drone_frame, (target_x, target_y), 10, (0, 255, 0),
                           2)

                # Draw the safety zone
                # cv2.rectangle(drone_frame, (target_x - safety_zone_x, target_y - safety_zone_y),
                #               (target_x + safety_zone_x, target_y + safety_zone_y), (0, 255, 0), face_box_stroke)

                cv2.putText(drone_frame, str(distance_vector), (0, 64),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

            if noFaces:
                print(bounding_box_size, target_distance)
                drone.h_pid.reset()
                drone.v_pid.reset()
                drone.dist_pid.reset()
                drone.yaw_velocity = scan
                drone.up_down_velocity = 0
                drone.for_back_velocity = 0
                action_str = "No Target"
                print("NO TARGET")

        # Draw the center of screen circle, this is what the drone tries to match with the target coords
        cv2.circle(drone_frame,
                   (drone_window_center_width, drone_window_center_height), 10,
                   (0, 0, 255), 2)
        get_hud(drone_frame, idx=frame_idx, action=action_str)
        dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)),
                    (target_distance + 1) / 7)  # interpolation fraction in [0, 1]

        if OVERRIDE:
            text = "User Control: {}".format(override_speed)
            dCol = (255, 255, 255)
        else:
            text = "AI Control: {}".format(str(target_distance))

        cv2.putText(drone_frame, text, (31, 665), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    dCol, 2)
        cv2.imshow('Drone Tracking...', drone_frame)

    drone.tello.get_battery()
    cv2.destroyAllWindows()
    drone.tello.end()
Code Example #13
def cv_show(name, img):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code Example #14
import cv2
import numpy as np


def detect():
    cap = cv2.VideoCapture(0)  # use e.g. '/dev/video2' instead of 0 for an external camera
    while cap.isOpened():

        ret, frame = cap.read()
        if not ret:
            break

        output = frame.copy()

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        lower_red = np.array([10, 153, 101])
        upper_red = np.array([179, 255, 255])

        mask = cv2.inRange(hsv, lower_red, upper_red)

        res = cv2.bitwise_and(frame, frame, mask=mask)

        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        gray = cv2.Canny(gray, 100, 200)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        gray = cv2.medianBlur(gray, 5)

        kernel = np.ones((2, 2), np.uint8)

        gray = cv2.dilate(gray, None, iterations=1)
        gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)

        kernel = np.ones((1, 1), np.uint8)
        gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)

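        # HoughCircles arguments used below: dp=0.9 (inverse accumulator
        # resolution ratio), minDist=200 px between detected centers,
        # param1=70 (Canny high threshold), param2=45 (accumulator threshold);
        # minRadius=0 and maxRadius=0 leave the radius unconstrained.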
        circles = cv2.HoughCircles(gray,
                                   cv2.HOUGH_GRADIENT,
                                   0.9,
                                   200,
                                   param1=70,
                                   param2=45,
                                   minRadius=0,
                                   maxRadius=0)

        if circles is not None:
            circles = np.round(circles[0, :]).astype("int")
            for (x, y, r) in circles:
                cv2.circle(output, (x, y), r, (0, 255, 0), 4)
                cv2.circle(output, (x, y), 3, (0, 255, 255), -1)

                print("Column Number:{}".format(x))
                print("Row Number: {}".format(y))
                print("Radius is: {}".format(r))
                #dist(frame,[x,y])

        cv2.imshow('gray', gray)
        cv2.imshow('frame', output)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Code Example #15
import math
import os
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
from scipy.signal import medfilt


def hrtRate(vidP,vidT):
    #This method counts the total number of frames; it takes the path
    #of the video as a parameter and iterates till the end of the video
    def countFrames(videoPath):
        vid = cv2.VideoCapture(videoPath)
        nof = 0
        while True:
            ret, frame = vid.read()
            if not ret:
                break
            nof=nof+1
            
        vid.release()
        return nof
    #This method is used to make subplots
    def make_plot(axs, tFrame, data, j, s):
        plt.figure(num=None, figsize=(12, 8), dpi=1200, facecolor='w', edgecolor='k')
        axs[j].plot(tFrame,data)
        axs[j].set_title(s)

    '''
    Variables are declared here
    '''
    #Storing the path of the video
    vidPath = vidP
    #Creation of an object that stores the video file in form of multidimensional
    #matrix
    vidObj = cv2.VideoCapture(vidPath)
    #Calculating the total number of frames in the video
    nFrames = int(countFrames(vidPath))
    #Calculating the frames per second of the video
    fps = int(vidObj.get(cv2.CAP_PROP_FPS))
    if fps == 31 or fps == 61:
        fps=fps-1
    #This array is also initialized to zero and will be used to store the time
    #period
    #tFrame = np.zeros(nFrames)
    #User input for total video time
    vidTime = int(vidT)
    print(vidTime)
    #Total number of frames with respect to vidTime
    totalLen = int(vidTime * (fps + 1))


    newVidPath = 'trim.mp4'
    fileExists = os.path.exists('trim.mp4')
    if(fileExists):
        os.remove('trim.mp4')

    ffmpeg_extract_subclip(vidPath, 0, 0 + vidTime,targetname = 'trim.mp4')

    newVidObj = cv2.VideoCapture('trim.mp4')

    """
    if(nFrames < totalLen):
        print("Invalid video input")
        print("Error: Video Length too small")
        sys.exit()
    """
    i = 0

    nFrames = int(countFrames(newVidPath))
    gData = np.zeros(nFrames)
    fps = int(newVidObj.get(cv2.CAP_PROP_FPS) + 1)
    if fps == 31 or fps == 61:
        fps=fps-1
    tFrame = np.zeros(nFrames)

    for i in range(nFrames):
        tFrame[i] = (i+1) / fps

    #Fail check in case of corrupted video file
    if not newVidObj.isOpened():
        print("Error opening the video file.")
        sys.exit()

    i=0
    #Converting the video input to grayscaled, which is stored in gData in form of 
    #2d matrix
    while(True):
        ret, frame = newVidObj.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        gData[i] = np.sum(gray)
        i+=1

    """
    RISKY RECTIFICATION ATTEMPT
    """

    rectifiedGraph = gData

    #For the first 0.5 seconds: if the fps is, say, 30, then the number of frames in
    #0.5 seconds will be 0.5 x 30 = 15 frames

    framesPerBatch = int(0.5 * fps)
    min_var = sys.maxsize * 2 + 1
    max_var = -min_var - 1
    k=0
    while k < framesPerBatch:
        if rectifiedGraph[k] >= max_var:
            max_var = rectifiedGraph[k]
        if rectifiedGraph[k] <= min_var:
            min_var = rectifiedGraph[k]
        k = k + 1

    median = int((max_var + min_var)/2)
    deviation = 0
    i=0
    j=0
    k=0
    rGraph = np.zeros(nFrames)
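    #Rectification: walk the signal in 0.5-second batches and shift each batch
    #by (global median - batch median) so that slow baseline drift is removed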
    while i < nFrames:
        min_var = sys.maxsize * 2 + 1
        max_var = -min_var - 1
        k=i
        while k < i + framesPerBatch:
            if k >= nFrames:
                break
            if rectifiedGraph[k] >= max_var:
                max_var = rectifiedGraph[k]
            if rectifiedGraph[k] <= min_var:
                min_var = rectifiedGraph[k]
            k = k + 1
        batch_median = int((max_var + min_var)/2)
        deviation = int(median - batch_median)
        while j < i + framesPerBatch:
            if j >= nFrames:
                break
            rGraph[j] = rectifiedGraph[j] + deviation
            j = j + 1
        i = i + framesPerBatch
            


    """
    RISKY RECTIFICATION ATTEMPT
    """

    rGraph[nFrames - 1] = rGraph[nFrames - 2]

    #Filtering the data with median filter to remove unwanted noise with a factor 
    #of 5
    fData = medfilt(rGraph,5)

    #A threshold is created to rectify the plot
    g_max = np.amax(fData)
    g_min = np.amin(fData)
    offset = int(g_max - g_min)
    threshold = (g_min) + (0.55*offset)

    #The threshold is used to rectify the plot into a square wave
    sqData = medfilt(rGraph,5)
    i=0
    for i in range(nFrames):
        if sqData[i] <= threshold:
            sqData[i] = 1
        else:
            sqData[i] = 0

    #Now the number of peaks is calculated, by counting the number of
    #level changes in the square wave and dividing it by 2
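    #e.g. the sequence 0,1,1,0,0,1,1,0 has 4 level changes -> 4 / 2 = 2 peaks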
    c=0
    i=1
    prev = sqData[0]
    for i in range(nFrames):
        if sqData[i] != prev:
            c = c+1
            prev = sqData[i]
        else:
            prev = sqData[i]


    #Finally the wave is plotted
    #plt.title("HR Graph")
    #plt.xlabel("xlabel")
    #plt.ylabel("ylabel")
    #plt.plot(tFrame,sqData)

    #Creating a pane for plotting 4 different graphs
    fig, axs = plt.subplots(4)
    fig.tight_layout(h_pad = 2)
    fig.suptitle('Heart Rate')
    plt.subplots_adjust(top=0.85)
    make_plot(axs, tFrame, gData,0, "First Plot")
    make_plot(axs, tFrame, rGraph, 1, "Corrected Plot")
    make_plot(axs, tFrame, fData, 2, "Filtered Plot")
    make_plot(axs, tFrame, sqData, 3, "Square Plot")

    fileExists = os.path.exists('static\\out.png')
    if(fileExists):
        os.remove('static\\out.png')

    fig.savefig('static\\out.png',dpi = 1200)

    #To convert beats counted over a vidTime-second clip into beats per minute,
    #the counter is multiplied by 60 / vidTime (a factor of 6 for a 10-second video)
    multiplying_factor = int(60 / vidTime)
    hrm = math.ceil(c/2) * multiplying_factor
    print(hrm)




    #Resources is being freed here
    vidObj.release()
    newVidObj.release()
    cv2.destroyAllWindows()

    fileExists = os.path.exists('trim.mp4')
    if(fileExists):
        os.remove('trim.mp4')

    return hrm
Code Example #16
def main():

    try:
        drone.connect()  # tello connection; 'drone' is assumed to be a module-level tellopy.Tello()
        drone.wait_for_connection(60.0)

        drone.takeoff()
        time.sleep(5)

        while True:

            AAA = OpenCV()
            S = AAA[0]
            c1 = AAA[1]
            c2 = AAA[2]
            p0 = AAA[3]
            cx = AAA[4]
            cy = AAA[5]

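            # Direction codes used below (inferred from the movement commands
            # further down): 1 = left, 2 = right, 3 = up, 4 = down,
            # 5/6 = forward, 7 = descend then re-ascend.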
            dir = 0  # default when no branch below matches (e.g. S > 30000); note 'dir' shadows the builtin
            if S <= 1500:
                if cx - 200 > c1:
                    dir = 1
                elif cx + 200 < c1:
                    dir = 2
                else:
                    if cy - 200 > c2:
                        dir = 3
                    elif cy + 200 < c2:
                        dir = 4
                    else:
                        dir = 5
            elif 1500 < S <= 5000:
                if cx - 200 > c1:
                    dir = 1
                elif cx + 200 < c1:
                    dir = 2
                else:
                    if cy - 100 > c2:
                        dir = 3
                    elif cy + 100 < c2:
                        dir = 4
                    else:
                        dir = 5
            elif 5000 < S <= 10000:
                if cx - 100 > c1:
                    dir = 1
                elif cx + 100 < c1:
                    dir = 2
                else:
                    if cy - 100 > c2:
                        dir = 3
                    elif cy + 100 < c2:
                        dir = 4
                    else:
                        dir = 5
            elif 10000 < S <= 30000:
                if cx - 100 > c1:
                    dir = 1
                elif cx + 100 < c1:
                    dir = 2
                else:
                    if cy - 100 > c2:
                        dir = 3
                    elif cy + 100 < c2:
                        dir = 4
                    else:
                        dir = 7
            #elif 30000 < S < 50000:
            #  if cx - 80 > c1:
            #    dir = 1
            #  elif cx + 80 < c1:
            #    dir = 2
            #  else:
            #    if cy - 80 > c2 :
            #      dir = 3
            #    elif cy + 80 < c2:
            #      dir = 4
            #    else:
            #      dir = 7

            if dir == 1:
                drone.left(5)
                time.sleep(3)
                drone.left(0)
                time.sleep(2)
            elif dir == 2:
                drone.right(5)
                time.sleep(3)
                drone.right(0)
                time.sleep(2)
            elif dir == 3:
                drone.up(10)
                time.sleep(3)
                drone.up(0)
                time.sleep(3)
            elif dir == 4:
                drone.down(10)
                time.sleep(3)
                drone.down(0)
                time.sleep(3)
            elif dir == 5:
                drone.forward(10)
                time.sleep(3)
                drone.forward(0)
                time.sleep(3)
            elif dir == 6:
                drone.forward(5)
                time.sleep(3)
                drone.forward(0)
                time.sleep(3)
            elif dir == 7:
                drone.down(30)
                time.sleep(10)
                drone.down(0)
                time.sleep(20)
                drone.up(20)
                time.sleep(10)
                drone.up(0)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()  # get the current exception info
        traceback.print_exception(exc_type, exc_value,
                                  exc_traceback)  # print the recorded stack trace
        print(ex)
    finally:
        drone.land()
        drone.quit()
        cv2.destroyAllWindows()
Code Example #17
    def camera_loop(self):
        global fig, ax
        global cameraOpen

        neuralModel = NeuralNetworkModel()

        neuralModelPickle = open("ModelStorage/nnlivetest.pickle", "rb")

        neuralModelDict = pickle.load(neuralModelPickle)

        video_capture = cv2.VideoCapture(0)

        frame_count = 0
        openness_average = []
        openness_min = 1
        openness_max = 0

        extraversion_average = []
        extraversion_min = 1
        extraversion_max = 0

        neuroticism_average = []
        neuroticism_min = 1
        neuroticism_max = 0

        agreeableness_average = []
        agreeableness_min = 1
        agreeableness_max = 0

        conscientiousness_average = []
        conscientiousness_min = 1
        conscientiousness_max = 0

        while cameraOpen:
            ret, frame = video_capture.read()

            font = cv2.FONT_HERSHEY_SIMPLEX
            color = (255, 255, 255)

            valueUpdated = False

            if frame_count % 15 == 0:  # run the model on every 15th frame
                temp_openness, temp_extraversion, temp_neuroticism, temp_agreeableness, temp_conscientiousness = neuralModel.predict_single_frame(
                    frame, neuralModelDict)

                if temp_openness["average"] != -1:
                    openness_average.append(temp_openness["average"])
                    if temp_openness["min"] < openness_min:
                        openness_min = temp_openness["min"]
                    if temp_openness["max"] > openness_max:
                        openness_max = temp_openness["max"]

                    valueUpdated = True

                if temp_extraversion["average"] != -1:
                    extraversion_average.append(temp_extraversion["average"])
                    if temp_extraversion["min"] < extraversion_min:
                        extraversion_min = temp_extraversion["min"]
                    if temp_extraversion["max"] > extraversion_max:
                        extraversion_max = temp_extraversion["max"]

                    valueUpdated = True

                if temp_agreeableness["average"] != -1:
                    agreeableness_average.append(temp_agreeableness["average"])
                    if temp_agreeableness["min"] < agreeableness_min:
                        agreeableness_min = temp_agreeableness["min"]
                    if temp_agreeableness["max"] > agreeableness_max:
                        agreeableness_max = temp_agreeableness["max"]

                    valueUpdated = True

                if temp_conscientiousness["average"] != -1:
                    conscientiousness_average.append(
                        temp_conscientiousness["average"])
                    if temp_conscientiousness["min"] < conscientiousness_min:
                        conscientiousness_min = temp_conscientiousness["min"]
                    if temp_conscientiousness["max"] > conscientiousness_max:
                        conscientiousness_max = temp_conscientiousness["max"]

                    valueUpdated = True

                if temp_neuroticism["average"] != -1:
                    neuroticism_average.append(temp_neuroticism["average"])
                    if temp_neuroticism["min"] < neuroticism_min:
                        neuroticism_min = temp_neuroticism["min"]
                    if temp_neuroticism["max"] > neuroticism_max:
                        neuroticism_max = temp_neuroticism["max"]

                    valueUpdated = True

                font = cv2.FONT_HERSHEY_SIMPLEX

            if valueUpdated:
                raw_data = {
                    'trait_name': [
                        'Openness', 'Extraversion', 'Agreeableness',
                        'Neuroticism', 'Conscientiousness'
                    ],
                    'avg': [
                        np.average(openness_average),
                        np.average(extraversion_average),
                        np.average(agreeableness_average),
                        np.average(neuroticism_average),
                        np.average(conscientiousness_average)
                    ],
                    'min': [
                        openness_min, extraversion_min, agreeableness_min,
                        neuroticism_min, conscientiousness_min
                    ],
                    'max': [
                        openness_max, extraversion_max, agreeableness_max,
                        neuroticism_max, conscientiousness_max
                    ]
                }
                df = pd.DataFrame(raw_data,
                                  columns=['trait_name', 'avg', 'min', 'max'])
                graphThread = threading.Thread(target=self.update_graph,
                                               args=(df, ))
                graphThread.start()

                self.Otable.setItem(
                    0, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(
                        np.average(openness_average))))
                self.Otable.setItem(
                    1, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(openness_min)))
                self.Otable.setItem(
                    2, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(openness_max)))

                self.Atable.setItem(
                    0, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(
                        np.average(agreeableness_average))))
                self.Atable.setItem(
                    1, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(agreeableness_min)))
                self.Atable.setItem(
                    2, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(agreeableness_max)))

                self.Etable.setItem(
                    0, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(
                        np.average(extraversion_average))))
                self.Etable.setItem(
                    1, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(extraversion_min)))
                self.Etable.setItem(
                    2, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(extraversion_max)))

                self.Ctable.setItem(
                    0, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(
                        np.average(conscientiousness_average))))
                self.Ctable.setItem(
                    1, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(conscientiousness_min)))
                self.Ctable.setItem(
                    2, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(conscientiousness_max)))

                self.Ntable.setItem(
                    0, 0,
                    QtWidgets.QTableWidgetItem("{0:.4f}".format(
                        np.average(neuroticism_average))))
                self.Ntable.setItem(
                    1, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(neuroticism_min)))
                self.Ntable.setItem(
                    2, 0,
                    QtWidgets.QTableWidgetItem(
                        "{0:.4f}".format(neuroticism_max)))

            cv2.putText(
                frame, "Openness (Avg) = {0:.4f}".format(
                    np.average(openness_average)), (10, 20), font, 0.45, color)
            cv2.putText(
                frame, "Extraversion (Avg) = {0:.4f}".format(
                    np.average(extraversion_average)), (10, 40), font, 0.45,
                color)
            cv2.putText(
                frame, "Neuroticism (Avg) = {0:.4f}".format(
                    np.average(neuroticism_average)), (10, 60), font, 0.45,
                color)
            cv2.putText(
                frame, "Agreeableness (Avg) = {0:.4f}".format(
                    np.average(agreeableness_average)), (10, 80), font, 0.45,
                color)
            cv2.putText(
                frame, "Conscientiousness (Avg) = {0:.4f}".format(
                    np.average(conscientiousness_average)), (10, 100), font,
                0.45, color)

            cv2.putText(frame, "Openness (Min) = {0:.4f}".format(openness_min),
                        (10, 140), font, 0.45, color)
            cv2.putText(
                frame, "Extraversion (Min) = {0:.4f}".format(extraversion_min),
                (10, 160), font, 0.45, color)
            cv2.putText(frame,
                        "Neuroticism (Min) = {0:.4f}".format(neuroticism_min),
                        (10, 180), font, 0.45, color)
            cv2.putText(
                frame,
                "Agreeableness (Min) = {0:.4f}".format(agreeableness_min),
                (10, 200), font, 0.45, color)
            cv2.putText(
                frame,
                "Conscientiousness (Min) = {0:.4f}".format(conscientiousness_min),
                (10, 220), font, 0.45, color)

            cv2.putText(frame, "Openness (Max) = {0:.4f}".format(openness_max),
                        (10, 260), font, 0.45, color)
            cv2.putText(
                frame, "Extraversion (Max) = {0:.4f}".format(extraversion_max),
                (10, 280), font, 0.45, color)
            cv2.putText(frame,
                        "Neuroticism (Max) = {0:.4f}".format(neuroticism_max),
                        (10, 300), font, 0.45, color)
            cv2.putText(
                frame,
                "Agreeableness (Max) = {0:.4f}".format(agreeableness_max),
                (10, 320), font, 0.45, color)
            cv2.putText(
                frame,
                "Conscientiousness (Max) = {0:.4f}".format(conscientiousness_max),
                (10, 340), font, 0.45, color)

            frame_count += 1
            cv2.imshow("output", frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        video_capture.release()
        cv2.destroyAllWindows()
        plt.close()
Code Example #18
import io

import cv2
from google.cloud import vision  # legacy (pre-2.0) google-cloud-vision client
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()  # assumes GCP credentials are configured

def detect_text(path): # text detection function: takes the image file "path" as argument
    """Detects text in the file."""
    with io.open(path, 'rb') as image_file: # open and read image_file in binary
        content = image_file.read() # read and stores image_file content in content var

    image = types.Image(content=content) # image to be processed request
    response = client.text_detection(image=image) # if image is present, function returns detections
    texts = response.text_annotations # detections from image
    string = '' # initializes string variable

    for text in texts: # for all individual content within image detection
        string+=' ' + text.description # store new individual content with space 
    return string # return newest string

cap = cv2.VideoCapture(0) # stores captured webcam video, frame-by-frame

while(True):
    # for each captured frame
    ret, frame = cap.read() # ret = True if a frame is available; frame = image array
    file = 'live.png' # file name used to store a .png image
    cv2.imwrite(file, frame) # write the captured frame to the file

    print(detect_text(file)) # calls detect text file defined above with new file path and prints string

    cv2.imshow('frame',frame) # name the display window and show the captured frame
    if cv2.waitKey(1) & 0xFF == ord('q'): # wait 1 ms for a keyboard event equal to keystroke q
        break # if q is pressed, break from the loop

cap.release() # release the captured video
cv2.destroyAllWindows() # destroy the display windows
Code Example #19
File: VideoMaker.py  Project: wumh7/MKCF
def view_trackers():
    tracker_names = ['KF', 'EOTRM', 'EOTGGIW', 'KCF', 'ASKCF', 'MKL', 'MKCF']
    #colors [red, yellow, light blue, white, mud green, violet, light green]
    tracker_colors= [(0,0,255),(0,255,255), (239,200,148), (255,255,255), (24,115,84), (227,5,198), (97,241,115) ]
    target_names = ['ALice', 'Billy', 'Camen', 'Dolphin', 'Ellen']
    res_path = '/Users/yizhou/code/MKCF_MAC_Python3.6/results/'
    ResData = {}

    #load each target's bounding-box '*_Tbbs.txt' file for 5 targets in 7 trackers.
    for tracker_name in tracker_names:
        TrackerResData = {}
        for target_name in target_names:
            ttData = {}
            fname = res_path + 'Res_' + tracker_name + '/' + target_name + '_' + tracker_name + '_Tbbs.txt'
            ttData = uti.get_obj_dict(fname)
            TrackerResData.update({target_name: ttData})
        ResData.update({tracker_name: TrackerResData})
    print('size of the results data in bytes: %d'% ResData.__sizeof__())

    image_path = '/Users/yizhou/code/MKCF_MAC_Python3.6/sequences/gray/'
    video_path = '/Users/yizhou/code/MKCF_MAC_Python3.6/results/video/'
    # video = cv2.VideoWriter(video_path + '7trackers_5targets.mp4', cv2.VideoWriter_fourcc('a', 'v', 'c', '1'), 30,
    #                     (2048, 600))

    video = cv2.VideoWriter(video_path + '7trackers_5targets.mp4', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 30,
                            (2048, 600))
    img_files = os.listdir(image_path)
    img_files.sort()
    for file in img_files:
        img_file = os.path.join(image_path, file)
        if img_file.endswith('.png'):
            img = cv2.imread(img_file)

            id = int(file.split('.')[0])
            if id > 409:
                break
            frame_id = 'frame %d' % id
            for i,tracker_name in enumerate(tracker_names):
                cv2.putText(img, frame_id, org=(20, 50),
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale=1, color=(245,255,118),#show the tracker's color in rectangle and label.
                            thickness=2, lineType=cv2.LINE_AA)
                img = cv2.rectangle(img, (30, 60+i*40), (80, 90+i*40), tracker_colors[i], 2)
                cv2.putText(img, tracker_name, org=(80, 90+i*40),
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=tracker_colors[i],
                            thickness=2, lineType=cv2.LINE_AA)

                for target_name in target_names:
                    if frame_id in ResData[tracker_name][target_name]:
                        tbb = ResData[tracker_name][target_name][frame_id]['BoundingBox']
                        if len(tbb)>0:
                            tp = (int(tbb[0]), int(tbb[1]))  # cast to int: cv2.rectangle needs integer corners
                            br = (int(tbb[0]+tbb[2]), int(tbb[1]+tbb[3]))
                            img = cv2.rectangle(img, tp, br, tracker_colors[i], 2)
                            if(tracker_name == 'MKCF'):
                                cv2.putText(img, target_name, org=tp,
                                            fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(245,255,118),
                                            thickness=2, lineType=cv2.LINE_AA)

            video.write(img)
            cv2.imshow("Image", img)
            cv2.waitKey(1)
    video.release()
    cv2.destroyAllWindows()
コード例 #20
0
def display_image(img, label="image"):
    cv2.imshow(str(label), img.copy())  # display the image
    cv2.waitKey(0)  # wait for any key press (with the image window focused)
    cv2.destroyAllWindows()  # close all windows
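# Usage sketch for display_image (the file name 'sample.png' is hypothetical,
# not from the original):
#
#     img = cv2.imread('sample.png')
#     if img is not None:
#         display_image(img, label='sample')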
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        # drone.startControlCommand()
        # drone.takeoffsimplecontrol()
        # drone.takeoff()
        # sleep(3)
        # drone.land()
        # sleep(3)
        drone.set_video_encoder_rate(1)
        container = av.open(drone.get_video_stream())
        print('Start Video Stream**********************************')
        # skip first 10 frames

        frame_skip = 10
        count_frame = 10
        flags = numpy.zeros((1, 4))
        pastidx = None  # stores the previous pose index; in one-person mode it lets the drone repeat the same movement
        actor = None  # identifies which person currently has control of the Tello

        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                # start_time = time.time()
                interrupt = cv2.waitKey(10)  # poll the keyboard for 10 ms each frame
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                keypoints, output_image = openpose.forward(image, True)
                # keypoints is a matrix holding multi-person data
                # format: [[p1], [p2], [p3]]

                cv2.imshow("output", output_image)
                # print('get keypoint!')
                # print(keypoints)

                # when the test input matches none of the 7 poses, idx=6 is returned;
                # probably because the dist_all < 0.7 threshold is too loose
                # workaround: override idx when dist_all > 0.7 (multi-person version)

                # for a one-person data matrix, size = 3*25 = 75
                if 40 < numpy.size(keypoints) < 76:  # ensure the data is valid for exactly one person
                    # implement knn
                    (idx, dist_all) = kNNtest.implement_kNN(keypoints)
                    print('One-Person mode')
                    actor = 0
                    # default actor to 0 in one-person mode; if the next frame is multi-person we don't yet know who has control
                    # actor must not be None for the comparisons in the multi-person actor-change stage

                    # print(dist_all)
                    if dist_all[0] < 0.7:
                        print('*****       Pose_Idx=%d       *****' % idx)

                        # if the idx differs from the previous one, update it;
                        # if it is the same, repeat the movement the previous idx indicates;
                        # if the pose cannot be recognized, the drone keeps moving per pastidx (saves effort for the actor)
                        if idx != pastidx:
                            pastidx = idx
                            print('pose idx has changed to %d' % (idx))
                        idx2pose(drone, pastidx)

                # for multi-person data matrices, size = n*(3*25)
                if numpy.size(keypoints) > 76:
                    print('multi-person mode')
                    person = len(keypoints)  # tracks how the number of people changes between frames
                    idx_list = []  # stores the pose idx of every person in this frame

                    kp = dict()
                    # apply kNN to each person's matrix one by one
                    for i in range(0, len(keypoints)):
                        a = [keypoints[i]]
                        print('separate kp')
                        name = 'kp{}'.format(i)
                        kp[name] = array(a)

                        # ensure the points are enough for analysis
                        if 40 < numpy.size(kp[name]) < 76:
                            (idx, dist_all) = kNNtest.implement_kNN(kp[name])
                            print('idx, dist done')

                            # if the person's pose matches none of pose indices 0-6, set idx to None
                            if dist_all[0] > 0.7:
                                idx = None

                            # store the idx only for matrices with enough points
                            idx_list += [idx]

                    print('index list of multi-person:')
                    print(idx_list)

                    # actor assignment in multi-person mode
                    # (in one-person mode, actor defaults to 0)

                    # if the drone has not taken off yet, actor is still None
                    if actor is None:
                        print('Actor is None in multi-person mode')
                        # the person who commanded takeoff becomes the actor
                        if 2 in idx_list:
                            actor = idx_list.index(2)
                            idx = idx_list[actor]
                            print('take off in multi-person mode by actor:',
                                  actor)
                            idx2pose(drone, idx)
                            print('take off in multi-person mode done')

                    # this branch is entered once the drone has taken off,
                    # whether in multi-person or in one-person mode
                    elif actor is not None:
                        print('Actor is not None')
                        # edge case: if one frame has person=4 with actor at list index 3,
                        # and the next frame has person=3, the unchanged actor index
                        # is out of range for idx_list = [0, 1, 2]
                        if person >= 3:
                            if actor >= (person - 1):
                                actor = 0
                                print('actor overflow, changed to 0')
                                # falling back to actor = 0 is still risky and needs testing
                        if 4 in idx_list:
                            actor = idx_list.index(4)
                            # actor becomes the first occurrence of 4 in the list (list.index returns the first match)
                            # TODO: decide which person should really become the actor
                            # open question: why is the list sometimes full of 4s? a bug?
                            print('actor has changed to the person who did pose 4')

                        print('ready to get the idx in [multi-person] actor mode')
                        idx = idx_list[actor]
                        print('actor has set the idx to:', idx)
                        idx2pose(drone, idx)
                        print('actor is :', actor, 'pose is:', idx)

                    # print('ready to do pose!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    # idx2pose(drone, idx)
                    # print('do action in multi-person!!!!!!!!!!!!!!!!!!!!!!!')

                elif interrupt & 0xFF == ord('q'):
                    cv2.destroyWindow("output")  # destroyWindow takes the window name, not the image array
                    drone.land()

                elif numpy.size(keypoints) == 0:  # if the UAV can't find anyone, rotate until a person is detected
                    drone.clockwisesimplecontrol(20)

                    # drone.quitsimplecontrol()
                    # sleep(1)
                # if interupt & 0xFF == ord('l'):
                #     drone.land()
                #     sleep(2)
                # if interupt & 0xFF == ord('w'):
                #     drone.forwardsimplecontrol(20)
                #     # sleep(1)
                # if interupt & 0xFF == ord('s'):
                #     drone.backwardsimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('a'):
                #     drone.leftsimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('d'):
                #     drone.rightsimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('z'):
                #     drone.clockwisesimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('c'):
                #     drone.flip_rightsimplecontrol()
                #     sleep(1)
                # if interupt & 0xFF == ord('t'):
                #     drone.takeoff()
                #     sleep(2)
                # if interupt & 0xFF == ord('u'):
                #     drone.upsimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('n'):
                #     drone.downsimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('v'):
                #     drone.contourclockwisesimplecontrol(20)
                #     sleep(1)
                # if interupt & 0xFF == ord('b'):
                #     drone.flip_leftsimplecontrol()
                #     sleep(1)
                count_frame = 10
                flags = numpy.zeros((1, 4))  # reset the count of each gesture to 0
                # print('*****       count_frame=%d       *****' % count_frame)
                # frame_skip = int((time.time() - start_time) / frame.time_base)
                frame_skip = 20

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
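
`idx2pose(drone, idx)` is called throughout the example above but is not defined in this excerpt. A minimal sketch of what such a dispatcher could look like, using the tellopy simple-control methods that appear elsewhere in these examples; the exact index-to-action mapping is a guess (only idx 2 = takeoff is implied by the code above), so treat it as illustrative only:

def idx2pose(drone, idx):
    # Hypothetical pose-index dispatcher; the real mapping is not in this excerpt.
    if idx is None:
        return  # unrecognized pose: do nothing
    if idx == 2:
        drone.takeoffsimplecontrol()      # pose 2 triggers takeoff (implied above)
    elif idx == 0:
        drone.forwardsimplecontrol(20)    # illustrative assignment
    elif idx == 1:
        drone.backwardsimplecontrol(20)   # illustrative assignment
    elif idx == 4:
        pass                              # pose 4 only reassigns the actor above
    else:
        drone.clockwisesimplecontrol(20)  # illustrative default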
コード例 #22
0
def cv_show(name, img):
    cv.imshow(name, img)
    k = cv.waitKey(0)
    if k == 27:
        cv.destroyAllWindows()
コード例 #23
0
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        #drone.startControlCommand()
        #drone.takeoffsimplecontrol()
        #drone.takeoff()
        # sleep(3)
        #drone.land()
        # sleep(3)
        drone.set_video_encoder_rate(1)
        container = av.open(drone.get_video_stream())
        print('Start Video Stream**********************************')
        # skip first 300 frames
        frame_skip = 300

        while True:
            for frame in container.decode(video=0):

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()

                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                #cv2.imshow('Original', image)

                keypoints, output_image = openpose.forward(image, True)
                cv2.imshow("output", output_image)
                # cv2.waitKey(1)
                if numpy.size(keypoints) > 1:
                    angle = whatPosition(keypoints)
                    print(str(angle) + '**************************')
                # if numpy.size(keypoints)>1:
                #     a = whatPosition(keypoints)
                #     if a=='l':
                #         print('hello***********************************************')
            #          drone.land()
            #          sleep(2)
            #
                waitkey_num = cv2.waitKeyEx()  # blocks until a key is pressed, so the stream advances one frame per keystroke
                # cv2.imshow('Canny', cv2.Canny(image, 100, 200))

                if waitkey_num == ord('q'):
                    cv2.destroyWindow("output")  # destroyWindow takes the window name, not the image array
                    drone.land()
                    # drone.quitsimplecontrol()
                    sleep(1)
                if waitkey_num == ord('l'):
                    drone.land()
                    sleep(2)
                if waitkey_num == ord('w'):
                    drone.forwardsimplecontrol(20)
                    # sleep(1)
                if waitkey_num == ord('s'):
                    drone.backwardsimplecontrol(20)
                    sleep(1)
                if waitkey_num == ord('a'):
                    drone.leftsimplecontrol(20)
                    sleep(1)
                if waitkey_num == ord('d'):
                    drone.rightsimplecontrol(20)
                    sleep(1)
                if waitkey_num == ord('z'):
                    drone.clockwisesimplecontrol(20)
                    sleep(1)
                if waitkey_num == ord('c'):
                    drone.flip_rightsimplecontrol()
                    sleep(1)
                if waitkey_num == ord('t'):
                    drone.takeoffsimplecontrol()
                    sleep(2)
                if waitkey_num == ord('u'):
                    drone.upsimplecontrol(20)
                    sleep(1)
                if waitkey_num == ord('n'):
                    drone.downsimplecontrol(20)
                    sleep(1)
                if waitkey_num == ord('v'):
                    drone.contourclockwisesimplecontrol(20)
                    sleep(1)
                frame_skip = int((time.time() - start_time) / frame.time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
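
The manual key controls above repeat one `if` per key. A behavior-equivalent sketch using a dispatch table, with the method names taken from the example itself; the table layout is just a refactoring suggestion, not part of the original:

from time import sleep  # the original example also uses sleep()

# (action, pause-in-seconds) per key, mirroring the manual controls above
key_actions = {
    ord('l'): (lambda d: d.land(), 2),
    ord('w'): (lambda d: d.forwardsimplecontrol(20), 0),
    ord('s'): (lambda d: d.backwardsimplecontrol(20), 1),
    ord('a'): (lambda d: d.leftsimplecontrol(20), 1),
    ord('d'): (lambda d: d.rightsimplecontrol(20), 1),
    ord('z'): (lambda d: d.clockwisesimplecontrol(20), 1),
    ord('v'): (lambda d: d.contourclockwisesimplecontrol(20), 1),
    ord('t'): (lambda d: d.takeoffsimplecontrol(), 2),
}

def handle_key(drone, key):
    if key in key_actions:
        action, pause = key_actions[key]
        action(drone)
        if pause:
            sleep(pause)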
コード例 #24
0
def main():
    try:
        drone.connect()  #tello connection

        drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
        drone.subscribe(drone.EVENT_LOG_DATA, handler)

        drone.wait_for_connection(60.0)

        drone.takeoff()
        time.sleep(5)

        while True:  # note: this outer loop may be unnecessary

            # unpack the values returned by OpenCV(): detected area S, centers c1/c2, p0, and cx/cy
            S, c1, c2, p0, cx, cy = OpenCV()

            a = str(flight_data)

            print(a)

            with open("height protocol 2021-4-28 -1 .txt", "a") as f:
                result = "{:s}\n".format(a)
                f.write(result)

            dir = 0  # default: no movement command (avoids a NameError when S > 30000)
            if S <= 1500:
                if cx - 200 > c1:
                    dir = 1
                elif cx + 200 < c1:
                    dir = 2
                else:
                    if cy - 200 > c2:
                        dir = 3
                    elif cy + 200 < c2:
                        dir = 4
                    else:
                        dir = 5
            elif 1500 < S <= 5000:
                if cx - 200 > c1:
                    dir = 1
                elif cx + 200 < c1:
                    dir = 2
                else:
                    if cy - 100 > c2:
                        dir = 3
                    elif cy + 100 < c2:
                        dir = 4
                    else:
                        dir = 5
            elif 5000 < S <= 10000:
                if cx - 100 > c1:
                    dir = 1
                elif cx + 100 < c1:
                    dir = 2
                else:
                    if cy - 100 > c2:
                        dir = 3
                    elif cy + 100 < c2:
                        dir = 4
                    else:
                        dir = 5
            elif 10000 < S <= 30000:
                if cx - 100 > c1:
                    dir = 1
                elif cx + 100 < c1:
                    dir = 2
                else:
                    if cy - 100 > c2:
                        dir = 3
                    elif cy + 100 < c2:
                        dir = 4
                    else:
                        dir = 7
            if dir == 1:
                drone.left(5)
                time.sleep(3)
                drone.left(0)
                time.sleep(2)
            elif dir == 2:
                drone.right(5)
                time.sleep(3)
                drone.right(0)
                time.sleep(2)
            elif dir == 3:
                drone.up(10)
                time.sleep(3)
                drone.up(0)
                time.sleep(3)
            elif dir == 4:
                drone.down(10)
                time.sleep(3)
                drone.down(0)
                time.sleep(3)
            elif dir == 5:
                drone.forward(10)
                time.sleep(3)
                drone.forward(0)
                time.sleep(3)
            elif dir == 6:
                drone.forward(5)
                time.sleep(3)
                drone.forward(0)
                time.sleep(3)
            elif dir == 7:
                drone.down(30)
                time.sleep(10)
                drone.down(0)
                time.sleep(20)
                drone.up(20)
                time.sleep(10)
                drone.up(0)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()  # get details of the exception being handled
        traceback.print_exception(exc_type, exc_value, exc_traceback)  # print the recorded stack trace
        print(ex)
    finally:
        drone.land()
        drone.quit()
        cv2.destroyAllWindows()
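
The nested `if/elif` ladder above is a dead-band controller: the tolerance around the image center shrinks as the detected area `S` grows, and the centered case of the largest band triggers the descend-and-climb maneuver (`dir == 7`). A table-driven sketch of the same decision, with the thresholds copied from the code above:

# (max_area, x_tolerance, y_tolerance, code_when_centered) per band, as above
BANDS = [(1500, 200, 200, 5), (5000, 200, 100, 5),
         (10000, 100, 100, 5), (30000, 100, 100, 7)]

def decide_direction(S, c1, c2, cx, cy):
    for max_area, x_tol, y_tol, centered in BANDS:
        if S <= max_area:
            if cx - x_tol > c1:
                return 1  # dir 1: move left
            if cx + x_tol < c1:
                return 2  # dir 2: move right
            if cy - y_tol > c2:
                return 3  # dir 3: move up
            if cy + y_tol < c2:
                return 4  # dir 4: move down
            return centered  # dir 5: forward, or dir 7: descend and climb
    return 0  # S > 30000: the original issues no new command here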
コード例 #25
0
    def loop_forever(self):
        firstFrame = None
        notDetectedCounter = self.border_notDetected
        frameCounter = 0
        motionDetected = False

        while True:
            if self.capture.isOpened():
                ret, frame = self.capture.read()
                frameCounter += 1
                # text = "Unoccupied"
                if ret:

                    # resize the frame, convert it to grayscale, and blur it
                    scaledFrame = imutils.resize(frame, width=500)
                    gray = cv2.cvtColor(scaledFrame, cv2.COLOR_BGR2GRAY)
                    gray = cv2.GaussianBlur(gray, (21, 21), 0)

                    if firstFrame is None:
                        firstFrame = gray
                        continue

                    # compute the absolute difference between the current frame and
                    # first frame
                    frameDelta = cv2.absdiff(firstFrame, gray)
                    thresh = cv2.threshold(frameDelta, 25, 255,
                                           cv2.THRESH_BINARY)[1]
                    # dilate the thresholded image to fill in holes, then find contours
                    # on thresholded image
                    thresh = cv2.dilate(thresh, None, iterations=2)
                    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
                    cnts = imutils.grab_contours(cnts)
                    motionDetected = False
                    for c in cnts:
                        if cv2.contourArea(c) > self.border_contourArea:
                            motionDetected = True
                            notDetectedCounter = 0

                    if motionDetected or notDetectedCounter < self.border_notDetected:
                        #print("schreibe Datei")
                        # write frame to output file
                        if not self.writer.isOpened():
                            # setup file writer if is not opened
                            file_path = self.generateFilePath()
                            frame_width = int(self.capture.get(3))
                            frame_height = int(self.capture.get(4))

                            fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
                            self.writer = cv2.VideoWriter(
                                file_path, fourcc, self.fps,
                                (frame_width, frame_height))
                        self.writer.write(frame)
                        notDetectedCounter += 1
                    else:
                        if self.writer.isOpened():
                            self.writer.release()

                        if frameCounter >= self.border_resetFirstFrame:
                            firstFrame = gray
                            frameCounter = 0
                            print("reset")
                    """
                    # loop over the contours
                    for c in cnts:
                        # if the contour is too small, ignore it
                        if cv2.contourArea(c) < 500:#args["min_area"]:
                            continue
                        # compute the bounding box for the contour, draw it on the frame,
                        # and update the text
                        (x, y, w, h) = cv2.boundingRect(c)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                        text = "Occupied"

                   # draw the text and timestamp on the frame
                    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
                    # show the frame and record if the user presses a key
                    #"""

                    # display the intermediate images

                    cv2.imshow("Security Feed", gray)
                    cv2.imshow("Thresh", thresh)
                    cv2.imshow("Frame Delta", frameDelta)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

                    #"""
                    # print(counter_frames)
                    # recording completed
                    #if counter_frames >= self.videolength_frames:
                    #logging.info("Recording completed.")
                    #self.mqtt.sendProcessMessage(self.user_name, self.mqtt.info_list[self.module_name]["RecordedFile"],file=self.file_name)
                    #    break

                else:
                    if not self.capture.isOpened():
                        #self.mqtt.sendProcessMessage(self.user_name, self.mqtt.info_list[self.module_name]["RecordLostConnection"],file=self.file_name)
                        #logging.error("Lost connection to camera.")
                        pass
                    #self.mqtt.sendProcessMessage(self.user_name, self.mqtt.info_list[self.module_name]["RecordFileError"],file=self.file_name)
                    #logging.error("Can not read from VideoCapture.")
                    break
        cv2.destroyAllWindows()
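
`loop_forever` references attributes (`self.capture`, `self.writer`, `self.fps`, the `self.border_*` thresholds) and a `generateFilePath` helper that are defined outside this excerpt. A minimal skeleton of a host class, just enough to make the method runnable; the class name and all default values are assumptions:

import datetime

import cv2


class MotionRecorder:  # hypothetical host class for loop_forever above
    def __init__(self, source=0, fps=20.0):
        self.capture = cv2.VideoCapture(source)
        self.writer = cv2.VideoWriter()      # starts closed; opened on demand in loop_forever
        self.fps = fps
        self.border_contourArea = 500        # assumed minimum contour area that counts as motion
        self.border_notDetected = 50         # assumed number of quiet frames to keep recording
        self.border_resetFirstFrame = 300    # assumed interval for resetting the reference frame

    def generateFilePath(self):
        # Hypothetical naming scheme for the output file
        return datetime.datetime.now().strftime("motion_%Y%m%d_%H%M%S.avi")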
コード例 #26
0
def main():
    global star
    global cascPath
    posx = 0
    counter = 0
    drone = tellopy.Tello()
    counter1 = 0
    step = 200  # seed value, don't care :3
    step1 = 200
    stop = 0
    flag1 = True

    while star == 1:
        cc = str(ser.readline())
        if cc[2:][:-5] == "Calling Drone":
            print(cc[2:][:-5])
            star = 2
            break

    try:
        #Start Protocol
        drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
        drone.connect()
        drone.wait_for_connection(60.0)
        container = av.open(drone.get_video_stream())
        frame_skip = 300
        xdis = 200
        ydis = 150
        drone.takeoff()
        sleep(5)
        drone.up(18)
        sleep(5)
        drone.up(0)
        sleep(1)
        countface = 0
        # End Start Protocol

        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                #imagep= cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_BGR2GRAY)

                faces = faceCascade.detectMultiScale(
                    image,
                    scaleFactor=1.7,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE)

                if (len(faces) == 0):
                    countface = 0
                    if (step != 0):
                        if (step == 1):
                            drone.backward(0)
                        elif (step == 3):
                            drone.up(0)
                        elif (step == 4):
                            drone.down(0)
                        elif (step == 5):
                            drone.left(0)
                        elif (step == 6):
                            drone.right(0)
                        elif (step == 7):
                            drone.up(0)
                            drone.right(0)
                        elif (step == 8):
                            drone.up(0)
                            drone.left(0)
                        elif (step == 9):
                            drone.down(0)
                            drone.right(0)
                        elif (step == 10):
                            drone.down(0)
                            drone.left(0)
                        elif (step == 11):
                            drone.forward(0)
                        else:
                            ...  #Nothing
                    step = 0
                    counter1 += 1
                    if counter1 >= 10 and flag1 == True:
                        flag1 = False
                        if posx < 430:
                            drone.counter_clockwise(0)
                        else:
                            drone.clockwise(0)
                else:
                    flag1 = True
                    counter1 = 0
                    for (x, y, w, h) in faces:
                        cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                        place = image[y:y + h, x:x + w]

                        #Save last X position in Memory
                        if (countface > 3):
                            posx = (x + (w / 2))
                        else:
                            countface += 1

                        signal = signalCascade.detectMultiScale(
                            image[y:y + h, x:x + w],
                            scaleFactor=3.5,
                            minNeighbors=20,
                            minSize=(20, 20),
                            flags=cv2.CASCADE_SCALE_IMAGE)

                        for (x, y, w, h) in signal:
                            cv2.rectangle(place, (x, y), (x + w, y + h),
                                          (255, 255, 0), 2)

                            if (counter == 10):
                                ...
                                #raise ValueError('Close Connection')
                                #break
                            elif (len(signal) != 0):
                                counter += 1
                            else:
                                counter = 0

                        if (w * h > 60000 and step != 1):
                            if (step == 0):
                                drone.clockwise(0)
                            elif (step == 1):
                                ...
                                #drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                # switch to the stop cascade and begin a slow rotation
                                drone.counter_clockwise(10)
                                cascPath = cascPathStop
                                stop = 1
                            step = 1

                        elif (w * h > 40000 and step != 2):

                            if (step != 2):
                                if (step == 0):
                                    drone.counter_clockwise(0)
                                    drone.clockwise(0)
                                elif (step == 1):
                                    drone.backward(0)
                                elif (step == 3):
                                    drone.up(0)
                                elif (step == 4):
                                    drone.down(0)
                                elif (step == 5):
                                    drone.left(0)
                                elif (step == 6):
                                    drone.right(0)
                                elif (step == 7):
                                    drone.down(0)
                                    drone.right(0)
                                elif (step == 8):
                                    drone.down(0)
                                    drone.left(0)
                                elif (step == 9):
                                    drone.up(0)
                                    drone.right(0)
                                elif (step == 10):
                                    drone.up(0)
                                    drone.left(0)
                                elif (step == 11):
                                    drone.forward(0)
                                else:
                                    if (stop == 1):
                                        raise ValueError('Close Connection')

                            if (y < ydis and step1 != 1 and x > xdis
                                    and (960 - x - w) > xdis):
                                step1 = 1
                                drone.up(12)
                                print("Up Stable")
                            elif ((720 - y - h) < ydis and step1 != 2
                                  and x > xdis and (960 - x - w) > xdis):
                                step1 = 2
                                drone.down(12)
                                print("Down Stable")
                            elif (x < xdis and step1 != 3
                                  and (720 - y - h) > ydis and y > ydis):
                                step1 = 3
                                drone.left(12)
                                print("Left Stable")
                            elif ((960 - x - w) < xdis and step1 != 4
                                  and (720 - y - h) > ydis and y > ydis):
                                step1 = 4
                                drone.right(12)
                                print("Right Stable")
                            elif (y < ydis and step1 != 5 and x > xdis
                                  and (960 - x - w) < xdis):
                                step1 = 5
                                drone.right(12)
                                drone.up(12)
                                print("Right Up Stable")
                            elif ((720 - y - h) < ydis and step1 != 6
                                  and x > xdis and (960 - x - w) < xdis):
                                step1 = 6
                                drone.right(12)
                                drone.down(12)
                                print("Right Down Stable")
                            elif (y < ydis and step1 != 7 and x < xdis
                                  and (960 - x - w) > xdis):
                                step1 = 7
                                drone.left(12)
                                drone.up(12)
                                print("Left Up Stable")
                            elif ((720 - y - h) < ydis and step1 != 8
                                  and x < xdis and (960 - x - w) > xdis):
                                step1 = 8
                                drone.left(12)
                                drone.down(12)
                                print("Left Down Stable")
                            elif (step1 != 9 and (720 - y - h) > ydis
                                  and y > ydis and x > xdis
                                  and (960 - x - w) > xdis):
                                if (step1 == 1):
                                    drone.up(0)
                                elif (step1 == 2):
                                    drone.down(0)
                                elif (step1 == 3):
                                    drone.left(0)
                                elif (step1 == 4):
                                    drone.right(0)
                                elif (step1 == 5):
                                    drone.up(0)
                                    drone.right(0)
                                elif (step1 == 6):
                                    drone.right(0)
                                    drone.down(0)
                                elif (step1 == 7):
                                    drone.left(0)
                                    drone.up(0)
                                elif (step1 == 8):
                                    drone.left(0)
                                    drone.down(0)
                                else:
                                    ...  #Nothing
                                step1 = 9
                                print("Stable")
                            else:
                                ...  #Nothing
                            step = 2
                        elif (y < ydis and step != 3 and x > xdis
                              and (960 - x - w) > xdis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 3
                            drone.up(12)
                            print("Up")

                        elif ((720 - y - h) < ydis and step != 4 and x > xdis
                              and (960 - x - w) > xdis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                ...
                                #drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 4
                            drone.down(12)
                            print("Down")

                        elif (x < xdis and step != 5 and (720 - y - h) > ydis
                              and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                ...
                                #drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            print(step)
                            step = 5
                            drone.left(12)
                            print("Left")
                        elif ((960 - x - w) < xdis and step != 6
                              and (720 - y - h) > ydis and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                ...
                                #drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 6
                            drone.right(12)
                            print("Right")
                        elif ((960 - x - w) < xdis and step != 7
                              and (720 - y - h) < ydis and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                ...
                                #drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                ...
                                #drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 7
                            drone.down(12)
                            drone.right(12)
                            print("Right Down")
                        elif (x < xdis and step != 8 and (720 - y - h) < ydis
                              and y > ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                ...
                                #drone.down(0)
                            elif (step == 5):
                                ...
                                #drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 8
                            drone.down(12)
                            drone.left(12)
                            print("Left Down")
                        elif ((960 - x - w) < xdis and step != 9
                              and (720 - y - h) > ydis and y < ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                ...
                                #drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                ...
                                #drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                ...
                                #drone.up(0)
                                #drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 9
                            drone.up(12)
                            drone.right(12)
                            print("Right Up")
                        elif (x < xdis and step != 10 and (720 - y - h) > ydis
                              and y < ydis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                ...
                                #drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                ...
                                #drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                ...
                                #drone.up(0)
                                #drone.left(0)
                            elif (step == 11):
                                drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 10
                            drone.up(12)
                            drone.left(12)
                            print("Left Up")
                        elif (step != 11 and (720 - y - h) > ydis and y > ydis
                              and x > xdis and (960 - x - w) > xdis):
                            if (step == 0):
                                drone.counter_clockwise(0)
                                drone.clockwise(0)
                            elif (step == 1):
                                drone.backward(0)
                            elif (step == 3):
                                drone.up(0)
                            elif (step == 4):
                                drone.down(0)
                            elif (step == 5):
                                drone.left(0)
                            elif (step == 6):
                                drone.right(0)
                            elif (step == 7):
                                drone.down(0)
                                drone.right(0)
                            elif (step == 8):
                                drone.down(0)
                                drone.left(0)
                            elif (step == 9):
                                drone.up(0)
                                drone.right(0)
                            elif (step == 10):
                                drone.up(0)
                                drone.left(0)
                            elif (step == 11):
                                ...
                                #drone.forward(0)
                            else:
                                ...  #Nothing
                            step = 11
                            drone.forward(12)
                            print("Adelante")
                        else:
                            ...  #Nothing

                # Display the resulting frame
                cv2.putText(image, "Battery %:" + str(battery), (10, 30), font,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.imshow('Original', image)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    raise ValueError('Close Connection')
                elif frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)

    except:
        drone.forward(0)
        drone.backward(0)
        drone.right(0)
        drone.left(0)
        drone.down(0)
        drone.up(0)
        drone.counter_clockwise(100)
        drone.land()
        sleep(5)
        drone.quit()
        cv2.destroyAllWindows()
        exit()
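
Most branches of the long ladder above differ only in how they cancel the motion started by the previous `step` before starting a new one (the source is not fully consistent about it: compare the step-7 handling near the top of the loop with the later branches). A sketch of a `stop_step` helper that expresses that pattern once, with the step codes and drone calls taken from the code above:

# Which motions each step code started; stopping means re-issuing them with 0.
STOP_CALLS = {
    1: ['backward'], 3: ['up'], 4: ['down'], 5: ['left'], 6: ['right'],
    7: ['down', 'right'], 8: ['down', 'left'],
    9: ['up', 'right'], 10: ['up', 'left'], 11: ['forward'],
}

def stop_step(drone, step):
    # Cancel whatever motion the previous step left running.
    if step == 0:
        drone.counter_clockwise(0)
        drone.clockwise(0)
        return
    for name in STOP_CALLS.get(step, []):
        getattr(drone, name)(0)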
コード例 #27
0
def calibrate():
    # number of inner chessboard corners: col x row
    col = 13
    row = 6

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((row * col, 3), np.float32)
    # each square of the calibration chessboard is 22 mm on a side
    objp[:, :2] = 22*np.mgrid[0:col, 0:row].T.reshape(-1, 2)

    objpoints = []  # point coordinates in the world frame
    imgpoints = []  # point coordinates in the image (pixel) plane
    print("Please select the folder containing the calibration photos", "\n")

    root = tkinter.Tk()
    root.withdraw()

    global path  # directory containing the calibration photos
    path = tkinter.filedialog.askdirectory(
        title="Select the folder containing the calibration photos")
    images = glob.glob(path+"/*.jpg")
    found = 0  # number of images actually used for calibration
    for k, fname in enumerate(images):
        img = cv2.imread(fname)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # detect the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (col, row), None)

        if ret:
            print("Reading", fname)
            objpoints.append(objp)

            
            # corner-detection accuracy affects the calibration accuracy
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1), criteria)  # refine to sub-pixel corner positions
            # corners2 = corners
            # _, corners2 = cv2.find4QuadCornerSubpix(gray, corners, (11, 11))

            imgpoints.append(corners2)
            img = cv2.drawChessboardCorners(img, (col, row), corners2, ret)  # mark the detected corners
            found += 1
            if len(images) < 16:  # skip the UI preview when there are many images, to avoid too many pop-ups
                cv2.namedWindow('press any key to continue', cv2.WINDOW_NORMAL)
                cv2.imshow('press any key to continue', img)
                cv2.waitKey(0)

            # image_name = path2 + "//corner" + str(found) + '.png'
            # cv2.imwrite(image_name, img)
            # save the image with the corners marked
            
    global path2  # output directory (camera-parameter files, undistorted photos, 3-D box photos)
    path2 = tkinter.filedialog.askdirectory(
        title="Select the output folder (should differ from the calibration-photo folder)")

    print("Number of images used for calibration: ", found)

    # camera calibration
    ret2, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                        gray.shape[::-1], None, None)

    print("reprojection error:", ret2)
    print("内参矩阵:", mtx)
    print("畸变系数:", dist)
    print("旋转向量:", rvecs)
    print("平移向量:", tvecs)

    images = glob.glob(path+"//*.jpg")
    for i, fname in enumerate(images):
        img = cv2.imread(fname)
        if img is None:
            continue
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                            (w, h))
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)  # correct the lens distortion

        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]  # crop to the valid region
        outpath = path2+"//tianyi_gao_undistorted" + str(i + 1) + ".jpg"
        cv2.imwrite(outpath, dst)
    print("新内参矩阵:", newcameramtx)
    
    data = {
        'camera_matrix': np.asarray(mtx).tolist(),
        'dist_coeff': np.asarray(dist).tolist(),
        'new_camera_matrix': np.asarray(newcameramtx).tolist(),
        'rvecs': np.asarray(rvecs).tolist(),
        'tvecs': np.asarray(tvecs).tolist(),
        'reprojection_error': np.asarray(ret2).tolist()
    }
    # save the camera parameters (YAML)
    with open(path2+"//calibration_parameters.yaml", "w") as f:
        yaml.dump(data, f)
    # save the camera parameters (txt)
    with open(path2+"//tianyi_gao_cam.txt", "w") as f2:
        name = list(data.keys())
        value = list(data.values())
        for i in range(len(name)):
            f2.write(name[i] + ":" + "\n" + str(value[i]) + "\n")

    print('Calibrate Done')
    cv2.destroyAllWindows()
    return mtx, dist, rvecs, tvecs, ret2, path2
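
Once `calibration_parameters.yaml` has been written by `calibrate()`, the parameters can be read back and reused without re-running the chessboard detection. A minimal sketch; the file layout matches the `data` dict saved above:

import cv2
import numpy as np
import yaml

def load_calibration(yaml_path):
    # Read back the parameters saved by calibrate() above.
    with open(yaml_path) as f:
        data = yaml.safe_load(f)
    return np.asarray(data['camera_matrix']), np.asarray(data['dist_coeff'])

def undistort_image(img, mtx, dist):
    h, w = img.shape[:2]
    new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv2.undistort(img, mtx, dist, None, new_mtx)
    x, y, w, h = roi
    return dst[y:y + h, x:x + w]  # crop to the valid region, as in calibrate()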
コード例 #28
0
def main():
    """
    Main function of the program.
    """
    # Capture the video from the default camera
    defaultCamera = 0
    capturedVideo = cv2.VideoCapture(defaultCamera)

    # Initialization of the background subtractor
    backgroundSubtractor = cv2.createBackgroundSubtractorMOG2(
        detectShadows=True)

    if not capturedVideo.isOpened():
        print('Error: Unable to open the camera')
        exit(0)

    # Top-left and bottom-right points of the region of interest rectangle:

    # Low resolution webcam
    regionOfInterestPoint1 = (330, 10)
    regionOfInterestPoint2 = (630, 310)

    # Medium resolution webcam
    # regionOfInterestPoint1 = (800, 30)
    # regionOfInterestPoint2 = (1250, 530)

    # Constant tuple with the two learning rates for the background
    #  subtractor
    learningRates = (0.3, 0)

    # Initialization of the current learning rate
    currentLearningRate = learningRates[0]

    # Boolean that stores if the user wants to count the raised fingers or not
    countHandFingers = True

    # Boolean that stores if the user wants to detect hand gestures or not
    detectHandGestures = True

    # Boolean that stores if the user wants to draw with the index finger or not
    indexFingerDrawing = False

    # List that stores the trace of the current stroke of the drawing
    currentStroke = list()

    # List that stores the entire drawing
    currentDrawing = list()

    # Boolean that stores if the user wants to see the help info or not
    showHelp = True

    while True:
        # Read the data from the captured video
        returnErrorValue, capturedFrame = capturedVideo.read()
        if not returnErrorValue:
            print('Error: Unable to get data')
            exit(0)

        # Window showing the mirrored captured video and the region of interest
        #  marked with a blue rectangle
        capturedFrame = cv2.flip(capturedFrame, 1)
        cv2.rectangle(capturedFrame,
                      regionOfInterestPoint1,
                      regionOfInterestPoint2,
                      color=(255, 0, 0))
        cv2.imshow('WebCam', capturedFrame)

        # Window showing the region of interest only
        regionOfInterest = capturedFrame[
            regionOfInterestPoint1[1]:regionOfInterestPoint2[1],
            regionOfInterestPoint1[0]:regionOfInterestPoint2[0], :].copy()
        # cv2.imshow('Region of Interest', regionOfInterest)

        # Window showing the background subtraction applied
        foregroundMask = backgroundSubtractor.apply(regionOfInterest, None,
                                                    currentLearningRate)
        # cv2.imshow('Foreground Mask', foregroundMask)

        # Window showing the gray threshold applied
        returnErrorValue, blackAndWhite = cv2.threshold(
            foregroundMask, 200, 255, cv2.THRESH_BINARY)
        cv2.imshow('Black and White', blackAndWhite)

        # Window showing the hand contour
        contours = cv2.findContours(blackAndWhite, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[0]
        contourWindow = regionOfInterest.copy()

        if len(contours) > 0 and currentLearningRate != learningRates[0]:
            handContour = getLargerContour(contours)
            cv2.drawContours(contourWindow,
                             handContour,
                             contourIdx=-1,
                             color=(0, 255, 0),
                             thickness=3)
        else:
            handContour = None

        cv2.imshow('Contour', contourWindow)

        # Window showing the hand's convex hull
        convexHullWindow = regionOfInterest.copy()

        if handContour is not None:
            handConvexHull = cv2.convexHull(handContour)
            cv2.drawContours(convexHullWindow, [handConvexHull],
                             contourIdx=0,
                             color=(255, 0, 0),
                             thickness=3)
        else:
            handConvexHull = None

        cv2.imshow('Convex Hull', convexHullWindow)

        # Window showing the fingers' convexity defects
        convexityDefectsWindow = regionOfInterest.copy()

        if handContour is not None:
            handConvexHull = cv2.convexHull(handContour,
                                            clockwise=False,
                                            returnPoints=False)
            # cv2.convexityDefects expects the hull indices in monotonic order
            tempPythonList = list(handConvexHull)
            tempPythonList.sort(reverse=True, key=lambda element: element[0])
            handConvexHull = numpy.array(tempPythonList)
            handConvexityDefects = cv2.convexityDefects(
                handContour, handConvexHull)
            fingerConvexityDefects = list()

            if handConvexityDefects is not None:
                for currentConvexityDefect in range(len(handConvexityDefects)):
                    startIndex, endIndex, farIndex, distanceToConvexHull \
                        = handConvexityDefects[currentConvexityDefect][0]

                    startPoint = tuple(handContour[startIndex][0])
                    endPoint = tuple(handContour[endIndex][0])
                    farPoint = tuple(handContour[farIndex][0])

                    depth = distanceToConvexHull / 256.0  # OpenCV stores defect depth as fixed-point (depth * 256)

                    if depth > 80.0:
                        # angleOfCurrentConvexityDefect = angle(
                        #     startPoint, endPoint, farPoint)
                        cv2.line(convexityDefectsWindow,
                                 startPoint,
                                 endPoint,
                                 color=(255, 0, 0),
                                 thickness=2)
                        cv2.circle(convexityDefectsWindow,
                                   farPoint,
                                   radius=5,
                                   color=(0, 0, 255),
                                   thickness=-1)
                        fingerConvexityDefects.append(
                            (startPoint, endPoint, farPoint))

        else:
            handConvexHull = None
            handConvexityDefects = None

        cv2.imshow('Convexity Defects', convexityDefectsWindow)

        # Window showing the hand's bounding rectangle
        boundingRectangleWindow = regionOfInterest.copy()

        if handContour is not None:
            handBoundingRectangle = cv2.boundingRect(handContour)

            boundingRectanglePoint1 = (handBoundingRectangle[0],
                                       handBoundingRectangle[1])
            boundingRectanglePoint2 = (handBoundingRectangle[0] +
                                       handBoundingRectangle[2],
                                       handBoundingRectangle[1] +
                                       handBoundingRectangle[3])

            cv2.rectangle(boundingRectangleWindow,
                          boundingRectanglePoint1,
                          boundingRectanglePoint2,
                          color=(0, 0, 255),
                          thickness=3)
        else:
            handBoundingRectangle = None
            boundingRectanglePoint1 = None
            boundingRectanglePoint2 = None

        cv2.imshow('Bounding Rectangle', boundingRectangleWindow)

        # Window showing the user side functionalities
        mainWindow = regionOfInterest.copy()

        if handContour is not None:
            numberOfFingers = countFingers(fingerConvexityDefects,
                                           handBoundingRectangle)

            if countHandFingers:
                mainWindow = printFingers(numberOfFingers, mainWindow)

            if detectHandGestures:
                handGesture = detectHandGesture(numberOfFingers,
                                                fingerConvexityDefects,
                                                handBoundingRectangle)
                mainWindow = printGestures(handGesture, mainWindow)

            if indexFingerDrawing:
                currentStroke = fingerDrawing(currentStroke, handContour,
                                              mainWindow)

            mainWindow = printStroke(currentStroke, mainWindow)

            for stroke in currentDrawing:
                mainWindow = printStroke(stroke, mainWindow)

        if showHelp:
            mainWindow = printHelp(mainWindow)

        if currentLearningRate == learningRates[0]:
            mainWindow = printLearning(mainWindow)

        cv2.imshow('Main Window', mainWindow)

        keyboard = cv2.waitKey(1)

        # Key used for swapping between the two learning rates
        if keyboard & 0xFF == ord('s'):
            currentLearningRate = swapLearningRate(currentLearningRate,
                                                   learningRates)

        # Key used for toggling the raised-finger counting
        if keyboard & 0xFF == ord('f'):
            countHandFingers = not countHandFingers

        # Key used for toggling hand gesture detection
        if keyboard & 0xFF == ord('g'):
            detectHandGestures = not detectHandGestures

        # Key used for toggling drawing with the index finger
        if keyboard & 0xFF == ord('d'):
            indexFingerDrawing = not indexFingerDrawing

            # Save the finished stroke (if any) when drawing is turned off
            if not indexFingerDrawing and currentStroke:
                currentDrawing.append(currentStroke[:])
                currentStroke.clear()

        # Key used for deleting the last stroke
        if keyboard & 0xFF == ord('c') and currentDrawing:
            currentDrawing.pop()

        # Key used for clearing the entire drawing
        if keyboard & 0xFF == ord('x'):
            currentDrawing.clear()

        # Key used for toggling the help info
        if keyboard & 0xFF == ord('h'):
            showHelp = not showHelp

        # Key used for finishing the program execution
        if keyboard & 0xFF == ord('q'):
            break

    capturedVideo.release()
    cv2.destroyAllWindows()
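The loop above calls helpers such as getLargerContour and swapLearningRate that are not part of this excerpt. A minimal sketch of what they might look like; the real implementations may differ:

import cv2

def getLargerContour(contours):
    # Assume the hand is the contour with the largest area
    return max(contours, key=cv2.contourArea)

def swapLearningRate(currentLearningRate, learningRates):
    # Toggle between the two background-subtractor learning rates
    if currentLearningRate == learningRates[0]:
        return learningRates[1]
    return learningRates[0]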
コード例 #29
0
def mode_t():       
    #drone = tellopy.Tello()

    try:
        #drone.connect()
        # Wait up to 15 seconds for the drone connection; exit on timeout
        #drone.wait_for_connection(15.0)
        print("Drone connected.")
        #drone.takeoff()
        #time.sleep(4)
        print("Drone takeoff succeeded.")
        
        retry = 3
        container = None
        #while container is None and 0 < retry:
        #    retry -= 1
        #    try:
        #        container = av.open(drone.get_video_stream())
        #    except av.AVError as ave:
        #        print(ave)
        #        print('av.open err retry...')

        # Skip the first 420 frames; the stream is unreliable right after connecting
        frame_skip = 420
        # The drone setup above is stubbed out; fail clearly if no stream was opened
        if container is None:
            raise RuntimeError('no video stream available')
        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                # Frame capture start time
                start_time = time.time()
                # Convert the drone frame to an OpenCV (BGR) image
                img = cv.cvtColor(numpy.array(frame.to_image()), cv.COLOR_RGB2BGR)
                gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                #cv.imshow('drone cam', img)
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)
                        
                for (x, y, w, h) in faces:
                    img = cv.rectangle(img, (x, y),(x + w, y + h), (255, 0, 0), 2)
                    roi_gray = gray[y : y + h, x : x + w]
                    roi_color = img[y : y + h, x : x + w]
                    eyes = eye_cascade.detectMultiScale(roi_gray)
                    for (ex, ey, ew, eh) in eyes:
                        cv.rectangle(roi_color, (ex, ey),(ex + ew, ey + eh), (0, 255, 0), 2)

                cv.imshow('face_detection', img)        
                k = cv.waitKey(1)
                if k == ord('s'):
                    break
                if frame.time_base < 1.0/60:
                    time_base = 1.0/60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time)/time_base)
                    
    # Handle any error
    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
        print('Exiting the program.')
        #drone.quit()  # stubbed out, matching the disabled drone calls above
        cv.destroyAllWindows()
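The frame_skip arithmetic at the end of the loop keeps decoding close to real time: if processing one frame took longer than the stream's frame interval, the corresponding number of queued frames is dropped. The pattern in isolation (a sketch; 'container' is an already-open PyAV container and 'process' is a hypothetical per-frame handler):

import time

frame_skip = 0
for frame in container.decode(video=0):
    if frame_skip > 0:
        frame_skip -= 1
        continue  # drop stale frames that queued up while we were busy
    start_time = time.time()
    process(frame)
    # Estimate how many frames arrived during processing and skip them,
    # clamping the frame interval to at most 60 fps as the code above does
    time_base = max(float(frame.time_base), 1.0 / 60)
    frame_skip = int((time.time() - start_time) / time_base)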
コード例 #30
0
ファイル: wc.py プロジェクト: coolblue91198/rc_stuffz
    def _release_resources(self):
        self._cap.release()
        cv2.destroyAllWindows()
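This fragment is only the teardown method of a larger capture class. A hypothetical minimal context it could belong to:

import cv2

class Webcam:
    def __init__(self, source=0):
        self._cap = cv2.VideoCapture(source)

    def show(self):
        # Display frames until 'q' is pressed, then clean up
        while self._cap.isOpened():
            ok, frame = self._cap.read()
            if not ok:
                break
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self._release_resources()

    def _release_resources(self):
        self._cap.release()
        cv2.destroyAllWindows()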
コード例 #31
0
def main(source,
         destination=None,
         movingaverage=False,
         alignment=False,
         framedelay=None):
    print("-------------- PARAMETERS --------------")
    print("Reading", source if source != 0 else "from internal cam...")
    print("Output:", "imshow" if destination is None else destination)
    print("Moving Average:", movingaverage)
    print("Face Alignment:", alignment)
    print("Frame Delay:", framedelay)
    print("----------------------------------------")
    cam = cv.VideoCapture(source)
    W, H = int(cam.get(3)), int(cam.get(4))  # frame width and height
    fontsize = int(H // 20.5)  #35
    if destination is not None:
        # TODO: add a codec/format parameter, or reuse the source's format
        out = cv.VideoWriter(destination, cv.VideoWriter_fourcc(*'MP4V'), 20.0,
                             (W, H))
    multitask = MultiTaskNetwork()
    facedet = FaceDetector(conf_thresh=CONFIDENCE_DETECTOR)
    facealign = FaceAligner()

    tracker = CentroidTracker(maxDisappeared=CENTROID_TOLERANCE)

    if movingaverage or debug_movingaverage:
        averager = MultiMovingAverage(GMA, AMA, EtMA, EmMA)
    frame = 0
    while True:
        ok, annImage = cam.read()
        if not ok:
            break  # end of stream
        faces = facedet.detect(annImage)
        rects = []

        if framedelay is None or frame >= framedelay:
            frame = 0
            if movingaverage:
                for f in faces:
                    rects.append(coords(f))
                objects, items = tracker.update(rects, faces)
                for ((faceID, centroid), (_,
                                          f)) in zip(objects.items(),
                                                     items.items()):
                    if alignment:
                        face_coords = (0, 0, f['img'].shape[0],
                                       f['img'].shape[1])
                        face = (facealign.align(f['img'], face_coords))
                    else:
                        face = f['img']
                    G, A, E, R = multitask.get_prediction(face)
                    if movingaverage:
                        G, A, E, R = averager.average(faceID, G, A, E, R)
                    cv.rectangle(annImage, top_left(f), bottom_right(f),
                                 bounding_color, 2)
                    label_apply = bottom_center(f)  # use bottom_left(f) for left alignment
                    annImage = write_str(annImage,
                                         "%s, %d\n%s\n%s" % (G, A, E, R),
                                         label_apply, label_color,
                                         (select_strcolor(G), text_color),
                                         fontsize)

                    # text = "ID {}".format(faceID)
                    # cv.putText(annImage, text, (centroid[0] - 10, centroid[1] - 10),
                    #     cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    # cv.circle(annImage, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
            else:
                for f in faces:
                    if alignment:
                        face_coords = (0, 0, f['img'].shape[0],
                                       f['img'].shape[1])
                        face = (facealign.align(f['img'], face_coords))
                    else:
                        face = f['img']
                    G, A, E, R = multitask.get_prediction(face)
                    if debug_movingaverage:
                        G, A, E, R = averager.average(0, G, A, E, R)
                    cv.rectangle(annImage, top_left(f), bottom_right(f),
                                 bounding_color, 2)
                    label_apply = bottom_center(f)  # use bottom_left(f) for left alignment
                    annImage = write_str(annImage,
                                         "%s, %d\n%s\n%s" % (G, A, E, R),
                                         label_apply, label_color,
                                         (select_strcolor(G), text_color),
                                         fontsize)
        if framedelay is not None:
            frame += 1
        if destination is None:
            cv.imshow('Multitask CNNs for efficient face analysis in the wild',
                      annImage)
        else:
            out.write(annImage)
        if cv.waitKey(1) == ord('q'):
            # Release resources so the output file is finalized properly
            if destination is not None:
                out.release()
            cam.release()
            cv.destroyAllWindows()
            exit()
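A possible command-line entry point for the function above (a sketch; the actual project may wire its arguments differently):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Multitask face analysis demo')
    parser.add_argument('--source', default=0, help='video file or camera index')
    parser.add_argument('--destination', default=None, help='output video path')
    parser.add_argument('--movingaverage', action='store_true')
    parser.add_argument('--alignment', action='store_true')
    parser.add_argument('--framedelay', type=int, default=None)
    args = parser.parse_args()

    # Camera indices arrive as strings; convert when the value is numeric
    source = int(args.source) if str(args.source).isdigit() else args.source
    main(source, args.destination, args.movingaverage, args.alignment,
         args.framedelay)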