Example #1
    def run_darknet(self):
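        # Run the darknet binary on the temp image, count "person:" labels in
        # its output, and load the annotated image darknet writes to disk.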
        os.chdir(DARKNET_PATH)
        cmd = DARKNET_CMD.format(path=DARKNET_PATH, image=TMP_FILE).split(" ")
        output = subprocess.check_output(cmd)
        logging.debug(output)
        persons = len(re.findall("person:", str(output)))
        logging.debug("found persons {0}".format(persons))

        self.person_count = persons
        self.image = cv2.imread(DARKNET_PATH + "/my_output.jpg")
        hud.get_hud(self.image, "found persons: {0}".format(self.person_count))
        self.finished = True
Example #2
def runocv(ws):
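    # Read frames from the (module-level) camera, look for a marker, detect
    # faces inside the marker region, and send turn commands over the
    # websocket to keep the face centred.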
    marker_detector = ObjectDetector()
    face_detector = FaceDetector()
    frame_idx = 0
    #writer = videowriter.VideoWriter(cam, out_file="/home/user/opencv/videos/object-detection.mp4")
    ws.send(json.dumps({'message': 'Beginning with OCV'}))

    while True:
        ret, frame = cam.read()
        if not ret:
            break
        frame = imutils.resize(frame, width=640)
        frame_idx += 1
        marker = marker_detector.detect(frame)
        if marker is not None:
            marker = cv2.boundingRect(marker)
            faces = face_detector.detect(frame)
            for face in faces:
                if in_region(marker, face):
                    hud.mark_rois(frame, [face], label="face")
                    x1, y1, w, h = face

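                    # Rough person ROI extrapolated from the face box:
                    # shifted one face-width left, three face-widths wide,
                    # six face-heights tall (assumed (x, y, w, h) layout).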
                    p_x1 = x1 - w
                    p_x2 = (3 * w)
                    p_y1 = y1
                    p_y2 = (6 * h)
                    facecenter = x1 + (0.5 * w)
                    imgwidth = 640
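                    # Send a turn command based on which horizontal third of
                    # the frame the face centre falls in.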
                    if facecenter < imgwidth / 3:
                        logging.debug('Face left')
                        ws.send(json.dumps({'function': 'turn', 'args': [1]}))
                    elif facecenter < imgwidth * 2 / 3:
                        logging.debug('Face center')
                        ws.send(json.dumps({'function': 'turn', 'args': [0]}))
                    else:
                        logging.debug('Face right')
                        ws.send(json.dumps({'function': 'turn', 'args': [-1]}))

                    hud.mark_rois(frame, [(p_x1, p_y1, p_x2, p_y2)])

        hud.get_hud(frame, None, frame_idx)
        cv2.imshow("frame", frame)
        #writer.write(frame)
        key = cv2.waitKey(1) & 0xff
        if key == 27:
            ws.send(json.dumps({'function': 'autoPilot', 'args': ['true']}))
            break
Example #3
def main():
    global current_step
    global current_point
    mybike = bike()
    alpha = 100.0
    changed = False
    step = 20
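    # Pull every Trackpoint out of the TCX file, keeping altitude, distance,
    # latitude and longitude for each waypoint.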
    doc = parse('breckenridge_5k.tcx')
    trackpoints = doc.getElementsByTagName("Trackpoint")
    elements = ('AltitudeMeters', 'DistanceMeters', 'LatitudeDegrees', 'LongitudeDegrees')
    for tp in trackpoints:
        obj = {}
        for el in elements:
            obj[el] = float(tp.getElementsByTagName(el)[0].firstChild.data)
        waypoints.append(obj)
    
    pygame.init()
    screen = pygame.display.set_mode((640, 480), 0, 32)
    
    all_route_points = StreetView.load_route_points(data_dir, waypoints)
    
    current_image = pygame.image.load(all_route_points[current_point]['FileName']).convert()
    next_image = pygame.image.load(all_route_points[current_point + 1]['FileName']).convert()

    pygame.display.flip()
    pygame.key.set_repeat(500, 30)
    usage()

    going = True
    while going:
        for event in pygame.event.get():
            if event.type == QUIT:
                going = False

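            # Q advances the crossfade (stepping to the next route point once
            # it completes); W backs it off.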
            if event.type == KEYDOWN and event.key == K_q:
                alpha -= step
                if alpha <= 0.0:
                    alpha = 100
                    current_point+=1
                    current_image = pygame.image.load(all_route_points[current_point]['FileName']).convert()
                    next_image = pygame.image.load(all_route_points[current_point + 1]['FileName']).convert()
                changed = True
            elif event.type == KEYDOWN and event.key == K_w:
                alpha += step
                changed = True
                if alpha >= 100:
                    alpha = 100

        current_image.set_alpha(alpha)
        next_image.set_alpha(100 - alpha)
        screen.blit(current_image, (0, 0))
        screen.blit(next_image, (0, 0))
        screen.blit(hud.get_hud(mybike), (0, 0))
        pygame.display.flip()
        changed = False

    pygame.quit()
Example #4
import cv2
import numpy as np

import hud  # draw_flow() is assumed to be defined elsewhere in the original script

cam = cv2.VideoCapture("../../../videos/motion-flow.mp4")
ret, prev = cam.read()
prev = hud.prepare_frame(prev)
prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
show_hsv = False
show_glitch = False
cur_glitch = prev.copy()

frame_idx = 0

while True:
    ret, img = cam.read()
    if not ret:
        break
    img = hud.prepare_frame(img)
    orig = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
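    # Dense Farneback optical flow between consecutive grayscale frames:
    # pyramid scale 0.5, 3 levels, window size 15, 1 iteration, poly_n 5,
    # poly_sigma 1.2, no flags.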
    flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 1, 5, 1.2, 0)
    prevgray = gray
    frame_idx += 1

    if frame_idx % 1 == 0:  # % 1 is always 0, so every frame is drawn; raise the modulus to skip frames
        hud.get_hud(orig, None, frame_idx)
        mask = np.zeros_like(orig)
        mask = draw_flow(mask, flow)
        cv2.addWeighted(orig, 1, mask, 0.3, 1, dst=orig)
        cv2.imshow('flow', orig)

    ch = cv2.waitKey(5)
    if ch == 27:
        break
cv2.destroyAllWindows()
Example #5
from pydrone import *
import cv2  # explicit import, in case pydrone's star import does not re-export cv2
import hud
import detectors

file = "../../media/drone-video-test.mp4"
file = "http://192.168.42.12:8080/olitest"
video = cv2.VideoCapture(file)
frame_idx = 0
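
# Show the stream with a HUD overlay: the first 100 frames are labelled as
# drone startup, and person detection runs on every tenth frame.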

while video.isOpened():
    ret, frame = video.read()
    if not ret:
        break
    frame_idx += 1

    if frame_idx < 100:
        hud.get_hud(frame, action="STARTING DRONE", idx=frame_idx)
    else:
        hud.get_hud(frame, action=None, idx=frame_idx)

    if frame_idx % 10 == 0:
        detectors.detect_person(frame)

    cv2.imshow("video", frame)

    key = cv2.waitKey(1) & 0xff
    if key == 27:
        break

Example #6
# - brew tap homebrew/science
# - brew install opencv

import cv2

from hud import get_hud  # assumed import style, since get_hud is called unqualified below

frame_idx = 0
action_str = "centered"
dimensions = (960, 720)

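# Reference point offset from the exact frame centre; the +140/-20 offsets
# look like per-camera calibration.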
cWidth = int((dimensions[0] / 2) + 140)
cHeight = int((dimensions[1] / 2) - 20)

cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)

if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False

while rval:
    cv2.imshow("preview", frame)
    rval, frame = vc.read()
    key = cv2.waitKey(20)
    if key == 27:  # exit on ESC
        break
    else:
        frame_idx += 1
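        # The overlay is drawn on the freshly read frame, so it appears when
        # that frame is shown on the next pass through the loop.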
        get_hud(frame, idx=frame_idx, action=action_str)
        cv2.circle(frame, (cWidth, cHeight), 10, (0, 0, 255), 2)

vc.release()
cv2.destroyWindow("preview")
Example #7
            frame = cv2.addWeighted(frame, 1, ori, 0.2, 1)

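            # Overlay a translucent green band over the centre strip of the
            # frame; the turn hints below are relative to this band.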
            mask = np.zeros_like(frame)
            mask = cv2.rectangle(mask, (int(width/2)-center_margin, 0), (int(width/2)+center_margin, height), (0, 255, 0), thickness=-1)
            frame = cv2.addWeighted(frame, 1, mask, 0.05, 1)

            text = " | focused | "

            if x1 < int((width/2)-center_margin):
                text = " << turn "

            if x1 > int((width/2)+center_margin):
                text = " turn >> "

            xt, yt = down_right
            yt += 40
            xt -= 160
            cv2.putText(frame, text, (xt, yt),
                        hud.FONT, 1, hud.HUD_RED)

    hud.get_hud(frame, None, frame_idx)

    writer.write(frame)
    cv2.imshow("image", frame)
    cv2.moveWindow("image", 0, 0)

    k = cv2.waitKey(1) & 0xff
    if k == 27:
        break
        # dlib.hit_enter_to_continue()
Example #8
def run(drone, args, lerp, ddir, face_cascade):

    if not drone.tello.connect():
        print("Tello not connected")
        return

    if not drone.tello.set_speed(drone.speed):
        print("Not set speed to lowest possible")
        return

    # In case streaming is already on: this happens when the program
    # previously quit without the escape key.
    if not drone.tello.streamoff():
        print("Could not stop video stream")
        return

    if not drone.tello.streamon():
        print("Could not start video stream")
        return

    frame_read = drone.tello.get_frame_read()
    drone.tello.get_battery()

    imgCount = 0
    scan = 0
    frame_idx = 0
    OVERRIDE = False
    should_stop = False
    override_speed = args.override_speed
    target_distance = args.distance
    action_str = 'Searching For Target'

    safety_zone_x = args.saftey_x
    safety_zone_y = args.saftey_y

    if args.debug:
        print("DEBUG MODE ENABLED!")

    while not should_stop:
        drone.update()

        if frame_read.stopped:
            frame_read.stop()
            break

        current_time = str(datetime.datetime.now()).replace(':', '-').replace(
            '.', '_')

        frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
        drone_frame = frame_read.frame

        vid = drone.tello.get_video_capture()

        if args.save_session:
            cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount),
                        drone_frame)

        frame = np.rot90(frame)
        imgCount += 1

        time.sleep(1 / constants.FPS)

        # Listen for key presses
        keyboard = cv2.waitKey(20)
        if keyboard == ord('t'):
            if not args.debug:
                print("Lifting Off")
                drone.tello.takeoff()
                drone.tello.get_battery()
            drone.send_rc_control = True

        if keyboard == ord('l'):
            if not args.debug:
                print("Landing")
                drone.tello.land()
            drone.send_rc_control = False

        if keyboard == 8:  # Backspace toggles manual override
            if not OVERRIDE:
                OVERRIDE = True
                print("OVERRIDE ENABLED")
            else:
                OVERRIDE = False
                print("OVERRIDE DISABLED")

        if keyboard == 27:
            should_stop = True
            break
        gray = cv2.cvtColor(drone_frame, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(
            gray, scaleFactor=1.05,
            minNeighbors=3)  # Detects face returns an array

        # scaleFactor – how much the image size is reduced at each image
        # scale. 1.05 is a good value: resizing in small 5% steps increases
        # the chance that a size matching the model is found, at the cost of
        # speed. Values up to about 1.4 detect faster but risk missing some
        # faces altogether.
        #
        # minNeighbors – how many neighbors each candidate rectangle needs in
        # order to be retained. Higher values give fewer detections of higher
        # quality; 3-6 is a good range.
        #
        # minSize – minimum possible object size; smaller objects are
        # ignored. [30, 30] is a common starting point for face detection.
        #
        # maxSize – maximum possible object size; bigger objects are ignored.
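        # For illustration only (values assumed, not from this project), the
        # same call with explicit size bounds would look like:
        #   faces = face_cascade.detectMultiScale(
        #       gray, scaleFactor=1.3, minNeighbors=5,
        #       minSize=(30, 30), maxSize=(300, 300))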

        target_face_size = constants.OPENCV_FACE_SIZES[target_distance]

        # Centre coordinates of the drone observation window
        noFaces = len(faces) == 0
        bounding_box_size = 0
        drone_window_center_width = int(
            (constants.DRONE_OBERSERVATION_WINDOW_DIMENSIONS[0] / 2) - 20)
        drone_window_center_height = int(
            (constants.DRONE_OBERSERVATION_WINDOW_DIMENSIONS[1] / 2) - 20)
        drone_window_center_x = drone_window_center_width
        drone_window_center_y = drone_window_center_height

        if drone.send_rc_control and not OVERRIDE:
            frame_idx += 1

            for (x, y, w, h) in faces:

                roi_gray = gray[y:y + h, x:x + w]
                roi_color = drone_frame[y:y + h, x:x + w]
                action_str = "TARGET FOUND"

                face_box_col = (255, 0, 0)
                face_box_stroke = 2

                bounding_box_x = x + w
                bounding_box_y = y + h
                bounding_box_size = w * 2

                # Aim at the centre of the face box, biased vertically by
                # constants.UDOFFSET.
                target_x = int((bounding_box_x + x) / 2)
                target_y = int((bounding_box_y + y) / 2) + constants.UDOFFSET

                true_center_vector = np.array(
                    (drone_window_center_width, drone_window_center_height,
                     target_face_size))
                true_target_vector = np.array(
                    (target_x, target_y, bounding_box_size))
                distance_vector = true_center_vector - true_target_vector

                dist_error = target_face_size - w
                dist_control = drone.dist_pid.control(dist_error)

                if not args.debug:
                    # Map the tracking errors onto control axes: horizontal
                    # offset -> yaw, vertical offset -> up/down, face-size
                    # error -> forward/back.
                    offset_x = target_x - drone_window_center_x
                    h_control = drone.h_pid.control(offset_x)
                    drone.yaw_velocity = h_control
                    scan = h_control

                    offset_y = target_y - drone_window_center_y
                    v_control = drone.v_pid.control(-offset_y)
                    drone.up_down_velocity = v_control

                    drone.for_back_velocity = dist_control
                    print('-----dist_control', dist_control)
                    print('-----dist_error', dist_error)
                    print(
                        "offset=(%d,%d), cur_size=%d, size_error=%d, h_control=%f"
                        % (offset_x, offset_y, w, dist_error, h_control))

                cv2.rectangle(drone_frame, (x, y),
                              (bounding_box_x, bounding_box_y), face_box_col,
                              face_box_stroke)
                cv2.circle(drone_frame, (target_x, target_y), 10, (0, 255, 0),
                           2)

                # Draw the safety zone
                # cv2.rectangle(drone_frame, (target_x - safety_zone_x, target_y - safety_zone_y),
                #               (target_x + safety_zone_x, target_y + safety_zone_y), (0, 255, 0), face_box_stroke)

                cv2.putText(drone_frame, str(distance_vector), (0, 64),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

            if noFaces:
                print(bounding_box_size, target_distance)
                drone.h_pid.reset()
                drone.v_pid.reset()
                drone.dist_pid.reset()
                drone.yaw_velocity = scan
                drone.up_down_velocity = 0
                drone.for_back_velocity = 0
                action_str = "No Target"
                print("NO TARGET")

        # Draw the center of screen circle, this is what the drone tries to match with the target coords
        cv2.circle(drone_frame,
                   (drone_window_center_width, drone_window_center_height), 10,
                   (0, 0, 255), 2)
        get_hud(drone_frame, idx=frame_idx, action=action_str)
        dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)),
                    (target_distance + 1) / 7)  # parenthesized; the original "target_distance + 1 / 7" pushes the lerp factor past 1

        if OVERRIDE:
            text = "User Control: {}".format(override_speed)
            dCol = (255, 255, 255)
        else:
            text = "AI Control: {}".format(str(target_distance))

        cv2.putText(drone_frame, text, (31, 665), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    dCol, 2)
        cv2.imshow('Drone Tracking...', drone_frame)

    drone.tello.get_battery()
    cv2.destroyAllWindows()
    drone.tello.end()
Example #9
	def Update(self, screen, route, sThread):
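		# Per-frame update: read the bike RPM from the sensor thread, lazily
		# load the current and next route images on first call, handle key
		# events, and crossfade between route images as the ride advances.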
		self.route = route
		main_dir = os.path.split(os.path.abspath(__file__))[0]

		self.update = False

		self.bike.rpm = sThread.getBikeRpm()
		
		if not self.init:
			self.init = True
			self.Start()
			self.alpha = 0
			self.update = True
			self.config = self.load_configuration(route + "/route.dat")
			print(self.config.getint('Route', 'image_count'))
			if self.config.getint('Route', 'image_count') >= (self.position + 1):
				self.image = pygame.image.load(os.path.join(main_dir, route + "/ROUTE-" + str(self.position) + ".jpg")).convert()
				self.next_image = pygame.image.load(os.path.join(main_dir, route + "/ROUTE-" + str(self.position + 1) + ".jpg")).convert()
				
		for event in pygame.event.get():
			if event.type == QUIT:
				self.quit = True
			elif event.type == KEYDOWN and event.key == K_q:
				if self.running:
					self.MoveBike(route)
			elif event.type == KEYDOWN and event.key == K_b:	
				if not self.running:
					self.Start()
			elif event.type == KEYDOWN and event.key == K_n:	
				if self.running:
					self.Pause()
			elif event.type == KEYDOWN and event.key == K_m:	
				self.Stop()
		
		if self.running:
			self.MoveBike(route)
		
		#Update Bike
		if self.running:
			seconds = time.time() - self.start_time
			self.bike.time = str(datetime.timedelta(seconds=int(seconds)))
			if not self.last_time == self.bike.time:
				self.last_time = self.bike.time
			#Blit HUD
			screen.blit(hud.get_hud(self.bike, screen.get_width(), screen.get_height()), (0, 0))

		if self.update:
			if self.alpha > 140:
				self.image.set_alpha(255)
				screen.blit(self.image, (0, 50))
			else:
				self.image.set_alpha(self.alpha)
				screen.blit(self.image, (0, 50))
				self.next_image.set_alpha(255 - self.alpha)
				#Blit World Data
				screen.blit(self.next_image, (0, 50))

			#Blit HUD
			screen.blit(hud.get_hud(self.bike, screen.get_width(), screen.get_height()), (0, 0))

			#NAV BOX
			screen.blit(plot.get_chart(self.route_points, 0), (10, screen.get_height() - 100))

			if self.cleanup_mode:
				self.image.set_alpha(255)
				screen.blit(pygame.transform.scale(self.image, (200, 100)), (20, 300))

		return True