Example #1
def action(img_bytes, img_width, img_height, is_belly, ctrl_state, vbat_flying_percentage, theta, phi, psi, altitude, vx, vy):
 
    # Set up command defaults
    zap = 0
    phi = 0
    theta = 0
    gaz = 0
    yaw = 0
 
    # Set up state variables first time around
    if not hasattr(action, 'count'):
        action.count = 0
        action.errx_1 = 0
        action.erry_1 = 0
        action.phi_1 = 0
        action.gaz_1 = 0
                 
    # Create full-color image from bytes
    image = cv.CreateImageHeader((img_width, img_height), cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, img_bytes, img_width * 3)
                 
    # Grab centroid of face
    ctr, faces = face_tracker.track(image)
#    ctr = ball_tracker.track(image)

    print "got %s faces" % str(len(faces))

    dx = 0
    dy = 0

    # Use centroid if it exists
    if ctr and ctr != (-1, -1):
 
        # Compute proportional distance (error) of centroid from image center
        errx = _dst(ctr, 0, img_width)
        erry = -_dst(ctr, 1, img_height)
        dx = errx * img_width
        dy = erry * img_height
 
        # Compute vertical, horizontal velocity commands based on PID control after first iteration
        if action.count > 0:
            phi = _pid(action.phi_1, errx, action.errx_1, Kpx, Kix, Kdx)
            gaz = _pid(action.gaz_1, erry, action.erry_1, Kpy, Kiy, Kdy)
 
        # Remember PID variables for next iteration
        action.errx_1 = errx
        action.erry_1 = erry
        action.phi_1 = phi
        action.gaz_1 = gaz
        action.count += 1

    if len(faces) > 1 or len(faces) == 0:
        return 0, 0, 0, 0, 0, 0, 0, None

    ((x, y, w, h), n) = faces[0]
    size = (w, h)

    # Send control parameters back to drone
    print (zap, phi, theta, gaz, yaw, dx, dy, size)
    return (zap, phi, theta, gaz, yaw, dx, dy, size)
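
The action() examples on this page rely on module-level helpers _dst and _pid and on gain constants Kpx, Kix, Kdx, Kpy, Kiy, Kdy that are defined elsewhere in the project and not shown here. The sketch below is only a guess at what they might look like, assuming _dst returns the centroid's offset from the image center as a fraction of the image dimension and _pid performs an incremental (velocity-form) control update; the gain values are placeholders, not the project's tuning.

# Hypothetical helpers assumed by the action() examples -- one plausible
# implementation, not code from the original project.

# Placeholder gains (the real project tunes these elsewhere)
Kpx, Kix, Kdx = 0.25, 0.0, 0.25
Kpy, Kiy, Kdy = 0.25, 0.0, 0.25

def _dst(ctr, index, dimension):
    # Proportional distance of the centroid from the image center along one
    # axis: 0 at the center, roughly +/-0.5 at the edges.
    return (ctr[index] - dimension / 2.0) / float(dimension)

def _pid(prev_cmd, err, prev_err, Kp, Ki, Kd):
    # Incremental ("velocity form") update: the new command is the previous
    # command plus a correction, so the integral builds up implicitly in the
    # command itself.  Kd is accepted to match the call sites, but a proper
    # derivative term in this form would need the error from two steps back,
    # so it is left out of this sketch.
    return prev_cmd + Kp * (err - prev_err) + Ki * err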

Example #2

def action(img_bytes, img_width, img_height, is_belly, ctrl_state,
           vbat_flying_percentage, theta, phi, psi, altitude, vx, vy):

    # Set up command defaults
    zap = 0
    phi = 0
    theta = 0
    gaz = 0
    yaw = 0

    # Set up state variables first time around
    if not hasattr(action, 'count'):
        action.count = 0
        action.errx_1 = 0
        action.erry_1 = 0
        action.errz_1 = 0
        action.phi_1 = 0
        action.gaz_1 = 0
        action.yaw_1 = 0
        action.theta_1 = 0

    # Create full-color image from bytes
    image = cv.CreateImageHeader((img_width, img_height), cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, img_bytes, img_width * 3)

    # Grab centroid of face
    ctr = face_tracker.track(image)

    # Use centroid if it exists
    if ctr:

        # Compute proportional distance (error) of centroid from image center
        errx = _dst(ctr, 0, img_width)
        erry = -_dst(ctr, 1, img_height)
        # use this if you want to make it come towards you
        errz = _dst(ctr, 2, img_height)

        # Compute yaw, climb and pitch commands with PID control after the first iteration
        if action.count > 0:
            # use this if you want to make it strafe to follow your face horizontally
            #phi = _pid(action.phi_1, errx, action.errx_1, Kpx, Kix, Kdx)
            # use this if you want to make it rotate to follow your face horizontally
            yaw = _pid(action.yaw_1, errx, action.errx_1, Kpx, Kix, Kdx)
            gaz = _pid(action.gaz_1, erry, action.erry_1, Kpy, Kiy, Kdy)
            # use this if you want to make it come towards you
            theta = _pid(action.theta_1, errz, action.errz_1, Kpy, Kiy, Kdy)

        # Remember PID variables for next iteration
        action.errx_1 = errx
        action.erry_1 = erry
        action.errz_1 = errz
        action.phi_1 = phi
        action.gaz_1 = gaz
        action.yaw_1 = yaw
        action.theta_1 = theta
        action.count += 1

    # Send control parameters back to drone
    return (zap, phi, theta, gaz, yaw)
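
For context, an action() callback like the ones above is meant to run once per video frame together with the current navdata, and its (zap, phi, theta, gaz, yaw) return value is sent back to the drone as the next movement command. The loop below is only a rough sketch of such a driver; drone.get_frame(), drone.get_navdata() and drone.move() are hypothetical stand-ins, not the API of any particular drone library.

import time

def control_loop(drone, action, fps=10):
    # Hypothetical driver loop: feed each camera frame and the current
    # navdata to action(), then forward the resulting command to the drone.
    while True:
        img_bytes, width, height = drone.get_frame()   # raw RGB bytes (stand-in call)
        nav = drone.get_navdata()                      # flight state as a dict (stand-in call)

        zap, phi, theta, gaz, yaw = action(
            img_bytes, width, height,
            nav['is_belly'], nav['ctrl_state'], nav['battery'],
            nav['theta'], nav['phi'], nav['psi'],
            nav['altitude'], nav['vx'], nav['vy'])

        drone.move(phi, theta, gaz, yaw)               # stand-in for the actual command call
        time.sleep(1.0 / fps)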

Example #3

    def run_new(self):
        import cv2.cv as cv
        while self.running:
            pixelarray = self.drone.get_image()  # get a frame from the drone
            if pixelarray is not None:  # check whether the frame is not empty
                self.frame = pixelarray[:, :, ::-1].copy()  # reverse the channel order (RGB -> BGR) into a frame

            # numpy shape is (height, width, channels); flip to (width, height)
            weirdsize = self.frame.shape
            size = (weirdsize[1], weirdsize[0])

            image = cv.CreateImageHeader(size[:2], cv.IPL_DEPTH_8U, 3)
            cv.SetData(image, self.frame, size[0] * 3)

            # Grab centroid and faces
            ctr, faces = face_tracker.track(image)
            faces = [f for (f, n) in faces]

            # print "got %s faces" % str(len(faces))

            gotface = True
            face = None
            bestface = None
            if self.lastface is None:  # no previous data, wait until only one face detected
                if len(faces) == 1:
                    self.lastface = faces[0]
                    face = faces[0]
                else:
                    # print "no face yet"
                    gotface = False
            else:
                # we have previous face data
                if len(faces) == 0:
                    self.consequtive_misses += 1
                    gotface = False
                else:
                    # look for the best face
                    for item in faces:
                        (x, y, w, h) = item
                        # print "face at ", x, " ", y
                        facediff = tuple_absolute_difference(
                            item, self.lastface)
                        avg_diff = sum(facediff) / len(facediff)
                        if bestface is None or avg_diff < bestface[1]:
                            bestface = (item, avg_diff)
                            # print "reassign: avgdiff=", avg_diff

                    if bestface is None or bestface[1] > 30.0:
                        # ignore improbable face movement
                        self.consequtive_misses += 1
                    else:
                        face = bestface[0]

                if self.consequtive_misses > 10:
                    self.consequtive_misses = 0
                    print "more than 10 misses, resetting face detector"
                    self.lastface = None
            if gotface:
                # draw all faces
                for item in faces:
                    (x, y, w, h) = item
                    if item == face:
                        cv.Rectangle(image, (x, y), (x + w, y + h),
                                     cv.RGB(255, 0, 0), 3, 8, 0)
                    else:
                        cv.Rectangle(image, (x, y), (x + w, y + h),
                                     cv.RGB(0, 0, 255), 3, 8, 0)

                if face is not None:
                    # steer and draw steering line
                    (fx, fy, fw, fh) = face
                    framecenter = (size[0] / 2, size[1] / 2)
                    center = (fx + fw / 2, fy + fh / 2)
                    dx = center[0] - framecenter[0]
                    dy = center[1] - framecenter[1]
                    cv2.line(self.frame, framecenter, center, (0, 255, 0), 4)
                    self.steer_to(dx, dy, (fw, fh))
                    self.lastface = face

            yu = size[1] / 2 + Z_UPPER
            yl = size[1] / 2 + Z_LOWER
            cv2.line(self.frame, (0, yu), (size[0], yu), cv.RGB(255, 0, 0), 1)
            cv2.line(self.frame, (0, yl), (size[0], yl), cv.RGB(255, 0, 0), 1)
            cv2.imshow(self.window_name, self.frame)
            time.sleep(0.05)

        cv2.destroyAllWindows()
        print "video loop stopped."
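
run_new() matches the current detections against self.lastface with a helper called tuple_absolute_difference, which is not shown on this page. Judging by how its result is averaged and compared against a pixel threshold, it presumably returns the element-wise absolute differences of two equal-length tuples (here two (x, y, w, h) face rectangles); a minimal sketch under that assumption:

def tuple_absolute_difference(a, b):
    # Element-wise absolute difference of two equal-length tuples,
    # e.g. two (x, y, w, h) face rectangles.
    return tuple(abs(p - q) for p, q in zip(a, b))

# For example (hypothetical values):
# tuple_absolute_difference((120, 80, 64, 64), (110, 78, 60, 60)) -> (10, 2, 4, 4)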