Example #1
class Camera(object):
    def __init__(self, calib_img, resolution, framerate):
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
        calib = undistort(cv2.imread(calib_img))
        self.transform = birdseye_transform(calib)

    def start(self):
        self.stream.start()

    def stop(self):
        self.stream.stop()

    def get_line(self):
        frame = self.stream.read()
        test = undistort(frame)
        bird = birdseye(test, transform=self.transform)

        u = u_channel(bird)

        filtered = sobel(u)

        thresh = threshold(filtered)

        lines, line_coeff = detect_lines(thresh)
        # print("line:", p)
        # print("curvature: {}, slope: {}, offset: {}".format(*line_coeff))
        return line_coeff
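A minimal usage sketch for the class above (the calibration image path, resolution and framerate are illustrative; undistort, birdseye_transform and the other helpers come from the original project):

cam = Camera('calibration.jpg', resolution=(320, 240), framerate=32)
cam.start()
try:
    # the commented-out print above suggests get_line() returns (curvature, slope, offset)
    curvature, slope, offset = cam.get_line()
finally:
    cam.stop()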
Example #2
class PiCam(BaseCamera):
    def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20):
        from imutils.video.pivideostream import PiVideoStream  #settings Pi camera
        from imutils.video import FPS

        self.image_d = image_d
        self.frame = None
        self.running = True
        resolution = (image_w, image_h)
        self.cap = PiVideoStream(resolution=resolution,
                                 framerate=framerate)  # rpi camera
        self.cap.start()
        self.cap.camera.iso = 0  # 0:auto, 100-200: sunny day
        self.cap.camera.awb_mode = 'auto'  # sunlight,cloudy,auto
        self.cap.camera.hflip = False
        self.cap.camera.vflip = False
        print('PiCamera loaded... warming camera')
        time.sleep(2.0)

        print("analog_gain: ", float(self.cap.camera.analog_gain))
        print("exposure_speed: ", self.cap.camera.exposure_speed)
        print("iso: ", self.cap.camera.iso)

    def poll(self):
        self.frame = self.cap.read()
        if self.image_d == 1:
            self.frame = rgb2gray(self.frame)

    def update(self):
        '''
        poll the camera for a frame
        '''
        while (self.running):
            self.poll()

    def run(self):
        self.poll()
        return self.frame

    # def run_threaded(self):
    #     return self.frame

    def shutdown(self):
        self.running = False
        time.sleep(0.2)
        self.cap.stop()
Example #3
def acquire_numpy_array_old(duration=8):
    from imutils.video.pivideostream import PiVideoStream
    fps = 90
    vs = PiVideoStream(resolution=(640, 480), framerate=fps).start()
    vs.camera.shutter_speed = 4000
    sleep(2)
    save_dir = '/home/pi/NumpyData/' + time_stamp()
    os.makedirs(save_dir)
    print("Acquiring + saving uncompressed video ... ")
    example_frame = vs.read()
    assert example_frame.size
    frames_shape = [fps * duration] + list(example_frame.shape)
    frames = np.empty(frames_shape, dtype=np.uint8)
    for i in range(fps * duration):
        last_frame_time = time()
        frames[i] = vs.read()
        sleep(max(0.0, 1.0 / fps - (time() - last_frame_time)))
    vs.stop()
    np.save(save_dir + '/data.npy', frames)
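The saved array can later be loaded straight back into numpy; a small sketch (the timestamped directory below is a placeholder for whatever time_stamp() produced):

import numpy as np

frames = np.load('/home/pi/NumpyData/<timestamp>/data.npy')
print(frames.shape, frames.dtype)  # (fps * duration, height, width, 3), uint8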
Example #4
class StingGui(object):

    buttonPins = {p: i for i, p in enumerate((17, 22, 23, 27))}
    framerate = 90
    screenWidth = 320
    screenHeight = 240

    blue = (0xff, 0, 0)
    green = (0, 0xa0, 0)
    red = (0, 0, 0xff)

    faceTimeout = 1.0
    logoTimeout = 5.0

    collectionId = 'sturdy-sting'

    def __init__(self, showLogo=True):

        self.ready = False
        self.menu = generateMenus(self)
        self._oldMenu = None

        self.toast = None
        self._oldToast = None

        self._oldButton = None

        self.running = True

        self.fireEnable = True
        self.addWhiteListFlag = False
        self.removeWhiteListFlag = False

        self.showLogo = showLogo
        self.startTime = time.time()

        self.logoImage = cv2.imread('logo.png')
        self.faceCascade = cv2.CascadeClassifier('face_cascade.xml')

        self.videoStream = VideoStream(framerate=self.framerate,
                                       resolution=(self.screenWidth,
                                                   self.screenHeight))

        self.isEnemy = False
        self.rekClient = boto3.client('rekognition')

    def __enter__(self):
        self.ready = True
        self.videoStream.start()

        GPIO.setmode(GPIO.BCM)
        for buttonPin in self.buttonPins.keys():
            GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
            GPIO.add_event_detect(buttonPin,
                                  GPIO.FALLING,
                                  callback=self.buttonPressed,
                                  bouncetime=50)

        cv2.namedWindow('disp', cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty('disp', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

        return self

    def __exit__(self, *args):
        self.videoStream.stop()
        for buttonPin in self.buttonPins.keys():
            GPIO.remove_event_detect(buttonPin)
            GPIO.cleanup(buttonPin)

    def buttonPressed(self, buttonPin):
        # The built-in debounce in rpi.GPIO isn't very good,
        # so we add our own
        time.sleep(.010)

        pressed = not GPIO.input(buttonPin)
        if pressed:
            buttonIndex = self.buttonPins[buttonPin]
            print 'button', buttonIndex
            self.menu = self.menu.select(buttonIndex)

    def stop(self):
        self.running = False

    def addOverlay(self, frame):

        if self.showLogo:
            delta = time.time() - self.startTime
            if delta > self.logoTimeout:
                self.showLogo = False
            else:
                weight = (self.logoTimeout - delta) / self.logoTimeout
                return cv2.addWeighted(frame, 1.0, self.logoImage, weight, 0)

        if self.toast:
            self.toast = self.toast.checkTimeout()

        if self.isEnemy and (not self.toast or not self.toast.priority):
            self.toast = Toast('TARGET ACQUIRED',
                               color=self.red,
                               timeout=1,
                               priority=True)
            self.screenShotFrames = 5
        elif not self.toast:
            self.toast = Toast(
                'Weapon Hot' if self.fireEnable else 'Weapon Safe', 0)

        menuChanged = self.menu != self._oldMenu
        toastChanged = self.toast != self._oldToast

        if menuChanged or toastChanged:
            self._oldMenu = self.menu
            self._oldToast = self.toast
            texts = ([x[0] for x in self.menu.options] + ['', '', '', ''])[:4]
            heightStep = self.screenHeight / len(texts)
            yOffset = heightStep / 2
            thickness = 1
            fontFace = cv2.FONT_HERSHEY_DUPLEX
            fontScale = getattr(self.menu, 'fontScale', 1.0)

            img = np.zeros((self.screenHeight, self.screenWidth, 3), np.uint8)
            for i, text in enumerate(texts):
                size, dummy = cv2.getTextSize(text, fontFace, fontScale,
                                              thickness)
                w, h = size
                x = self.screenWidth - w - 5
                y = i * heightStep + h + yOffset
                cv2.putText(img, text, (x, y), fontFace, fontScale,
                            (255, 255, 255), thickness)

            thickness = 2
            if self.menu.title:
                size, dummy = cv2.getTextSize(self.menu.title, fontFace,
                                              fontScale, thickness)
                w, h = size
                x = 0
                y = h
                cv2.putText(img, self.menu.title, (x, y), fontFace, fontScale,
                            (255, 255, 255), thickness)

            if self.toast:
                fontScale = getattr(self.toast, 'fontScale', 1.0)
                size, dummy = cv2.getTextSize(self.toast.text, fontFace,
                                              fontScale, thickness)
                x = 0
                y = int(self.screenHeight - size[1] * 1.1)
                cv2.putText(img, self.toast.text, (x, y), fontFace, fontScale,
                            self.toast.color, thickness)
            self._menuImage = img

        return cv2.add(frame, self._menuImage)

    def getFrame(self):
        frame = self.videoStream.read()
        frame = cv2.flip(frame, -1)  # rotate 180
        return frame

    def getFaces(self, frame):
        ''' Returns an array of tuples which are (faceImage, (x1, y1, x2, y2)) '''

        ret = []
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faceLocations = self.faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(40, 40),
            flags=cv2.CASCADE_SCALE_IMAGE)

        for (x, y, w, h) in faceLocations:
            cx, cy = int(x + w / 2), int(y + h / 2)
            hw, hh = [int(q * 2) / 2 for q in (w, h)]
            x1, x2 = cx - hw, cx + hw
            y1, y2 = cy - hh, cy + hh

            imgH, imgW = frame.shape[0:2]

            x1, y1 = [max(q, 0) for q in (x1, y1)]
            x2, y2 = [min(q, r) for q, r in ((x2, imgW), (y2, imgH))]

            if (x2 - x1) > 0 and (y2 - y1) > 0:
                faceImage = copy.deepcopy(gray[y1:y2, x1:x2])
                ret.append((faceImage, (x1, y1, x2, y2)))

        return ret

    def drawBoxesAroundFaces(self, frame, faces):

        color = self.red if self.isEnemy else self.blue
        if self.isEnemy:
            color = self.red
            for face, box in faces:
                x1, y1, x2, y2 = box
                w = x2 - x1
                h = y2 - y1
                centerX, centerY = center = (x1 + w / 2, y1 + h / 2)
                radius = int(max([q * .4 for q in (w, h)]))
                cv2.circle(frame, center, radius, color, 2)
                cv2.line(frame, (x1, centerY), (x2, centerY), color, 2)
                cv2.line(frame, (centerX, y1), (centerX, y2), color, 2)

        else:
            color = self.blue
            for face, box in faces:
                cv2.rectangle(frame, box[0:2], box[2:4], color, 2)

    def mainLoop(self):
        if not self.ready:
            raise RuntimeError('Class not in ready state; use it in a "with" statement')

        frameTries = 5

        lastFaceFoundTime = 0
        faceFound = False
        faceId = None
        frameTriesRemaining = 0

        faceRek = None

        loopStart = time.time()

        with Sting() as sting:
            while self.running:
                loopElapsed = time.time() - loopStart
                if loopElapsed > .2:
                    print 'loop took %d milliseconds' % (loopElapsed * 1000)
                loopStart = time.time()
                frame = self.getFrame()
                faces = self.getFaces(frame)
                self.drawBoxesAroundFaces(frame, faces)

                faceThisFrame = len(faces) > 0

                if faceThisFrame:
                    if faceFound == False:
                        if not self.addWhiteListFlag:
                            if faceRek:
                                faceRek.destroy()
                            faceRek = FaceRekognizer(self.collectionId)
                            frameTriesRemaining = frameTries

                        faceId = None
                        self.isEnemy = False

                    faceFound = True
                    lastFaceFoundTime = time.time()
                    faceImage = faces[0][0]

                    # Identify, Friend or Foe?
                    if frameTriesRemaining and faceRek:
                        frameTriesRemaining -= 1
                        faceRek.addImage(faceImage, frameTriesRemaining > 0)

                    if self.addWhiteListFlag:
                        jpg = imgToJpg(faceImage)
                        args = {
                            'CollectionId': self.collectionId,
                            'Image': {
                                'Bytes': jpg
                            }
                        }
                        try:
                            ret = self.rekClient.index_faces(**args)
                            self.toast = Toast('Friend added', 3)
                            self.menu = self.menu.select(GoBack())
                            faceId = None
                            faceFound = False
                            self.isEnemy = False
                        except Exception:
                            pass

                    elif self.removeWhiteListFlag and faceId:
                        ret = self.rekClient.delete_faces(
                            CollectionId=self.collectionId, FaceIds=[faceId])
                        print 'called delete_faces'
                        self.toast = Toast('Friend removed', 3)
                        self.menu = self.menu.select(GoBack())
                        faceId = None
                        faceFound = False
                        self.isEnemy = True
                # Coast for a bit if we don't find a face for a few frames
                elif faceFound and (time.time() - lastFaceFoundTime) > self.faceTimeout:
                    self.isEnemy = False
                    faceFound = False
                    faceId = None

                if faceRek and faceRek.finished:
                    faceId = faceRek.getResult()
                    self.isEnemy = not faceId
                    faceRek.destroy()
                    faceRek = None

                if faceRek:
                    print 'Checking    \r',
                else:
                    if faceFound:
                        if self.isEnemy:
                            print 'Enemy         \r',
                        else:
                            print 'Friend         \r',
                    else:
                        print 'idle               \r',

                sys.stdout.flush()

                # Where the rubber meets the road
                if self.fireEnable and faceFound:
                    if self.isEnemy or (faceRek and not faceRek.finished):
                        sting.flywheel = True
                        if self.isEnemy:
                            sting.trigger = True
                    else:
                        sting.trigger = False
                        sting.flywheel = False
                else:
                    sting.trigger = False
                    sting.flywheel = False

                # Update display
                frame = self.addOverlay(frame)
                cv2.imshow('disp', frame)
                cv2.waitKey(2)
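mainLoop() raises a RuntimeError unless the object has been entered as a context manager (__enter__ sets self.ready), so a minimal usage sketch for the class above looks like this:

if __name__ == '__main__':
    with StingGui(showLogo=True) as gui:
        gui.mainLoop()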
Example #5
def lcd_string(message, line):
    # NOTE: the snippet starts mid-function; the signature and the two lines below are
    # reconstructed from the lcd_string() calls further down and the usual HD44780 helpers.
    message = message.ljust(LCD_WIDTH, " ")
    lcd_byte(line, LCD_CMD)
    for i in range(LCD_WIDTH):
        lcd_byte(ord(message[i]), LCD_CHR)


lcd_string(" No Intruder", LCD_LINE_1)
#################################################
recognizer = cv2.createLBPHFaceRecognizer()
recognizer.load('trainner/trainner.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=1, help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())
cam = PiVideoStream()
cam = cam.start()
time.sleep(.5)
font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1)
while True:
    i = 0  # placeholder: in the original this is presumably read from the motion-sensor GPIO pin
    if i == 1:  # when the motion sensor output is HIGH
        print("Intruder Detected")
        GPIO.output(4, 1)  # Turn ON LED
        time.sleep(.5)
        GPIO.output(4, 0)
        lcd_string("Intruder Detect", LCD_LINE_1)
        lcd_string("    Room 1", LCD_LINE_2)
    img = cam.read()
    img = imutils.resize(img, width=450)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
Example #6
def ai():
    cap = PiVideoStream(resolution=(208, 208))  #rpi camera
    cap.start()
    time.sleep(2.0)
    cap.camera.hflip = True
    cap.camera.vflip = True

    print("[INFO] video recording")
    ##    out = cv2.VideoWriter('avcnet.avi',cv2.VideoWriter_fourcc(*'XVID'), 10, (208,208))

    # load the trained convolutional neural network
    print("[INFO] loading network...")
    ##    model = load_model("./avcnet_v1.model")
    model = load_model("/home/pi/drone_exe/avcnet_best_5.hdf5",
                       custom_objects={"tf": tf})

    # default parameters
    orient = 0
    tim_old = 0.1
    state = 'fly'
    dist = 510
    global velocity_y
    velocity_y = 0
    fly_1 = 0.9
    k = 0.83  # k=(tau/T)/(1+tau/T) tau time constant LPF, T period

    while True:
        start = time.time()
        frame = cap.read()
        orig = frame.copy()
        frame = cv2.resize(frame, (64, 64))
        frame = frame.astype("float") / 255.0
        frame = img_to_array(frame)
        frame = np.expand_dims(frame, axis=0)

        # classify the input image
        (stop, left, right, fly) = model.predict(frame)[0]
        # build the label
        ##        my_dict = {'stop':stop, 'left':left, 'right':right,'fly':fly}
        my_dict = {'stop': stop, 'left': left, 'right': right}
        maxPair = max(my_dict.iteritems(), key=itemgetter(1))
        fly_f = k * fly_1 + (1 - k) * fly
        fly_1 = fly_f

        if state == 'avoid':
            ##            label=maxPair[0]
            ##            proba=maxPair[1]
            if fly_f * 100 >= 60:
                dist = 510
                state = 'fly'
                print >> f, state

        else:
            label = 'forward'
            proba = fly
            if fly_f * 100 <= 50:
                dist = 180
                label = maxPair[0]
                proba = maxPair[1]
                print >> f, my_dict
                state = 'avoid'

        label_1 = "{} {:.1f}% {}".format(label, proba * 100, state)
        # draw the label on the image
        output = imutils.resize(orig, width=208)
        cv2.putText(output, label_1, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

        # Write the frame into the file 'output.avi'
        ##        out.write(output)

        if vehicle.channels['8'] > 1700:
            event.clear()
        if vehicle.channels['8'] > 1400 and vehicle.channels['8'] < 1700:
            if state == "fly":
                event.clear()

            if state == "avoid":
                event.set()

                if label == 'left':
                    velocity_y = -0.5
                if label == 'right':
                    velocity_y = 0.5
                if label == 'stop':
                    velocity_y = 0

        if vehicle.channels['8'] < 1400:
            event.clear()

        msg_sensor(dist, orient)

        # show the output frame
        cv2.imshow("Frame", output)
        key = cv2.waitKey(10) & 0xFF

        # if the `Esc` key was pressed, break from the loop
        if key == 27:
            break

    # do a bit of cleanup
    f.close()
    print('end')

    cv2.destroyAllWindows()
    cap.stop()
Example #7
class DroidVisionThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME) # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE # 0 = stop, 0.5 = medium speed, 1 = fastest

    def run(self):
        debug("DroidVisionThread: Thread started")
        self.vision_processing()
        self.camera.stop()
        cv2.destroyAllWindows()
        debug("DroidVisionThread: Thread stopped")

    def stop(self):
        self.running = False
        debug("DroidVisionThread: Stopping thread")

    def vision_processing(self):
        while self.running:
            self.grab_frame()

            # colour mask
            blue_mask = cv2.inRange(self.frame_chroma, config.BLUE_CHROMA_LOW, config.BLUE_CHROMA_HIGH)
            yellow_mask = cv2.inRange(self.frame_chroma, config.YELLOW_CHROMA_LOW, config.YELLOW_CHROMA_HIGH)
            colour_mask = cv2.bitwise_or(blue_mask, yellow_mask)
            colour_mask = cv2.erode(colour_mask, config.ERODE_KERNEL)
            colour_mask = cv2.dilate(colour_mask, config.DILATE_KERNEL)
            
            # lines
            lines = cv2.HoughLinesP(colour_mask, config.HOUGH_LIN_RES, config.HOUGH_ROT_RES, config.HOUGH_VOTES, config.HOUGH_MIN_LEN, config.HOUGH_MAX_GAP)
            blue_lines = np.array([])
            yellow_lines = np.array([])
            if lines is not None:
                for line in lines:
                    x1,y1,x2,y2 = line[0]
                    angle = np.rad2deg(np.arctan2(y2-y1, x2-x1))
                    if config.MIN_LINE_ANGLE < abs(angle) < config.MAX_LINE_ANGLE:
                        if config.IMSHOW:
                            cv2.line(self.frame, (x1,y1), (x2,y2), (0,0,255), 1)
                        if angle > 0:
                            yellow_lines = np.append(yellow_lines, [x1, x2])
                        elif angle < 0:
                            blue_lines = np.append(blue_lines, [x1, x2])

            # find centre
            blue_mean = self.last_blue_mean
            yellow_mean = self.last_yellow_mean
            if len(blue_lines):
                blue_mean = int(np.mean(blue_lines))
            if len(yellow_lines):
                yellow_mean = int(np.mean(yellow_lines))
            self.centre = (blue_mean + yellow_mean) / 2.0
            self.last_blue_mean = blue_mean
            self.last_yellow_mean = yellow_mean

            self.can_see_lines = (len(blue_lines) or len(yellow_lines))

            # set steering and throttle
            self.desired_steering = (1.0 - (self.centre / config.WIDTH))
            if len(blue_lines) or len(yellow_lines):
                self.desired_throttle = 0.22
            else:
                self.desired_throttle = 0

            if config.IMSHOW:
                cv2.circle(self.frame, (int(self.centre), config.HEIGHT - 20), 10, (0,0,255), -1)
                cv2.imshow("colour_mask without noise", colour_mask)
                cv2.imshow("raw frame", self.frame)
                cv2.waitKey(1)

    def grab_frame(self):
        self.fps_counter.update()
        # grab latest frame and index the ROI
        self.frame = self.camera.read()
        self.frame = self.frame[config.ROI_YMIN:config.ROI_YMAX, config.ROI_XMIN:config.ROI_XMAX]
        # convert to chromaticity colourspace
        B = self.frame[:, :, 0].astype(np.uint16)
        G = self.frame[:, :, 1].astype(np.uint16)
        R = self.frame[:, :, 2].astype(np.uint16)    
        Y = 255.0 / (B + G + R)
        b = (B * Y).astype(np.uint8)
        g = (G * Y).astype(np.uint8)
        r = (R * Y).astype(np.uint8)
        self.frame_chroma = cv2.merge((b,g,r))

    def get_fps(self):
        self.fps_counter.stop()
        return self.fps_counter.fps()

    def get_error(self):
        return (config.CENTRE - self.centre)

    def get_steering_throttle(self):
        return self.desired_steering, self.desired_throttle
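A minimal sketch of how the thread might be driven from a control loop (the 50 ms polling interval is an illustrative assumption):

import time

vision = DroidVisionThread()
vision.start()  # run() executes vision_processing() in the background
try:
    while True:
        steering, throttle = vision.get_steering_throttle()
        # hand steering/throttle to the motor controller here
        time.sleep(0.05)
except KeyboardInterrupt:
    vision.stop()  # lets vision_processing() return and release the camera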
Example #8
from imutils.video.pivideostream import PiVideoStream
from imutils.video import FPS  # to measure FPS
import cv2   # OpenCV library
import time  # time functions

# HSV thresholds for green
greenLower = (85, 126, 60)
greenUpper = (95, 255, 255)
# Create the display window
cv2.namedWindow('Tracking', cv2.WINDOW_NORMAL)
# Start the video stream on a separate thread
vs = PiVideoStream()
vs.camera.video_stabilization = True
vs.start()
# Give the camera time to warm up
time.sleep(1.0)
# Start the FPS counter
fps = FPS().start()
# Main loop
while True:
    frame = vs.read()
    #frame = imutils.resize(frame, width = 400)
    # Convert the frame to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Build a mask for the chosen colour
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    # Two erosion passes to remove small blobs
    mask = cv2.erode(mask, None, iterations=2)
    # Two dilation passes to restore the object's size
    mask = cv2.dilate(mask, None, iterations=2)
    # Detect the contours in the mask
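    # (Hypothetical continuation -- the published example stops at the comment above.
    #  A typical next step: keep the largest green contour, draw it, update the FPS counter.)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # OpenCV 3 vs 4 return values
    if len(cnts) > 0:
        # Keep the largest contour and draw its enclosing circle
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        if radius > 5:
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
    cv2.imshow('Tracking', frame)
    fps.update()
    # Exit the loop when 'q' is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Stop the FPS counter and release resources
fps.stop()
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
vs.stop()
cv2.destroyAllWindows()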
Example #9
def ai():
    cap = PiVideoStream(resolution=(208, 208), framerate=20)  #rpi camera
    cap.start()
    cap.camera.iso = 0  # 0:auto, 100-200: sunny day
    cap.camera.awb_mode = 'sunlight'  # sunlight,cloudy,auto
    cap.camera.hflip = True
    cap.camera.vflip = True
    time.sleep(2.0)

    print("analog_gain: ", float(cap.camera.analog_gain), file=f)
    print("exposure_speed: ", cap.camera.exposure_speed, file=f)
    print("iso: ", cap.camera.iso, file=f)

    print("[INFO] video recording")
    out = cv2.VideoWriter('avcnet.avi', cv2.VideoWriter_fourcc(*'XVID'), 4,
                          (208, 208))

    # load the trained convolutional neural network
    print("[INFO] loading network...")
    print('inference:', inference)
    print('inference:', inference, file=f)

    if inference == 'k':
        from keras.models import load_model
        import tensorflow as tf
        from keras.preprocessing.image import img_to_array
        model = load_model("/home/pi/drone_exe/TF_model/fly1.h5",
                           custom_objects={"tf": tf})

    if inference == 'tf':
        from tensorflow.python.platform import gfile
        import tensorflow as tf
        from keras.preprocessing.image import img_to_array
        f1 = gfile.FastGFile("/home/pi/drone_exe/TF_model/tf_model_cv.pb",
                             'rb')
        graph_def = tf.GraphDef()
        # Parses a serialized binary message into the current message.
        graph_def.ParseFromString(f1.read())
        f1.close()
        sess = tf.Session()
        sess.graph.as_default()
        # Import a serialized TensorFlow `GraphDef` protocol buffer
        # and place into the current default `Graph`.
        tf.import_graph_def(graph_def)
        softmax_tensor = sess.graph.get_tensor_by_name(
            'import/activation_5/Softmax:0')

    if inference == 'dnn':
        net = cv2.dnn.readNetFromTensorflow(
            '/home/pi/drone_exe/TF_model/fly1.pb')
        ##        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) # if no NCS stick
        ##        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) # if no NCS stick
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    # default parameters
    orient = 0
    distmax = 600
    tim_old = 0.1
    state = 'fly'
    dist = 510
    global velocity_y
    velocity_y = 0
    fly_1 = 0.9
    k = 0.66  # k=(tau/T)/(1+tau/T) tau time constant LPF, T period
    ##    k=0 # no filtering
    fps = FPS().start()

    while True:
        start = time.time()
        frame = cap.read()
        orig = frame.copy()
        ##

        #===========================dnn============================================
        if inference == 'dnn':
            # use cv2.dnn module for inference
            resized = cv2.resize(frame, (64, 64))
            scale = 1 / 255.0  # AI trained with scaled images image/255
            blob = cv2.dnn.blobFromImage(resized,
                                         scale, (64, 64), (0, 0, 0),
                                         swapRB=True,
                                         ddepth=5)
            net.setInput(blob)
            predictions = net.forward()

        #===========================tf=============================================
        if inference == 'tf':
            #use tf for inference
            frame = cv2.resize(frame, (64, 64))
            frame = cv2.cvtColor(
                frame, cv2.COLOR_RGB2BGR)  # @ training images RGB->BGR
            frame = frame.astype("float") / 255.0
            frame = img_to_array(frame)
            frame = np.expand_dims(frame, axis=0)
            predictions = sess.run(softmax_tensor,
                                   {'import/img_input:0': frame})

        #===========================k==============================================
        if inference == 'k':
            #use k for inference
            frame = cv2.resize(frame, (64, 64))
            frame = cv2.cvtColor(
                frame, cv2.COLOR_RGB2BGR)  # @ training images RGB->BGR
            frame = frame.astype("float") / 255.0
            frame = img_to_array(frame)
            frame = np.expand_dims(frame, axis=0)
            predictions = model.predict(frame)

        #===========================================================================

        # classify the input image
        fly = predictions[0][0]
        # build the label

        my_dict = {
            'stop': predictions[0][3],
            'left': predictions[0][1],
            'right': predictions[0][2]
        }
        maxPair = max(my_dict.items(), key=itemgetter(1))
        fly_f = k * fly_1 + (1 - k) * fly
        fly_1 = fly_f

        if state == 'avoid':
            ##            label=maxPair[0]
            ##            proba=maxPair[1]
            if fly_f * 100 >= 60:
                dist = 510
                state = 'fly'
                print(state, file=f)

        else:
            label = 'forward'
            proba = fly
            if fly_f * 100 <= 50:
                dist = 180
                label = maxPair[0]
                proba = maxPair[1]
                print(my_dict, file=f)
                state = 'avoid'

        label_1 = "{} {:.1f}% {}".format(label, proba * 100, state)
        # draw the label on the image
        output = imutils.resize(orig, width=208)
        cv2.putText(output, label_1, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

        # Write the frame into the file 'output.avi'
        out.write(output)

        if vehicle.channels['8'] > 1700:
            event.clear()
        if vehicle.channels['8'] > 1400 and vehicle.channels['8'] < 1700:
            if state == "fly":
                event.clear()

            if state == "avoid":
                event.set()

                if label == 'left':
                    velocity_y = -0.8
                if label == 'right':
                    velocity_y = 0.8
                if label == 'stop':
                    velocity_y = 0

        if vehicle.channels['8'] < 1400:
            event.clear()

        msg_sensor(dist, 0, 600)

        # show the output frame
        cv2.imshow("Frame", output)
        key = cv2.waitKey(10) & 0xFF

        # update the FPS counter
        fps.update()
        # if the `Esc` key was pressed, break from the loop
        if key == 27:
            break

    # do a bit of cleanup
    # stop the timer and save FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()), file=f)
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()), file=f)
    f.close()
    print('end')

    cv2.destroyAllWindows()
    cap.stop()
    out.release()
Example #10
def get_video():
	array,_ = freenect.sync_get_video()
	array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
	return array
 
#function to get depth image from kinect
def get_depth():
	array,_ = freenect.sync_get_depth()
	array = array.astype(np.uint8)
	#array = array * 32
	return array
# initialize the camera and grab a reference to the raw camera capture

#res = '(640, 480)'    
stream = PiVideoStream(resolution=(640, 480)).start()
#camera = PiCamera()
#camera.resolution = (640, 480)
#camera.framerate = 30
 
# allow the camera to warmup
time.sleep(1)

if __name__ == "__main__":
	while 1:
		#get a frame from RGB camera
		frame = stream.read()
		#get a frame from depth sensor
		depth = get_depth()
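		# (Hypothetical continuation: the example stops here, so as a minimal sketch
		#  show both streams and exit on Esc.)
		cv2.imshow('RGB', frame)
		cv2.imshow('Depth', depth)
		if cv2.waitKey(1) & 0xFF == 27:
			break

	# stop the threaded Pi camera stream and close the windows
	stream.stop()
	cv2.destroyAllWindows()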

		
Example #11
class DroidVisionThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH,
                                                config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME)  # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING  # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE  # 0 = stop, 0.5 = medium speed, 1 = fastest

    def run(self):
        debug("DroidVisionThread: Thread started")
        self.vision_processing()
        self.camera.stop()
        cv2.destroyAllWindows()
        debug("DroidVisionThread: Thread stopped")

    def stop(self):
        self.running = False
        debug("DroidVisionThread: Stopping thread")

    def vision_processing(self):
        while self.running:
            self.grab_frame()

            # colour mask
            blue_mask = cv2.inRange(self.frame_chroma, config.BLUE_CHROMA_LOW,
                                    config.BLUE_CHROMA_HIGH)
            yellow_mask = cv2.inRange(self.frame_chroma,
                                      config.YELLOW_CHROMA_LOW,
                                      config.YELLOW_CHROMA_HIGH)
            colour_mask = cv2.bitwise_or(blue_mask, yellow_mask)
            colour_mask = cv2.erode(colour_mask, config.ERODE_KERNEL)
            colour_mask = cv2.dilate(colour_mask, config.DILATE_KERNEL)

            # lines
            lines = cv2.HoughLinesP(colour_mask, config.HOUGH_LIN_RES,
                                    config.HOUGH_ROT_RES, config.HOUGH_VOTES,
                                    config.HOUGH_MIN_LEN, config.HOUGH_MAX_GAP)
            blue_lines = np.array([])
            yellow_lines = np.array([])
            if lines is not None:
                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    angle = np.rad2deg(np.arctan2(y2 - y1, x2 - x1))
                    if config.MIN_LINE_ANGLE < abs(
                            angle) < config.MAX_LINE_ANGLE:
                        if config.IMSHOW:
                            cv2.line(self.frame, (x1, y1), (x2, y2),
                                     (0, 0, 255), 1)
                        if angle > 0:
                            yellow_lines = np.append(yellow_lines, [x1, x2])
                        elif angle < 0:
                            blue_lines = np.append(blue_lines, [x1, x2])

            # find centre
            blue_mean = self.last_blue_mean
            yellow_mean = self.last_yellow_mean
            if len(blue_lines):
                blue_mean = int(np.mean(blue_lines))
            if len(yellow_lines):
                yellow_mean = int(np.mean(yellow_lines))
            self.centre = (blue_mean + yellow_mean) / 2.0
            self.last_blue_mean = blue_mean
            self.last_yellow_mean = yellow_mean

            self.can_see_lines = (len(blue_lines) or len(yellow_lines))

            # set steering and throttle
            self.desired_steering = (1.0 - (self.centre / config.WIDTH))
            if len(blue_lines) or len(yellow_lines):
                self.desired_throttle = 0.22
            else:
                self.desired_throttle = 0

            if config.IMSHOW:
                cv2.circle(self.frame, (int(self.centre), config.HEIGHT - 20),
                           10, (0, 0, 255), -1)
                cv2.imshow("colour_mask without noise", colour_mask)
                cv2.imshow("raw frame", self.frame)
                cv2.waitKey(1)

    def grab_frame(self):
        self.fps_counter.update()
        # grab latest frame and index the ROI
        self.frame = self.camera.read()
        self.frame = self.frame[config.ROI_YMIN:config.ROI_YMAX,
                                config.ROI_XMIN:config.ROI_XMAX]
        # convert to chromaticity colourspace
        B = self.frame[:, :, 0].astype(np.uint16)
        G = self.frame[:, :, 1].astype(np.uint16)
        R = self.frame[:, :, 2].astype(np.uint16)
        Y = 255.0 / (B + G + R)
        b = (B * Y).astype(np.uint8)
        g = (G * Y).astype(np.uint8)
        r = (R * Y).astype(np.uint8)
        self.frame_chroma = cv2.merge((b, g, r))

    def get_fps(self):
        self.fps_counter.stop()
        return self.fps_counter.fps()

    def get_error(self):
        return (config.CENTRE - self.centre)

    def get_steering_throttle(self):
        return self.desired_steering, self.desired_throttle
Example #12
class Detector:
    """カメラ映像を読み込み、人体検知を用いたスクワット回数の計測を行う"""
    def __init__(self,
                 model_path,
                 threshold=0.8,
                 resolution=(512, 352),
                 framerate=5,
                 head_line=80):
        """初期化処理"""
        # True: 立っている状態, False: かがんでいる状態
        self.is_standing = True
        # is_standing の判定に使うしきい値
        self.head_line = head_line

        # カメラへの接続
        self.camera = PiVideoStream(resolution=resolution, framerate=framerate)

        # どのくらいの尤度で人として検知するか
        self.threshold = threshold
        # TFLiteモデルの読み込み
        self.detector = tf.lite.Interpreter(model_path=model_path)
        # TFLiteの初期化
        self.detector.allocate_tensors()
        self.detector.set_num_threads(4)
        # モデルの入出力情報の取得
        self.input_details = self.detector.get_input_details()
        self.output_details = self.detector.get_output_details()
        self.input_height = self.input_details[0]['shape'][1]
        self.input_width = self.input_details[0]['shape'][2]

    def start(self):
        """カメラ映像の読み込み開始"""
        self.camera.start()

    def stop(self):
        """カメラ映像の読み込みを終了"""
        self.camera.stop()

    def process_frame(self, count):
        squatted = False
        # read a video frame
        frame = self.camera.read()
        # run person detection
        detection = self._detect(frame)
        if detection:
            # draw the detection result on the frame
            self._draw_box(frame, detection)
            # decide whether a squat was completed
            squatted = self._update_state(head_position=detection[4])

        # draw the head_line position
        cv2.line(frame, (0, self.head_line), (frame.shape[1], self.head_line),
                 (0, 255, 255), 2)
        # draw the is_standing state
        cv2.putText(frame, f'standing: {self.is_standing}', (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
        # draw the squat count
        cv2.putText(frame, str(count), (5, 332), cv2.FONT_HERSHEY_SIMPLEX, 4,
                    (0, 255, 255), 2)

        return squatted, frame

    def _detect(self, frame):
        """人体検知の実行"""
        # 映像フレームのサイズを取得
        height, width, channels = frame.shape
        # 入力データの用意
        resized = cv2.resize(frame, (self.input_width, self.input_height))
        data = np.expand_dims(resized, axis=0)
        self.detector.set_tensor(self.input_details[0]['index'], data)
        # 推論の実行
        self.detector.invoke()
        # 推論結果の取得
        boxes = self.detector.get_tensor(self.output_details[0]['index'])
        scores = self.detector.get_tensor(self.output_details[2]['index'])
        num_boxes = self.detector.get_tensor(self.output_details[3]['index'])
        if int(num_boxes) < 1:
            return
        # 最も尤度の高かった物体の情報のみ取得
        top, left, bottom, right = boxes[0][0]
        score = scores[0][0]
        # 尤度が threshold 以下の場合は無視する
        if score < self.threshold:
            return
        # 矩形座標をオリジナル画像用にスケール
        xmin = int(left * width)
        ymin = int(bottom * height)
        xmax = int(right * width)
        ymax = int(top * height)

        return (score, xmin, ymin, xmax, ymax)

    def _draw_box(self, frame, detection):
        """推論結果を映像フレームに描画"""
        score, xmin, ymin, xmax, ymax = detection
        # 検知した人の位置を描画
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        # スクワットを行う際の目印となる点を描画
        xcent = int(((xmax - xmin) // 2) + xmin)
        cv2.circle(frame, (xcent, ymax), 3, (0, 0, 255), 5, 8, 0)
        # 尤度の描画
        label = 'person: {:.2f}%'.format(score * 100)
        y = ymin - 15 if ymin - 15 > 15 else ymin + 15
        cv2.putText(frame, label, (xmin, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 1)

    def _update_state(self, head_position):
        """
        立ち・屈みの状態を更新し、
        屈みから立ちの状態になった場合はスクワットした判定を返す
        """
        current = head_position < self.head_line
        # もし以前と状態が違うなら
        if current is not self.is_standing:
            # 新しい状態に更新する
            self.is_standing = current
            # 屈みから立ちの状態になった場合は
            if current:
                # Trueを返す
                return True
        # そうでない場合はFalseを返す
        return False
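A minimal counting loop built on the Detector above (the model path and window name are illustrative assumptions, not taken from the original project):

detector = Detector(model_path='person_detect.tflite')
detector.start()
count = 0
try:
    while True:
        squatted, frame = detector.process_frame(count)
        if squatted:
            count += 1
        cv2.imshow('squat counter', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
            break
finally:
    detector.stop()
    cv2.destroyAllWindows()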