Example #1
    def __init__(self):
        self.webcam = WebcamVideoStream()
        self.webcam.start()
        self.detection = Detection()

        #hand gesture status
        self.is_okay = False
        self.is_vhand = False
        self.is_phand = False
        self.is_palm = False
        self.is_fist = False

        #get location of hand when tracking
        self.x_axis = 0.0
        self.y_axis = 0.0

        #get tracker location
        self.yprime_axis = 0
        self.xprime_axis = 0

        #get middle of frame
        self.x_center = (self.webcam.camera().get(
            cv2.CAP_PROP_FRAME_WIDTH)) / 2
        self.y_center = (self.webcam.camera().get(
            cv2.CAP_PROP_FRAME_HEIGHT)) / 2
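Note: this and the other HandTracker examples call self.webcam.camera() to read the capture properties, which the stock imutils WebcamVideoStream does not provide. A minimal sketch of the kind of wrapper these snippets appear to assume (names and threading details are assumptions, not the project's actual code):

import cv2
from threading import Thread

class WebcamVideoStream:
    """Threaded webcam reader that also exposes the underlying cv2.VideoCapture."""

    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def camera(self):
        # accessor used above to query CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT
        return self.stream

    def start(self):
        Thread(target=self.update, daemon=True).start()
        return self

    def update(self):
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True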
Example #2
def from_stream():

    fps = FPS().start()
    cam = WebcamVideoStream().start()

    max_frames = 50
    i = 0

    while True:

        frame = cam.read()

        if i > max_frames:

            fps.stop()
            print(fps.elapsed())
            print(fps.fps())
            break

        i += 1

        testcone(frame, stream=True)
        fps.update()
        cv2.imshow('', frame)
        cv2.waitKey(1)
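Note: the snippet assumes WebcamVideoStream and FPS in the style of imutils.video, plus a project-specific testcone() detector. A hedged sketch of the imports and entry point it could be run with (the stub body is an assumption):

from imutils.video import FPS, WebcamVideoStream
import cv2

def testcone(frame, stream=True):
    # placeholder for the project's cone detector (assumption only)
    pass

if __name__ == "__main__":
    from_stream()
    cv2.destroyAllWindows()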
Example #3
    def __init__(self):
        self.webcam = WebcamVideoStream()
        self.webcam.start()
        self.detection = Detection()
        #self.servoYaw = Servo(205,409,0)
        #self.servoPitch = Servo(205,409,4)
        #self.servoYaw.neutralPos()
        #self.servoPitch.neutralPos()

        self.system = System()
        self.system.servoSetAllNeutral()
        #lights instances

        #hand gesture status
        self.is_okay  = False
        self.is_vhand = False
        self.is_phand = False
        self.is_palm  = False
        self.is_fist  = False

        #get location of hand when tracking
        self.x_axis = 0.0
        self.y_axis = 0.0

        #get tracker location
        self.yprime_axis = 0
        self.xprime_axis = 0

        #get middle of frame
        self.x_center = (self.webcam.camera().get(cv2.CAP_PROP_FRAME_WIDTH)) / 2
        self.y_center = (self.webcam.camera().get(cv2.CAP_PROP_FRAME_HEIGHT)) / 2
Example #4
File: studio.py  Project: schultzca/cv
def main():

    # Create file queue
    file_queue = queue.Queue()

    # Initialize video stream
    vs = WebcamVideoStream(src=0).start()

    # Initialize image writer
    ir = ImageWriter(queue=file_queue).start()

    name = input("Please enter name of individual: ")

    while True:

        # Read most recent video frame
        frame = vs.read()

        # Copy frame to display and annotate
        frame_copy = frame.copy()

        # Overlay name to frame
        cv2.putText(frame_copy, name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                    (0, 255, 0), 2, cv2.LINE_AA)

        # Display current frame
        cv2.imshow("Video", frame_copy)

        # Poll for key presses
        key = cv2.waitKey(1) & 0xFF

        # Enter new name
        if key == ord("n"):
            name = input("Please enter name of individual: ")

        # Capture picture and write to disk
        if key == ord("p"):
            file_queue.put((name, frame))

        # Quit application
        if key == ord("q"):
            cv2.destroyAllWindows()
            break

    # Stop the image writing thread
    ir.stop()

    # Stop the video stream thread
    vs.stop()
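Note: ImageWriter is not shown in this example; the loop only requires a background consumer that drains (name, frame) tuples from the queue and writes them to disk. A sketch under that assumption (the file-naming scheme is hypothetical):

from queue import Empty
import threading
import time
import cv2

class ImageWriter:
    """Background thread that saves (name, frame) tuples pushed onto a queue."""

    def __init__(self, queue):
        self.queue = queue
        self.stopped = False

    def start(self):
        threading.Thread(target=self._run, daemon=True).start()
        return self

    def _run(self):
        while not self.stopped:
            try:
                name, frame = self.queue.get(timeout=0.1)
            except Empty:
                continue
            # hypothetical naming scheme: <name>_<timestamp>.png
            cv2.imwrite("{}_{}.png".format(name, int(time.time())), frame)

    def stop(self):
        self.stopped = True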
Example #5
class VideoStream:
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32,
                 image_flip=0,
                 **kwargs):

        self.image_flip = image_flip
        # check to see if the picamera module should be used
        if usePiCamera:
            from pi_camera import PiVideoStream

            # initialize the picamera stream and let the camera sensor warm up
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate,
                                        **kwargs)

        # otherwise, we are using OpenCV so initialize the webcam stream
        else:
            self.stream = WebcamVideoStream(src=src,
                                            resolution=resolution,
                                            framerate=framerate)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def read(self):
        # return the current frame
        return self.flip_if_needed(self.stream.read())

    def read_jpg(self):
        frame = self.flip_if_needed(self.stream.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def flip_if_needed(self, frame):
        if self.image_flip:
            return np.fliplr(frame)
        return frame

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
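A short usage sketch for the wrapper above (the window name and quit key are illustrative):

import cv2

vs = VideoStream(src=0, image_flip=1).start()
try:
    while True:
        frame = vs.read()
        cv2.imshow("preview", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    vs.stop()
    cv2.destroyAllWindows()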
Example #6
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages unless we are
            # explicitly told to do so -- this helps remove the
            # requirement of `picamera[array]` from desktops or
            # laptops that still want to use the `imutils` package
            from picam import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            self.stream = WebcamVideoStream(src=src)
Example #7
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32,
                 image_flip=0,
                 **kwargs):

        self.image_flip = image_flip
        # check to see if the picamera module should be used
        if usePiCamera:
            from pi_camera import PiVideoStream

            # initialize the picamera stream and let the camera sensor warm up
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate,
                                        **kwargs)

        # otherwise, we are using OpenCV so initialize the webcam stream
        else:
            self.stream = WebcamVideoStream(src=src,
                                            resolution=resolution,
                                            framerate=framerate)
Example #8
class VideoStream:
    def __init__(self,
                 src=0,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32,
                 rotation=0):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages unless we are
            # explicitly told to do so -- this helps remove the
            # requirement of `picamera[array]` from desktops or
            # laptops that still want to use the `imutils` package
            from picam import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate,
                                        rotation=rotation)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
Example #9
def test_cam(args):
    """Function to predict for a camera image stream
    """

    if torch.cuda.is_available() and not args.no_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    download_model_if_doesnt_exist(args.model_name)
    model_path = os.path.join("models", args.model_name)
    print("-> Loading model from ", model_path)
    encoder_path = os.path.join(model_path, "encoder.pth")
    depth_decoder_path = os.path.join(model_path, "depth.pth")

    # LOADING PRETRAINED MODEL
    print("   Loading pretrained encoder")
    encoder = networks.ResnetEncoder(18, False)
    loaded_dict_enc = torch.load(encoder_path, map_location=device)

    # Extract the height and width of image that this model was trained with
    feed_height = loaded_dict_enc['height']
    feed_width = loaded_dict_enc['width']
    filtered_dict_enc = {
        k: v
        for k, v in loaded_dict_enc.items() if k in encoder.state_dict()
    }
    encoder.load_state_dict(filtered_dict_enc)
    encoder.to(device)
    encoder.eval()

    print("   Loading pretrained decoder")
    depth_decoder = networks.DepthDecoder(num_ch_enc=encoder.num_ch_enc,
                                          scales=range(4))
    loaded_dict = torch.load(depth_decoder_path, map_location=device)
    depth_decoder.load_state_dict(loaded_dict)
    depth_decoder.to(device)
    depth_decoder.eval()

    print("-> Loading complete, initializing the camera")

    # Initialize camera to capture image stream
    # Change the value to 0 when using default camera
    video_stream = WebcamVideoStream(src=args.webcam).start()

    if not args.no_display:
        # Object to display images
        image_display = DisplayImage(not args.no_process)

    # Flag that records when 'q' is pressed to break out of inference loop below
    quit_inference = False

    def on_release(key):
        if key == keyboard.KeyCode.from_char('q'):
            nonlocal quit_inference
            quit_inference = True
            return False

    keyboard.Listener(on_release=on_release).start()

    # Number of frames to capture to calculate fps
    num_frames = 5
    curr_time = np.zeros(num_frames)
    with torch.no_grad():
        while True:
            if quit_inference:
                if args.no_display:
                    print('-> Done')
                break

            # Capture frame-by-frame
            frame = video_stream.read()

            # Calculate the fps
            curr_time[1:] = curr_time[:-1]
            curr_time[0] = time.time()
            fps = num_frames / (curr_time[0] - curr_time[len(curr_time) - 1])

            # Our operations on the frame come here
            input_image = pil.fromarray(frame).convert('RGB')
            original_width, original_height = input_image.size
            input_image = input_image.resize((feed_width, feed_height),
                                             pil.LANCZOS)
            input_image = transforms.ToTensor()(input_image).unsqueeze(0)

            # PREDICTION
            input_image = input_image.to(device)
            features = encoder(input_image)
            outputs = depth_decoder(features)

            disp = outputs[("disp", 0)]
            disp_resized = torch.nn.functional.interpolate(
                disp, (original_height, original_width), mode="nearest")

            # Get the predicted depth
            scaled_disp, pred_depth = disp_to_depth(disp_resized, 0.1, 100)
            pred_depth_np = pred_depth.squeeze().cpu().detach().numpy()

            # Initialize a 3x4 depth map
            depth_map = np.zeros([3, 4])
            for i in range(len(depth_map)):
                for j in range(len(depth_map[0])):
                    # Store the average depth of each 160x160 cell of the 640x480 map in the 3x4 grid
                    depth_map[i][j] = get_avg_depth(pred_depth_np, 160 * i,
                                                    160 * j, 160 * i + 160,
                                                    160 * j + 160)

            # Giving a simple decision logic
            if depth_map[0, 1] <= 1 or depth_map[1, 1] <= 1 or depth_map[
                    0, 2] <= 1 or depth_map[1, 2] <= 1:
                if depth_map[1, 1] <= 1 and depth_map[1, 2] <= 1:
                    print("Dangerous!!! AHEAD")
                else:
                    if depth_map[0, 1] <= 1 or depth_map[1, 1] <= 1:
                        print("Dangerous!!! LEFT")
                    if depth_map[0, 2] <= 1 or depth_map[1, 2] <= 1:
                        print("Dangerous!!! RIGHT")
            elif np.sum(depth_map[0:2, 0:1]) <= 7 or np.sum(
                    depth_map[0:2, 2:3]) <= 7:
                if np.sum(depth_map[0:2, 0:1]) <= 7:
                    print("Careful!! LEFT")
                if np.sum(depth_map[0:2, 2:3]) <= 7:
                    print("Careful!! RIGHT")
            else:
                print("Clear")

            if not args.no_display:
                # DISPLAY
                # Generate color-mapped depth image
                disp_resized_np = disp_resized.squeeze().cpu().detach().numpy()
                image_display.display(frame,
                                      disp_resized_np,
                                      fps,
                                      original_width,
                                      original_height,
                                      blended=not args.no_blend)
            else:
                print(f"FPS: {fps}")

            # if quit_inference:
            #    if args.no_display:
            #        print('-> Done')
            #    break

    # When everything is done, stop camera stream
    video_stream.stop()
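Note: get_avg_depth is not defined in the snippet. Judging from the call above, it averages the predicted depth inside one 160x160 cell of the 480x640 map; a sketch under that assumption:

import numpy as np

def get_avg_depth(depth, row_start, col_start, row_end, col_end):
    """Average predicted depth inside one cell of an HxW depth array (assumed helper)."""
    return float(np.mean(depth[row_start:row_end, col_start:col_end]))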
Example #10
#
# @socketio.on('connect', namespace='/test')
# def test_connect():
#     emit('my response', {'data': 'Connected'})
#
# @socketio.on('disconnect', namespace='/test')
# def test_disconnect():
#     print('Client disconnected')

# logging.basicConfig(level=logging.DEBUG)
# logger = logging.getLogger("Server")

# Spawning the relevant threads
#logger.info("Starting webcam thread")
# webcam_thread = WebcamVideoStream(src=0).start()
webcam_thread = WebcamVideoStream(src=1).start()
fps = FPS().start()

#logger.info("Starting inference thread")
INFERENCE_API = ""
inference_thread = InferenceHelper(INFERENCE_API).start()

#logger.info("Starting drawing thread")
drawing_thread = DrawingHelper(webcam_thread.VID_WIDTH,
                               webcam_thread.VID_HEIGHT).start()

# Run until quit.

# define a new function that we're going to run on a separate thread:

frame = webcam_thread.read()
Example #11
class HandTracker:
    def __init__(self):
        self.webcam = WebcamVideoStream()
        self.webcam.start()
        self.detection = Detection()

        #hand gesture status
        self.is_okay = False
        self.is_vhand = False
        self.is_phand = False
        self.is_palm = False
        self.is_fist = False

        #get location of hand when tracking
        self.x_axis = 0.0
        self.y_axis = 0.0

        #get tracker location
        self.yprime_axis = 0
        self.xprime_axis = 0

        #get middle of frame
        self.x_center = (self.webcam.camera().get(
            cv2.CAP_PROP_FRAME_WIDTH)) / 2
        self.y_center = (self.webcam.camera().get(
            cv2.CAP_PROP_FRAME_HEIGHT)) / 2

    def _start_up(self):

        while not self.is_okay:
            # get image from webcam
            image = self.webcam.read()
            print("WAITING FOR OK SIGN")

            # look for the OK sign to start up
            self.is_okay = self.detection.is_item_detected_in_image(
                'data/ok_cascade_48x30.xml', image)

            if self.is_okay:
                # recognized OK Sign
                print("OK GESTURE Detected ")
                self.is_okay = False
                # move to modes stage
                self._modes()

            if cv2.waitKey(1) == 27:
                self._shut_down()
                break

    def _modes(self):
        # Look to recognize a gesture
        while True:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            #self.is_phand = self.detection.is_item_detected_in_image('data/face.xml', image )
            self.is_fist = self.detection.is_item_detected_in_image(
                'data/fist.xml', image)
            #self.is_vhand =self.detection.is_item_detected_in_image('data/.xml', image )
            #self.is_palm =self.detection.is_item_detected_in_image('data/palm.xml', image )

            #check which hand gesture detected

            #Fist hand gesture
            if self.is_fist:
                self.is_fist = False
                print("Fist detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self.y_axis = self.detection.y_axis
                self._keepCenter(self.x_axis, self.y_axis)

            #Phand gesture
            if self.is_phand:
                self.is_phand = False
                print("Phand detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self._moveFocus(self.x_axis)
            #Vhand gesture
            if self.is_vhand:
                self.is_vhand = False
                print("Vhand detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self._changeLight(self.x_axis)
            #Palm gesture
            if self.is_palm:
                self.is_palm = False
                print("Palm detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self._powerLight()
            #Escape from program
            if cv2.waitKey(1) == 27:
                self._shut_down()
                break
        return

    def _powerButton(self):
        #turn on/off L.E.D

        return

    def _keepCenter(self, x_axis, y_axis):
        flag = True
        x = 25
        while x != 0:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_fist = self.detection.is_item_detected_in_image(
                'data/face.xml', image)

            #check which hand gesture detected

            if self.is_fist:
                x = 25
                self.is_fist = False
                #Get new position x and y positions
                self.xprime_axis = self.detection.x_axis
                self.yprime_axis = self.detection.y_axis

                #If the new position is different from the initial position, take the absolute value to find the difference
                if ((self.xprime_axis != self.x_axis
                     or self.yprime_axis != self.y_axis) and (flag == True)):
                    dx = self.xprime_axis - self.x_axis
                    dx = abs(dx)

                    dy = self.yprime_axis - self.y_axis
                    dy = abs(dy)

                    print("x_axis: ", self.x_axis, " y_axis: ", self.y_axis)
                    print("Face MOVED xprime_axis: ", self.xprime_axis,
                          "FACE moved yprime_axis: ", self.yprime_axis)
                    print("dx: ", dx, "dy: ", dy)

                    #if above threshold for movement
                    if (dx >= 20 or dy >= 20):

                        #Hand Ready to be Tracked
                        flag = False
                #If the new position is not equal to the center of the screen continue
                if ((self.xprime_axis != self.x_center
                     or self.yprime_axis != self.y_center)
                        and (flag == False)):

                    print("Face centering Going on")

                    # Calculate how far away from the center
                    dx = self.xprime_axis - self.x_center
                    dx = abs(dx)

                    dy = self.yprime_axis - self.y_center
                    dy = abs(dy)

                    print("Face MOVED CENTERX_axis: ", self.x_center,
                          "FACE moved CENTERY_axis: ", self.y_center)
                    print("Face MOVED xprime_axis: ", self.xprime_axis,
                          "FACE moved yprime_axis: ", self.yprime_axis)
                    #if above threshold for movement
                    if (dx >= 20 or dy >= 20):
                        print("Movement of Motors")
                        self._moveMotors(self.xprime_axis, self.yprime_axis,
                                         dx, dy)

            else:
                print("No Gesture Detected")
                x = x - 1

            if cv2.waitKey(1) == 27:
                self._shut_down()
                break

        print("___________****TIME OUT*****__________")
        self._start_up()
        return

    def _moveFocus(self, x_axis):
        flag = True
        x = 25
        while x != 0:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_phand = self.detection.is_item_detected_in_image(
                'data/fist.xml', image)

            #check which hand gesture detected

            if self.is_phand:
                x = 25
                self.is_phand = False
                #Get the new x position
                self.xprime_axis = self.detection.x_axis
                #If the new x position is different from the initial position, take the absolute value to find the difference
                if ((self.xprime_axis != self.x_axis) and (flag == True)):
                    dx = self.xprime_axis - self.x_axis
                    dx = abs(dx)

                    print("x_axis: ", self.x_axis)
                    print("Phand MOVED xprime_axis: ", self.xprime_axis)
                    print("dx: ", dx)

                    #if above threshold for movement
                    if (dx >= 20):

                        #Hand Ready to be Tracked
                        flag = False

                #If the new position is not equal to the center of the screen continue
                if ((self.xprime_axis != self.x_center) and (flag == False)):

                    print("Phand centering Going on")

                    # Calculate how far away from the center
                    dx = self.xprime_axis - self.x_center
                    dx = abs(dx)

                    print("Phand MOVED CENTERX_axis: ", self.x_center)
                    print("Phand MOVED xprime_axis: ", self.xprime_axis)
                    #if above threshold for movement
                    if (dx >= 20):
                        print("Adjust Focus")
                        self._moveMotors(self.xprime_axis, -1, dx, -1)

            else:
                print("No Gesture Detected")
                x = x - 1

            if cv2.waitKey(1) == 27:
                self._shut_down()
                break

        print("___________****TIME OUT*****__________")
        self._start_up()
        return

    def _changeLight(self, x_axis):
        flag = True
        x = 25
        while x != 0:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_vhand = self.detection.is_item_detected_in_image(
                'data/fist.xml', image)

            #check which hand gesture detected

            if self.is_vhand:
                x = 25
                self.is_vhand = False
                #Get new x position
                self.xprime_axis = self.detection.x_axis
                #If the new position is different from the initial position, take the absolute value to find the difference
                if ((self.xprime_axis != self.x_axis) and (flag == True)):
                    dx = self.xprime_axis - self.x_axis
                    dx = abs(dx)

                    print("x_axis: ", self.x_axis)
                    print("Vhand MOVED xprime_axis: ", self.xprime_axis)
                    print("dx: ", dx)

                    #if above threshold for movement
                    if (dx >= 20):

                        #Hand Ready to be Tracked
                        flag = False

                #If the new position is not equal to the center of the screen continue
                if ((self.xprime_axis != self.x_center) and (flag == False)):

                    print("Vhand centering Going on")

                    #Calculate how far away from the center
                    dx = self.xprime_axis - self.x_center
                    dx = abs(dx)

                    print("Vhand MOVED CENTERX_axis: ", self.x_center)
                    print("Vhand MOVED xprime_axis: ", self.xprime_axis)
                    #if above threshold for movement
                    if (dx >= 20):
                        print("Adjust Light Intensity")
                        self._moveMotors(self.xprime_axis, -1, dx, -1)

            else:
                print("No Gesture Detected")
                x = x - 1

            if cv2.waitKey(1) == 27:
                self._shut_down()
                break

        print("___________****TIME OUT*****__________")
        self._start_up()
        return

    def _powerLight(self):
        print(" LIGHTS ON/OFF ")

        return

    def _moveMotors(self, xpos, ypos, dx, dy):
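        # NOTE: increase()/decrease() below are placeholders for project-specific
        # motor/servo calls; Example #12 swaps in servoYaw/servoPitch rate changes here.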
        xcounter = 0
        ycounter = 0
        #If the new position is to the left of the center
        if (xpos < self.x_center):
            #Increase the motor
            print("")
            if (xcounter < dx):
                #MOTOR INCREASE FUNCTION
                print("")
                increase()
                xcounter = xcounter + 1

        #If the new position is to the right of the center
        elif (xpos > self.x_center):
            #Decrease the motor
            print("")
            if (xcounter < dx):
                #MOTOR DECREASE FUNCTION
                print("")
                decrease()
                xcounter = xcounter + 1

        #If the new position is above the center
        if ((ypos < self.y_center) and (ypos != -1)):
            #Increase the MOTOR
            if (ycounter < dy):
                print("")
                #MOTOR INCREASE FUNCTION
                increase()
                ycounter = ycounter + 1

        #If the new position is below the centering
        elif ((ypos > self.y_center) and (ypos != -1)):
            print("")
            #Decrease the Motor
            if (ycounter < dy):
                #MOTOR DECREASE FUNCTION
                print("")
                decrease()
                ycounter = ycounter + 1

        return

    #stops webcam and return camera
    def _shut_down(self):
        self.webcam.stop()
        self.webcam.stream.release()

    def main(self):
        # setup and run OpenGL
        return
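Note: Detection is a project class; HandTracker only relies on is_item_detected_in_image() returning a boolean and exposing x_axis/y_axis for the last hit. A minimal Haar-cascade sketch under those assumptions (the signature and centre convention are guesses from the calls above):

import cv2

class Detection:
    """Haar-cascade detector exposing the interface HandTracker relies on."""

    def __init__(self):
        self.x_axis = 0.0
        self.y_axis = 0.0

    def is_item_detected_in_image(self, cascade_path, image, min_neighbors=4):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cascade = cv2.CascadeClassifier(cascade_path)
        hits = cascade.detectMultiScale(gray, scaleFactor=1.1,
                                        minNeighbors=min_neighbors)
        if len(hits) == 0:
            return False
        x, y, w, h = hits[0]
        # remember the centre of the first hit for the tracking logic
        self.x_axis = x + w / 2
        self.y_axis = y + h / 2
        return True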
Example #12
class HandTracker:

    def __init__(self):
        self.webcam = WebcamVideoStream()
        self.webcam.start()
        self.detection = Detection()
        #self.servoYaw = Servo(205,409,0)
        #self.servoPitch = Servo(205,409,4)
        #self.servoYaw.neutralPos()
        #self.servoPitch.neutralPos()

        self.system = System()
        self.system.servoSetAllNeutral()
        #lights instances

        #hand gesture status
        self.is_okay  = False
        self.is_vhand = False
        self.is_phand = False
        self.is_palm  = False
        self.is_fist  = False

        #get location of hand when tracking
        self.x_axis = 0.0
        self.y_axis = 0.0

        #get tracker location
        self.yprime_axis = 0
        self.xprime_axis = 0

        #get middle of frame
        self.x_center = (self.webcam.camera().get(cv2.CAP_PROP_FRAME_WIDTH)) / 2
        self.y_center = (self.webcam.camera().get(cv2.CAP_PROP_FRAME_HEIGHT)) / 2


    def _start_up(self):

        while not self.is_okay:
            # get image from webcam
            image = self.webcam.read()
            print("WAITING FOR OK SIGN")

            # look for the OK sign to start up
            self.is_okay = self.detection.is_item_detected_in_image('data/ok_cascade_48x30.xml', image.copy(), 4)

            if self.is_okay:
                # recognized OK Sign
                print("OK GESTURE Detected ")
                self.system.turnOnGreen()
                #self._delay(700)
                #self.system.turnOffGreen()
                self.is_okay = False
                # move to modes stage
                self._modes()
            else:
                self.system.blinkRed()

            if cv2.waitKey(1) == 27:
                self._shut_down()
                break


    def _modes(self):
        self.system.turnOffGreen()
        # Look to recognize a gesture
        while True:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_phand = self.detection.is_item_detected_in_image('data/phand_cascade.xml', image, 30)
            self.is_fist = self.detection.is_item_detected_in_image('data/fist.xml', image, 4)
            self.is_vhand = self.detection.is_item_detected_in_image('data/vhand_cascade.xml', image, 4)
            self.is_palm = self.detection.is_item_detected_in_image('data/goodpalm.xml', image, 20)

            #check which hand gesture detected

            #Fist hand gesture
            if self.is_fist:
                self.system.blinkGreen()
                self.is_fist = False
                print("Fist detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self.y_axis = self.detection.y_axis
                self._keepCenter(self.x_axis, self.y_axis)
            #Phand gesture
            if self.is_phand:
                self.system.blinkGreen()
                self.is_phand = False
                print("Phand detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self._moveFocus(self.x_axis)
            #Vhand gesture
            if self.is_vhand:
                self.system.blinkGreen()
                self.is_vhand = False
                print("Vhand detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self._powerLight()
            #Palm gesture
            if self.is_palm:
                self.system.blinkGreen()
                self.is_palm = False
                print("Palm detected, See if it moved")
                self.x_axis = self.detection.x_axis
                self._changeLight(self.x_axis)
            #Escape from program
            if cv2.waitKey(1) == 27:
                self._shut_down()
                break
        return

    def _powerButton(self):
        #turn on/off L.E.D

        return

    def _keepCenter(self, x_axis, y_axis):
        self.system.turnOnLaser()
        flag = True
        x = 50
        while x != 0:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_fist = self.detection.is_item_detected_in_image('data/fist.xml', image, 5)

            #check which hand gesture detected

            if self.is_fist:
                self.system.blinkGreen()
                x = 50
                self.is_fist = False
                #Get new position x and y positions
                self.xprime_axis = self.detection.x_axis
                self.yprime_axis = self.detection.y_axis

                #If the new position is different from the initial position, take the absolute value to find the difference
                if ((self.xprime_axis != self.x_axis or self.yprime_axis != self.y_axis) and (flag == True)):
                    dx = self.xprime_axis - self.x_axis
                    dx = abs(dx)

                    dy = self.yprime_axis - self.y_axis
                    dy = abs(dy)

                    print("x_axis: ", self.x_axis, " y_axis: ", self.y_axis)
                    print("Fist MOVED xprime_axis: ", self.xprime_axis, "Fist moved yprime_axis: ", self.yprime_axis)
                    print("dx: ", dx, "dy: ", dy)

                    #if above threshold for movement
                    if (dx >= 15 or dy >= 15):

                        #Hand Ready to be Tracked
                        flag = False
                #If the new position is not equal to the center of the screen, continue
                if ((self.xprime_axis != self.x_center or self.yprime_axis != self.y_center) and (flag == False)):

                    print("Fist centering Going on")

                    # Calculate how far away from the center
                    dx = self.xprime_axis - self.x_center
                    dx = abs(dx)

                    dy = self.yprime_axis - self.y_center
                    dy = abs(dy)

                    print("Fist MOVED CENTERX_axis: ", self.x_center, "Fist moved CENTERY_axis: ", self.y_center)
                    print("Fist MOVED xprime_axis: ", self.xprime_axis, "Fist moved yprime_axis: ", self.yprime_axis)
                    #if above threshold for movement
                    if (dx >= 15 or dy >= 15):
                        print("Movement of Motors")
                        self._moveMotors(self.xprime_axis, self.yprime_axis, dx, dy)

            else:
                print("No Gesture Detected")
                self.system.blinkRed()
                x = x-1


            if cv2.waitKey(1) == 27 :
                self._shut_down()
                break

        print("___________****TIME OUT*****__________")
        self.system.blinkRed()
        self.system.blinkRed()
        self.system.turnOffLaser()
        self._start_up()
        return

    def _moveFocus(self, x_axis):
        flag = True
        x = 50
        while x != 0:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_phand = self.detection.is_item_detected_in_image('data/phand_cascade.xml', image, 5)

            #check which hand gesture detected

            if self.is_phand:
                x = 50
                self.is_phand = False
                #Get the new x position
                self.xprime_axis = self.detection.x_axis
                #If the new x position is different from the initial position, take the absolute value to find the difference
                if ((self.xprime_axis != self.x_axis) and (flag == True)):
                    dx = self.xprime_axis - self.x_axis
                    dx = abs(dx)

                    print("x_axis: ", self.x_axis)
                    print("Phand MOVED xprime_axis: ", self.xprime_axis)
                    print("dx: ", dx)

                    #if above threshold for movement
                    if (dx >= 15):

                        #Hand Ready to be Tracked
                        flag = False

                #If the new position is not equal to the center of the screen, continue
                if ((self.xprime_axis != self.x_center) and (flag == False)):

                    print("Phand centering Going on")

                    # Calculate how far away from the center
                    dx = self.xprime_axis - self.x_center
                    dx = abs(dx)

                    print("Phand MOVED CENTERX_axis: ", self.x_center)
                    print("Phand MOVED xprime_axis: ", self.xprime_axis)
                    #if above threshold for movement
                    if (dx >= 15):
                        print("Adjust Focus")
                        self._moveSpotSize(self.xprime_axis, dx)

            else:
                print("No Gesture Detected")
                x = x-1


            if cv2.waitKey(1) == 27 :
                self._shut_down()
                break

        print("___________****TIME OUT*****__________")
        self.system.blinkRed()
        self._start_up()
        return

    def _changeLight(self, x_axis):
        flag = True
        x = 50
        while x != 0:
            # get image from webcam
            image = self.webcam.read()

            #different classifier for different modes
            self.is_palm = self.detection.is_item_detected_in_image('data/goodpalm.xml', image, 12)

            #check which hand gesture detected

            if self.is_palm:
                self.system.blinkGreen()
                x = 50
                self.is_palm = False
                #Get new x position
                self.xprime_axis = self.detection.x_axis
                #If the new position is different from the initial position, take the absolute value to find the difference
                if ((self.xprime_axis != self.x_axis) and (flag == True)):
                    dx = self.xprime_axis - self.x_axis
                    dx = abs(dx)

                    print("x_axis: ", self.x_axis)
                    print("Palm MOVED xprime_axis: ", self.xprime_axis)
                    print("dx: ", dx)

                    #if above threshold for movement
                    if (dx >= 15):

                        #Hand Ready to be Tracked
                        flag = False

                #If the new position is not equal to the center of the screen, continue
                if ((self.xprime_axis != self.x_center) and (flag == False)):

                    print("PALM centering Going on")

                    #Calculate how far away from the center
                    dx = self.xprime_axis - self.x_center
                    dx = abs(dx)

                    print("PALM MOVED CENTERX_axis: ", self.x_center)
                    print("Palm MOVED xprime_axis: ", self.xprime_axis)
                    #if above threshold for movement
                    if (dx >= 15):
                        print("Adjust Light Intensity")
                        self._changeBrightness(self.xprime_axis, dx)

            else:
                print("No Gesture Detected")
                x = x-1


            if cv2.waitKey(1) == 27 :
                self._shut_down()
                break

        print("___________****TIME OUT*****__________")
        self.system.blinkRed()
        self._start_up()
        return

    def _powerLight(self):

        if (self.system.lampLight.curpos != 0):
            print(" LIGHT OFF")
            self.system.lampLight.setMin()
        else:
            print(" LIGHT ON")
            self.system.lampLight.setMax()

        return

    def _moveMotors(self,xpos,ypos, dx, dy):
        xcounter = 0
        ycounter = 0

        #If the new position is to the left of the center
        if(xpos < self.x_center):
            #Increase the motor
            #print("")
            if( xcounter < dx):
                #MOTOR INCREASE FUNCTION
              #  print("")
                self.system.servoYaw.increaseRate(5)
                #time.sleep(0.005)
                xcounter = xcounter+1

        #If the new position is to the right of the center
        elif( xpos > self.x_center):
            #Decrease the motor
            #print("")
            if( xcounter < dx):
                #MOTOR DECREASE FUNCTION
                #print("")
                self.system.servoYaw.decreaseRate(5)
                #time.sleep(0.005)
                xcounter = xcounter+1

        #If the new position is above the center
        if((ypos < self.y_center) and (ypos != -1)):
            #Increase the MOTOR
            if( ycounter < dy):
                #print("")
                #MOTOR INCREASE FUNCTION
                self.system.servoPitch.increaseRate(5)
                #time.sleep(0.005)
                ycounter = ycounter+1

        #If the new position is below the centering
        elif((ypos > self.y_center) and (ypos != -1)):
            #print("")
            #Decrease the Motor
            if( ycounter < dy):
                #MOTOR DECREASE FUNCTION
                #print("")
                self.system.servoPitch.decreaseRate(5)
                #time.sleep(0.005)
                ycounter = ycounter+1


        return

    def _moveSpotSize(self,xpos, dx):
        xcounter = 0
        #If the new position is to the left of the center
        if(xpos < self.x_center):
        #Increase the motor
        #print("")
            if( xcounter < dx):
            #MOTOR INCREASE FUNCTION
            #  print("")
                self.system.lampSpot.decreaseRate(5)
                xcounter = xcounter+1

        #If the new position is to the right of the center
        elif( xpos > self.x_center):
            #Decrease the motor
            #print("")
            if( xcounter < dx):
            #MOTOR DECREASE FUNCTION
            #print("")
                self.system.lampSpot.increaseRate(5)
                xcounter = xcounter+1


        return

    def _changeBrightness(self,xpos, dx):
        xcounter = 0
        #If the new position is to the left of the center
        if(xpos < self.x_center):
        #Increase the motor
        #print("")
            if( xcounter < dx):
            #MOTOR INCREASE FUNCTION
                print("Change Brightness: INCREASE")
                self.system.lampLight.increaseRate(150)
                print("CURRENT POS: " + str(self.system.lampLight.curpos))
                xcounter = xcounter+1

        #If the new position is to the right of the center
        elif( xpos > self.x_center):
            #Decrease the motor
            #print("")
            if( xcounter < dx):
            #MOTOR DECREASE FUNCTION
                print("Change Brighness: DECREASE")
                self.system.lampLight.decreaseRate(150)
                print("CURRENT POS DECREASE: " + str(self.system.lampLight.curpos))
                xcounter = xcounter+1


        return

    def _delay(self, count):
         while(count > 0):
             count-=1
         return

    #stops webcam and return camera
    def _shut_down (self):
        self.webcam.stop()
        self.webcam.stream.release()

    def main(self):
        # setup and run OpenGL
        return
Example #13
from webcam import WebcamVideoStream, FPS
#import argparse
import cv2
import numpy as np
from hough import houghOverlay
import time

# loading the Overlay
hw = houghOverlay('hwvhough2.bit')

# Some parameters to perform the simulation
camera = 0
iterations = 100

print("[INFO] sampling THREADED frames from webcam")
vs = WebcamVideoStream(hw.frame, src=camera).start()

# Hardware
print('Hardware execution...')
fps = FPS().start()

while fps._numFrames < iterations:
    #hw.frame[:] = vs.read()
    [lines, segments] = hw.HoughLines(20, 30, 80, 5, 30)
    #time.sleep(6e-3)
    fps.update()

fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
Example #14
        help="Dim. da caixa do filtro gaussiano, insira valores impares")
    ap.add_argument("-t",
                    "--threshold",
                    type=int,
                    default=160,
                    help="Treshold da binarização")
    ap.add_argument(
        "-l",
        "--limite",
        type=int,
        default=50,
        help="Porcentagem de branco minima encontrada para ativar a saída")
    args = vars(ap.parse_args())

    #Class to read camera/video frames on a separate thread
    camera = WebcamVideoStream(src=args['source']).start()
    #Class to measure the code's FPS
    fps = FPS().start()

    while True:

        #Read the latest available frame from the camera via the WebcamVideoStream class
        frame = camera.read()
        #        frame = imutils.resize(frame, width=680)

        #Convert to grayscale, apply a Gaussian blur, and binarize
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (args['gaussian'], args['gaussian']), 0)
        T, bin = cv2.threshold(blur, args['threshold'], 255, cv2.THRESH_BINARY)

        #Sum the number of white pixels in the image
    # parser.add_argument('--nhands', dest='num_hands', type=int, default=2, help='Max number of hands to detect')
    # parser.add_argument('--num_workers', dest='num_workers', type=int, default=4, help='Number of workers for multiprocessing')
    # parser.add_argument('--queue-size', dest='queue_size', type=int, default=5, help='Size of the multiprocessing queue')
    # parser.add_argument('--net_type', dest='net_type', type=str, default='mb1-ssd', help='SSD network type',
    # choices=["mb1-ssd", "mb2-ssd-lite"])
    # parser.add_argument('--model_path', dest='model_path', type=str, help="Trained model path", required=True)
    parser.add_argument('--display',
                        dest='display',
                        type=bool,
                        default=False,
                        help="Display boxes")

    args = parser.parse_args()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    cap_params = {}
    frame_processed = 0
    cap_params['im_width'], cap_params['im_height'] = video_capture.size()
    cap_params['threshold'] = threshold

    print(cap_params, args)

    sender = ImageSender(connect_to='tcp://192.168.1.86:5555')

    start_time = datetime.datetime.now()
    num_frames = 0
    fps = 0
    index = 0
def test_cam(args):
    """Function to predict for a camera image stream
    """

    ctypes.CDLL("../TRT_object_detection/lib/libflattenconcat.so")
    COCO_LABELS = coco.COCO_CLASSES_LIST

    # initialize
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    trt.init_libnvinfer_plugins(TRT_LOGGER, '')
    runtime = trt.Runtime(TRT_LOGGER)

    # compile model into TensorRT
    if not os.path.isfile(model.TRTbin):
        dynamic_graph = model.add_plugin(gs.DynamicGraph(model.path))
        uff_model = uff.from_tensorflow(dynamic_graph.as_graph_def(),
                                        model.output_name,
                                        output_filename='tmp.uff')

        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
        ) as network, trt.UffParser() as parser:
            builder.max_workspace_size = 1 << 28
            builder.max_batch_size = 1
            builder.fp16_mode = True

            parser.register_input('Input', model.dims)
            parser.register_output('MarkOutput_0')
            parser.parse('tmp.uff', network)
            engine = builder.build_cuda_engine(network)

            buf = engine.serialize()
            with open(model.TRTbin, 'wb') as f:
                f.write(buf)

    # create engine
    with open(model.TRTbin, 'rb') as f:
        buf = f.read()
        engine = runtime.deserialize_cuda_engine(buf)

    # create buffer
    host_inputs = []
    cuda_inputs = []
    host_outputs = []
    cuda_outputs = []
    bindings = []
    stream = cuda.Stream()

    for binding in engine:
        size = trt.volume(
            engine.get_binding_shape(binding)) * engine.max_batch_size
        host_mem = cuda.pagelocked_empty(size, np.float32)
        cuda_mem = cuda.mem_alloc(host_mem.nbytes)

        bindings.append(int(cuda_mem))
        if engine.binding_is_input(binding):
            host_inputs.append(host_mem)
            cuda_inputs.append(cuda_mem)
        else:
            host_outputs.append(host_mem)
            cuda_outputs.append(cuda_mem)
    context = engine.create_execution_context()

    image_queue = LifoQueue()
    depth_result_queue = LifoQueue()
    #object_result_queue = LifoQueue()
    cuda_lock = Lock()

    # Initialize and start threads for object detection and depth inference
    #object_detection_thread = ObstacleDetectionThread(image_queue, object_result_queue)
    depth_inference_thread = DepthInferenceThread(image_queue,
                                                  depth_result_queue,
                                                  cuda_lock, args)

    # Initialize camera to capture image stream
    # Change the value to 0 when using default camera
    video_stream = WebcamVideoStream(src=args.webcam).start()

    if not args.no_display:
        print("Trying to initinalize DisplayImage()")
        # Object to display images
        image_display = DisplayImage(not args.no_process)
        print("Finished initializing DisplayImage()")
    # Flag that records when 'q' is pressed to break out of inference loop below
    quit_inference = False

    def on_release(key):
        if key == keyboard.KeyCode.from_char('q'):
            nonlocal quit_inference
            quit_inference = True
            return False

    keyboard.Listener(on_release=on_release).start()
    print("Finished starting keyboard listener")
    #object_detection_thread.start()
    depth_inference_thread.start()
    print("Started depth_inference_thread")

    #finished = True
    disp_resized = None
    danger_level = None
    original_width = 640
    original_height = 480

    # Number of frames to capture to calculate fps
    num_frames = 5
    curr_time = np.zeros(num_frames)
    with torch.no_grad():
        print("Starting inference loop")
        while True:
            if quit_inference:
                if args.no_display:
                    print('-> Done')
                break

            # Capture the next frame from the camera stream
            frame = video_stream.read()
            copy_frame = frame

            # Send the frame to the depth inference thread to be processed
            #if finished:
            print("Sent image to depth thread")
            image_queue.put(copy_frame)
            #    finished = False
            #else:
            #    print("Still doing last frame")

            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = cv2.resize(image, (model.dims[2], model.dims[1]))
            image = (2.0 / 255.0) * image - 1.0
            image = image.transpose((2, 0, 1))
            np.copyto(host_inputs[0], image.ravel())

            start_time = time.time()
            print("Right before copying inputs, acquiring lock")
            try:
                cuda_lock.acquire()
                print("Object acquired lock")
                cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
                print("Right before execute")
                context.execute_async(bindings=bindings,
                                      stream_handle=stream.handle)
                print("Finished execute")
                cuda.memcpy_dtoh_async(host_outputs[1], cuda_outputs[1],
                                       stream)
                print("Finished copying outputs")
                cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0],
                                       stream)
                print("Finished copying outputs 2")
                stream.synchronize()
                print("Synchronized stream")
                cuda_lock.release()
                print("Object released lock")
                print("execute times " + str(time.time() - start_time))
            except:
                print("Object couldn't acquire lock, skipping")
                continue

            output = host_outputs[0]
            height, width, channels = frame.shape
            for i in range(int(len(output) / model.layout)):
                prefix = i * model.layout
                index = int(output[prefix + 0])
                label = int(output[prefix + 1])
                conf = output[prefix + 2]
                xmin = int(output[prefix + 3] * width)
                ymin = int(output[prefix + 4] * height)
                xmax = int(output[prefix + 5] * width)
                ymax = int(output[prefix + 6] * height)

                if conf > 0.7:
                    print("Detected {} with confidence {}".format(
                        COCO_LABELS[label], "{0:.0%}".format(conf)))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (0, 0, 255), 3)
                    cv2.putText(frame, COCO_LABELS[label],
                                (xmin + 10, ymin + 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                2, cv2.LINE_AA)

            # Calculate the fps
            curr_time[1:] = curr_time[:-1]
            curr_time[0] = time.time()
            fps = num_frames / (curr_time[0] - curr_time[len(curr_time) - 1])
            print("Requesting depth thread to send data back")
            # Receive results from threads
            #frame = None
            print("Requesting obstacle thread to send data back")
            #detections, frame = object_result_queue.get()
            try:
                disp_resized, danger_level = depth_result_queue.get()
                #finished = True
            except:
                print("Didn't get frame from depth thread -- still working")

            #print(f"Detections: {detections}")
            print(danger_level)
            original_width = 640
            original_height = 480
            if not args.no_display and disp_resized is not None:
                print("About to use image_display")
                # DISPLAY
                # Generate color-mapped depth image
                image_display.display(frame,
                                      disp_resized,
                                      fps,
                                      original_width,
                                      original_height,
                                      blended=not args.no_blend)
            #if frame is not None:
            #cv2.imshow("Object detection", frame)
            #else:
            #    continue
            #cv2.waitKey(1)
            else:
                print(f"FPS: {fps}")

    # When everything is done, stop camera stream
    video_stream.stop()

    depth_inference_thread.join()