Example #1
def new_execute():
    global recst
    global recbtn
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(recbtn, GPIO.IN)
    GPIO.add_event_detect(recbtn,
                          GPIO.FALLING,
                          callback=btn_th,
                          bouncetime=200)
    phpath = "./data/rec/"
    os.makedirs(phpath, exist_ok=True)

    # Left Camera
    camera0 = nano.Camera(device_id=0, flip=2, width=224, height=224, fps=60)
    # Right Camera
    camera1 = nano.Camera(device_id=1, flip=2, width=224, height=224, fps=60)
    # wait for the record button
    print("REC wait ...")
    waitrec()
    # rec start
    print("REC START! ...")
    #    recloop_old(camera0, "./data/apex/", 0.1)
    #    recloopDual_old(camera0,camera1, phpathL,phpathR, 0.1)
    #    recloopStereo_old(camera0,camera1, phpathMx, phpathL,phpathR, 0.1)
    recloopDual(camera0, camera1, phpath, 0.05)
    # testloop()
    print("REC END!! ...")
    camera0.release()
    camera1.release()
    # finish
    del camera0
    del camera1
    GPIO.cleanup()
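
new_execute() depends on several helpers (btn_th, waitrec, recloopDual) that are not shown. A minimal sketch of what the button handling could look like, assuming recst is a module-level flag toggled by the button callback (hypothetical names and logic):

import time

recst = False

def btn_th(channel):
    # RPi.GPIO passes the triggering channel to the callback
    global recst
    recst = not recst

def waitrec():
    # block until the record button flips the flag
    while not recst:
        time.sleep(0.01)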
Example #2
def init_nanocam(imgwd, imghg, fpsc, angle):
    # Left Camera
    cameraL = nano.Camera(device_id=0,
                          flip=angle,
                          width=imgwd,
                          height=imghg,
                          fps=fpsc)
    # Right Camera
    cameraR = nano.Camera(device_id=1,
                          flip=angle,
                          width=imgwd,
                          height=imghg,
                          fps=fpsc)
    return cameraL, cameraR
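
A typical call, matching the 224x224 @ 60 fps settings used in the other examples (flip=2 corresponds to GStreamer's flip-method=2, a 180-degree rotation):

cameraL, cameraR = init_nanocam(224, 224, 60, 2)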
Example #3
def doexecute():
    print("start")
    CATEGORIES = ['apex']

    device = torch.device('cuda')
    model = torchvision.models.resnet18(pretrained=False)
    model.fc = torch.nn.Linear(512, 2 * len(CATEGORIES))
    model = model.cuda().eval().half()
    # model.load_state_dict(torch.load('model.pth'))
    model.load_state_dict(torch.load('data/model.pth'))

    print("1")

    data = torch.zeros((1, 3, 224, 224)).cuda().half()

    model_trt = torch2trt(model, [data], fp16_mode=True)

    torch.save(model_trt.state_dict(), 'road_following_model_trt.pth')

    print("2")

    model_trt = TRTModule()
    model_trt.load_state_dict(torch.load('road_following_model_trt.pth'))
    print("model load end")

    car = NvidiaRacecar()

    # Left Camera
    camera0 = nano.Camera(device_id=0, flip=2, width=224, height=224, fps=60)
    # Right Camera
    camera1 = nano.Camera(device_id=1, flip=2, width=224, height=224, fps=60)

    STEERING_GAIN = 0.75
    STEERING_BIAS = 0.00

    cnt = 0
    while True:
        image = camera0.read()
        image = preprocess(image).half()
        output = model_trt(image).detach().cpu().numpy().flatten()
        x = float(output[0])
        car.steering = x * STEERING_GAIN + STEERING_BIAS
        car.throttle = 0.5
        print(str(cnt) + ":" + str(x) + ":")
        cnt = cnt + 1
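
Examples #3 and #4 both call preprocess() without defining it. A sketch based on the standard NVIDIA JetRacer road-following preprocessing (ImageNet mean/std normalization, FP16, on the GPU); treat the exact details as an assumption:

import torch
import torchvision.transforms.functional as TF

mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()

def preprocess(image):
    # HWC uint8 frame -> normalized 1x3x224x224 half-precision tensor
    image = TF.to_tensor(image).to(torch.device('cuda')).half()
    image.sub_(mean[:, None, None]).div_(std[:, None, None])
    return image[None, ...]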
Example #4
def prepare():
    # drive (go) button
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(gobtn, GPIO.IN)  # the event detect below is attached to gobtn
    GPIO.add_event_detect(gobtn,
                          GPIO.FALLING,
                          callback=btn_thrd,
                          bouncetime=200)

    # Left Camera
    camera0 = nano.Camera(device_id=0, flip=2, width=224, height=224, fps=60)
    # Right Camera
    camera1 = nano.Camera(device_id=1, flip=2, width=224, height=224, fps=60)

    CATEGORIES = ['apex']
    device = torch.device('cuda')
    model = torchvision.models.resnet18(pretrained=False)
    model.fc = torch.nn.Linear(512, 2 * len(CATEGORIES))
    model = model.cuda().eval().half()
    # model.load_state_dict(torch.load('model.pth'))
    model.load_state_dict(torch.load('data/model.pth'))

    data = torch.zeros((1, 3, 224, 224)).cuda().half()
    model_trt = torch2trt(model, [data], fp16_mode=True)
    torch.save(model_trt.state_dict(), 'road_following_model_trt.pth')

    model_trt = TRTModule()
    model_trt.load_state_dict(torch.load('road_following_model_trt.pth'))

    car = NvidiaRacecar()

    STEERING_GAIN = -0.75
    STEERING_BIAS = 0.00

    car.throttle = 0.25
    cnt = 0
    while True:
        image = camera0.read()
        image = preprocess(image).half()
        output = model_trt(image).detach().cpu().numpy().flatten()
        x = float(output[0])
        car.steering = x * STEERING_GAIN + STEERING_BIAS
        print(str(cnt) + ":" + str(x) + ":")
        cnt = cnt + 1
Example #5
 def __init__(self, settings, camSpecs):
     # Create the Camera instance
     self.camera = nano.Camera(flip=2, width=1280, height=720, fps=60)
     print('CSI Camera ready? - ', self.camera.isReady())
     # read the first camera image
     frame_read = self.camera.read()
     # Setup the variables we need, hopefully this function stays active
     self.yolo = YOLO()
     height, width = frame_read.shape[:2]
     self.yolo.init(width, height, time.time(), settings, camSpecs)
Example #6
 def __init__(self, settings, camSpecs, unitTest):
     self.unitTest = unitTest
     if self.unitTest:
         self.cap = cv2.VideoCapture(settings.inputFilename)  # Local Stored video detection - Set input video
         ret, frame_read = self.cap.read()  # Capture frame and return true if frame present
     else:
         # Create the Camera instance
         self.camera = nano.Camera(flip=2, width=1280, height=720, fps=60)
         print('CSI Camera ready? - ', self.camera.isReady())
         # read the first camera image
         frame_read = self.camera.read()
     # Setup the variables we need, hopefully this function stays active
     self.yolo = YOLO()
     height, width = frame_read.shape[:2]
     self.yolo.init(width, height, time.time(), settings, camSpecs)
Example #7
def main(args=None):
    rclpy.init(args=args)
    node = JarvisCameraHandle()
    cap = nano.Camera(flip=2, width=node.width, height=node.height, fps=25)
    while rclpy.ok():
        try:
            if cap.isReady():
                node.frame = cap.read()
                node.has_frame = True
            rclpy.spin_once(node)
        except KeyboardInterrupt:
            cap.release()
            break
    cap.release()
    node.destroy_node()
    rclpy.shutdown()
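
JarvisCameraHandle is not shown. A minimal sketch of a matching rclpy node that publishes the frame set by the main loop (hypothetical topic name and QoS):

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

class JarvisCameraHandle(Node):
    def __init__(self):
        super().__init__('jarvis_camera')
        self.width, self.height = 640, 480
        self.frame = None
        self.has_frame = False
        self.bridge = CvBridge()
        self.pub = self.create_publisher(Image, 'camera/image_raw', 10)
        # publish at roughly the capture rate
        self.timer = self.create_timer(1.0 / 25, self.publish_frame)

    def publish_frame(self):
        if self.has_frame and self.frame is not None:
            self.pub.publish(self.bridge.cv2_to_imgmsg(self.frame, encoding='bgr8'))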
Example #8
def cam():
    # Create the Camera instance
    camera = nano.Camera(device_id=0, flip=0, width=640, height=480, fps=30)
    print('CSI Camera ready? - ', camera.isReady())
    while camera.isReady():
        try:
            # read the camera image
            frame = camera.read()
            # display the frame
            cv2.imshow("Video Frame", frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        except KeyboardInterrupt:
            break

    # close the camera instance
    camera.release()

    # remove camera object
    del camera
Example #9
def cam():
    # Create the Camera instance
    camera = nano.Camera(device_id=0, flip=0, width=640, height=480, fps=30)
    print('CSI Camera ready? - ', camera.isReady())
    while camera.isReady():
        # read the camera image
        frame = camera.read()
        canny_image = canny(frame)
        cropped_image = region_of_interest(canny_image)
        lines = cv2.HoughLinesP(cropped_image, 2, np.pi / 180, 100, np.array([]),
                                minLineLength=125, maxLineGap=10)
        line_image = display_lines(frame, lines)
        combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
        cv2.imshow("results", cropped_image)
        # display the frame
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    # close the camera instance
    camera.release()

    # remove camera object
    del camera
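
Examples #9 and #15 rely on canny(), region_of_interest(), and display_lines(), none of which are shown. Minimal sketches of the usual implementations (the ROI polygon is an assumption):

import cv2
import numpy as np

def canny(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    return cv2.Canny(blur, 50, 150)

def region_of_interest(image):
    height, width = image.shape[:2]
    # keep a triangular region in front of the camera (assumed shape)
    polygon = np.array([[(0, height), (width, height), (width // 2, height // 2)]])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygon, 255)
    return cv2.bitwise_and(image, mask)

def display_lines(frame, lines):
    line_image = np.zeros_like(frame)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image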
Example #10
def run_camera():
    # Create the Camera instance
    camera = nano.Camera(camera_type=1,
                         device_id=0,
                         width=640,
                         height=480,
                         fps=30)
    print('USB Camera ready? - ', camera.isReady())
    while camera.isReady():
        try:
            # read the camera image
            frame = camera.read()
            #detect the person now
            global isPersonDetected
            global lpd_deadline
            global last_detected

            if (detectPerson(frame)):
                time_now = time.time()
                if (time_now - last_detected > lpd_deadline):
                    last_detected = time_now
                    isPersonDetected = True
                else:
                    isPersonDetected = False

            # display the frame
            cv2.imshow("Video Frame", frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

            # Definitely want to eventually change this after testing
            # Should be running on its own thread
            # if isPersonDetected:
            #     break
        except KeyboardInterrupt:
            break

    # close the camera instance
    camera.release()
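
detectPerson() is not defined in these snippets. A simple stand-in built on OpenCV's bundled HOG pedestrian detector (a placeholder, not necessarily the detector the original project used):

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

def detectPerson(frame):
    # True if at least one pedestrian-shaped region is found
    rects, _ = hog.detectMultiScale(frame, winStride=(8, 8))
    return len(rects) > 0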
Example #11
def run_camera():
    global isPersonDetected
    global lpd_deadline
    global last_detected
    global real_time_detect_person

    # Create the Camera instance
    camera = nano.Camera(camera_type=1,
                         device_id=0,
                         width=640,
                         height=480,
                         fps=30)
    print('USB Camera ready? - ', camera.isReady())
    while camera.isReady():
        try:
            # read the camera image
            frame = camera.read()

            # detect the person now
            if (detectPerson(frame)):
                time_now = time.time()
                real_time_detect_person = True
                if (time_now - last_detected > lpd_deadline):
                    last_detected = time_now
                    isPersonDetected = True
                else:
                    isPersonDetected = False
            else:
                real_time_detect_person = False
            # display the frame
            cv2.imshow("Video Frame", frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        except KeyboardInterrupt:
            break

    # close the camera instance
    camera.release()
Example #12
def capture_image():
    # grab global references to the output frame and lock variables
    global outputFrame, lock
    # Create the Camera instance
    camera = nano.Camera(flip=2, width=1280, height=720, fps=30)
    print('CSI Camera ready? - ', camera.isReady())
    # Now that the init is done, lets get our IP and print to terminal so we know where to connect
    myip = [
        l for l in ([
            ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
            if not ip.startswith("127.")
        ][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())
                 for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
                 ][0][1]]) if l
    ][0][0]
    print("Please connect to http://" + str(myip) +
          ":5000 to see the video feed.")
    while camera.isReady():
        # read the camera image
        frame_read = camera.read()
        # Setup the variables we need, hopefully this function stays active
        with lock:
            # get the lock and set the image to the current one
            outputFrame = frame_read
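
capture_image() only fills outputFrame under the lock; something else has to serve it. A typical Flask MJPEG endpoint for this outputFrame/lock pattern (assumed wiring, not shown in the source):

from flask import Flask, Response
import cv2

app = Flask(__name__)

def generate():
    global outputFrame, lock
    while True:
        with lock:
            if outputFrame is None:
                continue
            ok, encoded = cv2.imencode('.jpg', outputFrame)
        if ok:
            yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
                   bytearray(encoded) + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(generate(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')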
Example #13
def processImages(settings, camSpecs):
    firstIteration = True
    yolo = None

    # grab global references to the output frame and lock variables
    global outputFrame, outputCoordinates, lock

    if settings.useCamera:
        # Create the Camera instance
        camera = nano.Camera(flip=2, width=1280, height=720, fps=30)
        print('CSI Camera ready? - ', camera.isReady())
        while camera.isReady():
            try:
                # read the camera image
                frame_read = camera.read()
                if firstIteration:
                    # Setup the variables we need, hopefully this function stays active
                    yolo = YOLO()
                    height, width = frame_read.shape[:2]
                    yolo.init(width, height, time.time(), settings, camSpecs)
                    firstIteration = False

                    # Now that the init is done, lets get our IP and print to terminal so we know where to connect
                    myip = [
                        l for l in ([
                            ip for ip in socket.gethostbyname_ex(
                                socket.gethostname())[2]
                            if not ip.startswith("127.")
                        ][:1], [[(s.connect(('8.8.8.8', 53)),
                                  s.getsockname()[0], s.close()) for s in [
                                      socket.socket(socket.AF_INET,
                                                    socket.SOCK_DGRAM)
                                  ]][0][1]]) if l
                    ][0][0]
                    print("Please connect to http://" + str(myip) +
                          ":5000 to see the video feed.")
                else:
                    image, timestamp, coordinates = yolo.readFrame(
                        frame_read, time.time())
                    with lock:
                        if image is not None:
                            outputFrame = image.copy()
                        if coordinates is not None:
                            outputCoordinates = coordinates
                    # Sleep just a bit so control-c works
                    time.sleep(.001)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            except KeyboardInterrupt:
                break

        # close the camera instance
        camera.release()

        # remove camera object
        del camera
    else:
        #os.chdir("A:\\Users\edwar\Downloads")
        print("Opening captured video file ", settings.inputFilename)
        cap = cv2.VideoCapture(
            settings.inputFilename
        )  # Local Stored video detection - Set input video

        currentTime = 0.0
        fps = 10.0

        while True:  # Load the input frame and write output frame.
            ret, frame_read = cap.read(
            )  # Capture frame and return true if frame present
            # For Assertion Failed Error in OpenCV
            if not ret:  # no frame returned; rewind and loop the video from the start
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            else:
                height, width, layers = frame_read.shape
                new_h = int(height)
                new_w = int(width)
                frame_read = cv2.resize(frame_read, (new_w, new_h))
                # We do some special setup if this is the first frame, otherwise just send the image
                if firstIteration:
                    # Setup the variables we need, hopefully this function stays active
                    yolo = YOLO()
                    height, width = frame_read.shape[:2]
                    yolo.init(width, height, time.time(), settings, camSpecs)
                    firstIteration = False

                    # Now that the init is done, lets get our IP and print to terminal so we know where to connect
                    myip = [
                        l for l in ([
                            ip for ip in socket.gethostbyname_ex(
                                socket.gethostname())[2]
                            if not ip.startswith("127.")
                        ][:1], [[(s.connect(('8.8.8.8', 53)),
                                  s.getsockname()[0], s.close()) for s in [
                                      socket.socket(socket.AF_INET,
                                                    socket.SOCK_DGRAM)
                                  ]][0][1]]) if l
                    ][0][0]
                    print("Please connect to http://" + str(myip) +
                          ":5000 to see the video feed.")
                else:
                    image, timestamp, coordinates = yolo.readFrame(
                        frame_read, time.time())
                    with lock:
                        if image is not None:
                            outputFrame = image.copy()
                        if coordinates is not None:
                            outputCoordinates = coordinates
                    currentTime += 1.0 / fps
                    # Sleep just a bit so control-c works
                    time.sleep(.001)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
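
The nested comprehension used twice above to discover the local IP address is hard to read; an equivalent of its UDP-socket branch as a clearer helper:

import socket

def get_local_ip():
    # opening a UDP socket toward a public address reveals the local
    # interface IP; no packets are actually sent
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 53))
        return s.getsockname()[0]
    finally:
        s.close()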
Example #14
import cv2

# from nanocamera.NanoCam import Camera
import nanocamera as nano

if __name__ == '__main__':
    # requires the RTSP location. Something like this: rtsp://localhost:8888/stream
    # For RTSP camera, the camera_type=2.
    # This only supports H.264 codec for now

    # a location for the rtsp stream
    rtsp_location = "192.168.1.26:8554/stream"
    # Create the Camera instance
    camera = nano.Camera(camera_type=2,
                         source=rtsp_location,
                         width=640,
                         height=480,
                         fps=30)
    print('RTSP Camera is now ready')
    while True:
        try:
            # read the camera image
            frame = camera.read()
            # display the frame
            cv2.imshow("Video Frame", frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        except KeyboardInterrupt:
            break

    # close the camera instance
    camera.release()
Example #15
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image

def average_slope_intercept(frame, lines):
    rail = []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        parameters = np.polyfit((x1,x2), (y1, y2), 1)
        slope = parameters[0]
        intercept = parameters[1]
        if slope > 2:
            rail.append((slope, intercept))

if __name__ == '__main__':
    # Create the Camera instance
    camera = nano.Camera(device_id=0, flip=0, width=640, height=480, fps=30)
    print('CSI Camera ready? - ', camera.isReady())
    while camera.isReady():
        # read the camera image
        frame = camera.read()
        canny_image = canny(frame)
        cropped_image = region_of_interest(canny_image)
        lines = cv2.HoughLinesP(cropped_image, 1, np.pi / 180, 50, np.array([]),
                                minLineLength=100, maxLineGap=15)
        line_image = display_lines(frame, lines)
        combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
        cv2.imshow("results", combo_image)
        # display the frame
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

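average_slope_intercept() collects (slope, intercept) pairs but never returns a result. A plausible completion, following the common lane-detection recipe (hypothetical helper name):

import numpy as np

def make_coordinates(frame, parameters):
    slope, intercept = parameters
    y1 = frame.shape[0]        # bottom edge of the image
    y2 = int(y1 * 0.6)         # extend the line partway up the frame
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    return np.array([x1, y1, x2, y2])

# inside average_slope_intercept, after the loop:
#     if rail:
#         return make_coordinates(frame, np.average(rail, axis=0))
#     return None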
Example #16
        # create the shape
        rect = Rectangle((x1, y1), width, height, fill=False, color='white')
        # draw the box
        ax.add_patch(rect)
        # draw text and score in top left corner
        label = "%s (%.3f)" % (v_labels[i], v_scores[i])
        pyplot.text(x1, y1, label, color='white')
    # show the plot
    pyplot.show()


# load yolov3 model
model = load_model('yolo.h5')

# Create the Camera instance
camera = nano.Camera(flip=0, width=640, height=480, fps=30)
print('CSI Camera is now ready')
while True:
    try:
        # read the camera image
        image = camera.read()
        # scale pixel values to [0, 1]
        image = image.astype('float32')
        image /= 255.0
        # add a dimension so that we have one sample
        image = expand_dims(image, 0)
        # after expand_dims the array shape is (1, height, width, 3)
        input_w, input_h = image.shape[2], image.shape[1]

        # make prediction
        yhat = model.predict(image)
        # summarize the shape of the list of arrays
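
Keras YOLOv3 models are usually trained on a fixed input size (often 416x416), while this camera delivers 640x480 frames; a resize before predict() is likely needed (an assumption about this particular model):

# assumed network input size
input_w, input_h = 416, 416
image = cv2.resize(camera.read(), (input_w, input_h))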
Example #17
import argparse
import socket

import cv2
import imagezmq
import nanocamera as nano

parser = argparse.ArgumentParser(description="Inference from web-cam")

parser.add_argument(
    "--resolution",
    type=int,
    nargs=2,
    metavar=("width", "height"),
    default=(1280 // 2, 720 // 2),
)
parser.add_argument("--quality", help="jpeg quality", type=int, default=90)
args = parser.parse_args()

# Accept connections on all tcp addresses, port 5555
sender = imagezmq.ImageSender(connect_to="tcp://*:5555", REQ_REP=False)

sender_name = socket.gethostname()  # send hostname with each image
width, height = args.resolution
jpeg_quality = args.quality  # 0 to 100, higher is better quality
cam = nano.Camera(flip=0, width=width, height=height, fps=30)

while True:  # send images until Ctrl-C
    image = cam.read()
    # sender.send_image(sender_name, image)
    ret_code, jpg_buffer = cv2.imencode(
        ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
    jpg_buffer = jpg_buffer.tobytes()
    reply = sender.send_jpg(sender_name, jpg_buffer)
Example #18
    cv.fillPoly(mask, polygon, 255)
    cropped_edges = cv.bitwise_and(edges, mask)
    return cropped_edges


# define a range of black color in HSV

lower_black = np.array([0, 0, 0])
upper_black = np.array([227, 100, 70])

# Rectangular Kernel
rectKernel = cv.getStructuringElement(cv.MORPH_RECT, (7, 7))

# Create the Camera instance for 640 by 480
camera = nano.Camera()

if __name__ == '__main__':

    print('Started')
    print("Beginning Transmitting to channel: Happy_Robots")
    now = time()

    # commencing subtraction
    while True:
        try:
            # fetching each frame
            frame = camera.read()

            if frame is None:
                break
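
The HSV range and rectangular kernel defined above imply a black-region masking step somewhere in the loop; a sketch of how they are typically applied (assumed, since the rest of the loop is not shown):

hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, lower_black, upper_black)
mask = cv.morphologyEx(mask, cv.MORPH_CLOSE, rectKernel)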
Example #19
import os
from time import sleep

import cv2
import nanocamera
import RPi.GPIO as GPIO

def takepicture(filename):
    if (camera.isReady()):
        frame = camera.read()
        cv2.imwrite(filename, frame)


def update_class1():
    n_class1 = len(os.listdir("./class1"))
    tmp_c1_name = "./class1/class1_" + str(n_class1) + ".jpg"
    takepicture(tmp_c1_name)
    sleep(3.0)


def update_class2():
    n_class2 = len(os.listdir("./class2"))
    tmp_c2_name = "./class2/class2_" + str(n_class2) + ".jpg"
    takepicture(tmp_c2_name)
    sleep(3.0)


camera = nanocamera.Camera(flip=0, width=1280, height=800, fps=30)
sleep(2.0)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(38, GPIO.IN)
GPIO.setup(37, GPIO.IN)
while True:
    if GPIO.input(38):
        update_class1()
    if GPIO.input(37):
        update_class2()
    sleep(0.1)  # brief pause so the polling loop does not spin at 100% CPU
Example #20
import cv2

# from nanocamera.NanoCam import Camera
import nanocamera as nano

if __name__ == '__main__':
    # requires the Camera streaming url. Something like this: http://localhost:80/stream
    # For IP/MJPEG camera, the camera_type=3.
    # This works with only camera steaming MJPEG format and not H.264 codec for now

    # a location for the camera stream
    camera_stream = "192.168.1.26:80"

    # Create the Camera instance
    camera = nano.Camera(camera_type=3, source=camera_stream, width=640, height=480, fps=30)
    print('MJPEG/IP Camera is now ready')
    while True:
        try:
            # read the camera image
            frame = camera.read()
            # display the frame
            cv2.imshow("Video Frame", frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        except KeyboardInterrupt:
            break

    # close the camera instance
    camera.release()
Example #21
def threadVid():
    # Get access to the webcam. The method is different depending on if this is running on a laptop or a Jetson Nano.
    #if running_on_jetson_nano():
    #    #RTSP_ADDR = 'rtsp://*****:*****@192.168.0.100:554/cam/realmonitor?channel=1&subtype=0'
    #    # Accessing the camera with OpenCV on a Jetson Nano requires gstreamer with a custom gstreamer source string
    #    #print(get_jetson_gstreamer_source(RTSP_ADDR))
    #    #video_capture = cv2.VideoCapture(get_jetson_gstreamer_source(0), cv2.CAP_GSTREAMER)
    #    video_capture = open_cam_usb(1, 320, 240)
    #else:
    #    # Accessing the camera with OpenCV on a laptop just requires passing in the number of the webcam (usually 0)
    #    # Note: You can pass in a filename instead if you want to process a video file instead of a live camera stream
    #    video_capture = cv2.VideoCapture(0)

    #video_capture = cv2.VideoCapture(RTSP_ADDR)
    #video_capture = cv2.VideoCapture(0)
    #video_capture = cv2.VideoCapture("rtsp://*****:*****@192.168.0.100:554/cam/realmonitor?channel=1&subtype=0")
    #video_capture_thermal = cv2.VideoCapture("rtsp://*****:*****@192.168.0.100:554/cam/realmonitor?channel=2&subtype=0")
    video_capture = nano.Camera(camera_type=1,
                                device_id=0,
                                width=640,
                                height=480,
                                fps=30,
                                enforce_fps=True)

    # Track how long since we last saved a copy of our known faces to disk as a backup.
    number_of_faces_since_save = 0

    while video_capture.isReady():
        # Grab a single frame of video
        frame = video_capture.read()
        #ret_thermal, frame_thermal = video_capture_thermal.read()

        #video_capture.grab()
        #video_capture_thermal.grab()

        #if ret == 0:# or ret_thermal == 0:
        #    continue

        #frame = cv2.resize(frame, (640, 480))
        #frame_thermal = cv2.resize(frame_thermal, (480, 640))

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Find all the face locations and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame,
                                                         model='cnn')
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        # Loop through each detected face and see if it is one we have seen before
        # If so, we'll give it a label that we'll draw on top of the video.
        face_labels = []
        for face_location, face_encoding in zip(face_locations,
                                                face_encodings):
            # See if this face is in our list of known faces.
            metadata = lookup_known_face(face_encoding)

            # If we found the face, label the face with some useful information.
            if metadata is not None:
                time_at_door = datetime.now(
                ) - metadata['first_seen_this_interaction']
                face_label = "At door {}s".format(
                    int(time_at_door.total_seconds()))

            # If this is a brand new face, add it to our list of known faces
            else:
                face_label = "New visitor!"

                # Grab the image of the face from the current frame of video
                top, right, bottom, left = face_location
                face_image = small_frame[top:bottom, left:right]
                face_image = cv2.resize(face_image, (150, 150))

                # Add the new face to our known face data
                register_new_face(face_encoding, face_image)

            face_labels.append(face_label)

        # Draw a box around each face and label each face
        for (top, right, bottom,
             left), face_label in zip(face_locations, face_labels):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, face_label, (left + 6, bottom - 6),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)

        # Display recent visitor images
        number_of_recent_visitors = 0
        for metadata in known_face_metadata:
            # If we have seen this person in the last minute, draw their image
            if datetime.now() - metadata["last_seen"] < timedelta(
                    seconds=10) and metadata["seen_frames"] > 5:
                # Draw the known face image
                x_position = number_of_recent_visitors * 150
                frame[30:180,
                      x_position:x_position + 150] = metadata["face_image"]
                number_of_recent_visitors += 1

                # Label the image with how many times they have visited
                visits = metadata['seen_count']
                visit_label = "{} visits".format(visits)
                if visits == 1:
                    visit_label = "First visit"
                cv2.putText(frame, visit_label, (x_position + 10, 170),
                            cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255), 1)

        if number_of_recent_visitors > 0:
            cv2.putText(frame, "Visitors at Door", (5, 18),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)

        # Yield video frame to flask
        #frame_concat = np.concatenate((frame,frame_thermal),axis=1)
        _, jpeg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() +
               b'\r\n\r\n')

        # Display the final frame of video with boxes drawn around each detected face
        ##cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        #if cv2.waitKey(1) & 0xFF == ord('q'):
        #    save_known_faces()
        #    break

        # We need to save our known faces back to disk every so often in case something crashes.
        if len(face_locations) > 0 and number_of_faces_since_save > 100:
            save_known_faces()
            number_of_faces_since_save = 0
        else:
            number_of_faces_since_save += 1

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
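
threadVid() yields multipart JPEG chunks, so it is evidently meant to back a streaming route; a typical Flask hookup (assumed, not shown in the source):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    return Response(threadVid(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')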
Example #22
                                     map_location=device),
                          strict=False)

if args.model_type != "trt":
    model = model.eval().to(device=device, dtype=precision)

width, height = args.resolution

if nano is None:
    cam = Camera(width=width, height=height)
else:
    # See https://picamera.readthedocs.io/en/release-1.13/fov.html
    cam = nano.Camera(
        flip=0,
        width=width,
        height=height,
        fps=30,
        capture_width=640,
        capture_height=480,
    )

if pyfakewebcam is not None and args.fake_cam:
    fake_cam = pyfakewebcam.FakeWebcam("/dev/video1", cam.width, cam.height)
else:
    fake_cam = None

dsp = Displayer("MattingV2",
                cam.width,
                cam.height,
                show_info=(not args.hide_fps))
dsp.fake_cam = fake_cam
Example #23
        s3.upload_file(local_file, bucket, s3_file)
        print("Upload Successful")
        return True
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError:
        print("Credentials not available")
        return False


#this initializes a CSI camera device
#camera = nano.Camera(flip=0,width=640,height=480,fps=30)

#this initializes a USB camera device
camera = nano.Camera(camera_type=1, device_id=1, width=640, height=480, fps=30)

myMQTTClient = AWSIoTMQTTClient("JetsonNano")
myMQTTClient.configureEndpoint(
    "a2mzhorq6795m9-ats.iot.us-east-2.amazonaws.com", 8883)
myMQTTClient.configureCredentials("AmazonRootCA1.pem",
                                  "902d95efbc-private.pem.key",
                                  "902d95efbc-certificate.pem.crt")
myMQTTClient.configureOfflinePublishQueueing(-1)
myMQTTClient.configureDrainingFrequency(2)
myMQTTClient.configureConnectDisconnectTimeout(10)
myMQTTClient.configureMQTTOperationTimeout(5)

allowedConnectingTime = time.time() + 10

if time.time() < allowedConnectingTime:
    # (snippet truncated; the configured client presumably connects here)
    myMQTTClient.connect()
Example #24
    ## for a single USB camera (left and right frames are "glued" together)
    cap = cv2.VideoCapture(int(cam_ids[0]))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width * 2)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
else:
    ### for double USB
    # cap_left = cv2.VideoCapture(int(cam_ids[0]))
    # cap_right = cv2.VideoCapture(int(cam_ids[1]))
    # cap_left.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    # cap_left.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    # cap_right.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    # cap_right.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    # nano.Camera()
    cap_left = nano.Camera(device_id=0, flip=2, width=800, height=480, fps=30)
    cap_right = nano.Camera(device_id=1, flip=2, width=800, height=480, fps=30)

########################################################################

#############################################################################
## GET ALL CAMERA MATRICES AND COEFFICIENTS
camera_matrix_left, camera_matrix_right, map_left_x, map_left_y, map_right_x, map_right_y, Q = init_calibration(
    left_xml, right_xml, image_size, resolution)
################################################################################

##############################################################################
# INITIALIZE IMAGE WINDOWS
windowName = "Live Camera Input"
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(windowName, width, height)
"""

# from nanocamera.NanoCam import Camera
import nanocamera as nano

if __name__ == '__main__':
    # you can see connected USB cameras by running : ls /dev/video* on the terminal
    # for usb camera /dev/video2, the device_id will be 2

    # Create the Camera instance
    try:
        # Create the Camera instance
        print("camera stream")
        camera = nano.Camera(camera_type=1,
                             device_id=0,
                             width=640,
                             height=480,
                             fps=30,
                             debug=True)
    except Exception as e:
        print("Exception occurred ------ ")
        print("Exception Type - ", e)

    else:
        print('USB Camera ready? - ', camera.isReady())
        while True:
            try:
                # read the camera image
                frame = camera.read()
                print("do something with frame")
            # send_to_cloud(frame)
            # display the frame
            # (snippet ends here; completing with the pattern used in the other examples)
            except KeyboardInterrupt:
                break

        # close the camera instance
        camera.release()
Example #26
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import nanocamera as nano
import os
import uuid

# the output directory must exist before cv2.imwrite can save into it
os.makedirs('./nanocam_test', exist_ok=True)

# Left Camera
camera0 = nano.Camera(device_id=0, flip=2, width=224, height=224, fps=30)
# Right Camera
camera1 = nano.Camera(device_id=1, flip=2, width=224, height=224, fps=30)

imgL = camera0.read()
imgR = camera1.read()
s_uuid = str(uuid.uuid1())

print("snap_shot!")
filenameL = './nanocam_test/%d_%d_%s_L.jpg' % (0, 0, s_uuid)
filenameR = './nanocam_test/%d_%d_%s_R.jpg' % (0, 0, s_uuid)
filenameM = './nanocam_test/%d_%d_%s_M.jpg' % (0, 0, s_uuid)
imgMx = cv2.addWeighted(src1=imgL, alpha=0.5, src2=imgR, beta=0.5, gamma=0)

cv2.imwrite(filenameL, imgL)
cv2.imwrite(filenameR, imgR)
cv2.imwrite(filenameM, imgMx)

camera0.release()
camera1.release()