class Camera:
    def __init__(self, height=480, width=640):
        # Intrinsic parameters obtained from the calib module
        self._K = np.array([[676.74, 0, 317.4], [0, 863.29, 252.459],
                            [0, 0, 1]])
        # Extrinsics
        angles = np.array([90, 90, 0]) * np.pi / 180
        R = eulerAnglesToRotationMatrix(angles)  # rotation matrix WCS -> CCS
        T = np.array([[0.0], [-0.156],
                      [0.0]])  # WCS origin expressed in the camera coordinate system
        self._Rt = np.hstack((R, T))
        self._camera = CSICamera(width=width, height=height)
        print("Camera Initialized - Height = {}, Width = {}".format(
            height, width))

    def get_Rt(self):
        return self._Rt

    def get_K(self):
        return self._K

    def get_frame_cv2(self):
        """
        Returns an ndarray in BGR format (OpenCV format)
        """
        return self._camera.read()

    def get_frame_PIL(self):
        """
        Returns an Image object in RGB (PIL format)
        """
        image = self._camera.read()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image)
        return image
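
# A minimal usage sketch (an assumption, not part of the original example):
# project a homogeneous 3D world point to pixel coordinates using the K and
# [R|t] accessors above.
def project_point(camera, Xw):
    """Project a 4x1 homogeneous world point to pixel coordinates (u, v)."""
    P = camera.get_K() @ camera.get_Rt()  # 3x4 projection matrix K[R|t]
    uvw = P @ Xw                          # homogeneous image coordinates
    return uvw[0, 0] / uvw[2, 0], uvw[1, 0] / uvw[2, 0]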
Example #2
def take_some_pictures(path):

    camera = CSICamera(width=1920, height=1080, capture_device=0)

    # Warm up the camera by discarding the first 30 frames
    for i in range(30):
        camera.read()
        time.sleep(1 / 30)

    # Take pictures (cnt runs 0..10, i.e. 11 frames)
    cnt = 0
    while True:
        time.sleep(1 / 30)
        read_camera(path, camera, cnt)
        cnt += 1
        if cnt > 10:
            break
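
# read_camera is not defined in this snippet; a plausible implementation
# (an assumption, not the original helper) would grab one frame and save it:
def read_camera(path, camera, cnt):
    frame = camera.read()  # BGR ndarray from jetcam
    cv2.imwrite("{}/frame_{:03d}.jpg".format(path, cnt), frame)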
Example #3
class Camera:
    def __init__(self, fps=10, img_size=256):
        self.fps = fps
        self.img_size = img_size
        self.cam = CSICamera(width=img_size,
                             height=img_size,
                             capture_width=1080,
                             capture_height=720,
                             capture_fps=fps)

    def capture(self):
        self.frame = self.cam.read()
        return self.frame
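
# Minimal usage sketch for the class above (assumed, not from the original):
if __name__ == '__main__':
    cam = Camera(fps=10, img_size=256)
    frame = cam.capture()  # img_size x img_size BGR ndarray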
Example #4

def cam_loop(images, end):
    cap = CSICamera(width=224,
                    height=224,
                    capture_width=1080,
                    capture_height=720,
                    capture_fps=30)
    while True:
        img = cap.read()
        print("reading image")
        if img is not None:
            images.put(img)
        if end.value == 1:
            cap.release()
            return
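
# cam_loop is written for multiprocessing; a minimal driver (an assumed
# sketch, not part of the original) would share a Queue and a stop flag:
if __name__ == '__main__':
    from multiprocessing import Process, Queue, Value
    images = Queue()
    end = Value('i', 0)
    proc = Process(target=cam_loop, args=(images, end))
    proc.start()
    first_frame = images.get()  # block until the first frame arrives
    end.value = 1               # ask cam_loop to release the camera and exit
    proc.join()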
Example #5

class VideoCSIReader(VideoReader):
    def __init__(self):
        self._camera = CSICamera(width=224,
                                 height=224,
                                 capture_width=400,
                                 capture_height=300,
                                 capture_fps=30)

    def read_frame(self, show_preview=False):
        img = self._camera.read()

        if img is None:
            return None

        if show_preview:
            cv2.imshow("preview", img)
            cv2.waitKey(1)

        return img
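
# Usage sketch (assumed, not from the original): stream frames with an
# on-screen preview until the camera stops returning images.
if __name__ == '__main__':
    reader = VideoCSIReader()
    while reader.read_frame(show_preview=True) is not None:
        pass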
Example #6
try:
    # Car
    car = NvidiaRacecar()
    car.throttle_gain = THROTTLE_GAIN
    car.steering_offset = STEERING_OFFSET

    # Camera
    camera = CSICamera(width=CAMERA_WIDTH, height=CAMERA_HEIGHT)

    # Control Loop
    while True:
        if SHOW_LOGS:
            start_time = time.time()

        camera_frame = camera.read()
        cropped_frame = center_crop_square(camera_frame)
        resized_frame = cv2.resize(cropped_frame, (FRAME_SIZE, FRAME_SIZE))
        preprocessed_frame = preprocess_image(resized_frame)
        output = model_trt(preprocessed_frame).detach().clamp(
            -1.0, 1.0).cpu().numpy().flatten()

        steering = float(output[0])
        car.steering = steering

        throttle = float(output[1])
        car.throttle = throttle

        if SHOW_LOGS:
            fps = int(1 / (time.time() - start_time))
            print("fps: " + str(fps) + ", steering: " + str(steering) +
                  ", throttle: " + str(throttle))
Example #7
"""
Simple test script to validate the motor controls and the steering and the camera and the ultrasonic sensors
"""

from jetcam.csi_camera import CSICamera
import ipywidgets
from IPython.display import display
from jetcam.utils import bgr8_to_jpeg

camera_in = CSICamera(width=224,
                      height=224,
                      capture_width=1080,
                      capture_height=720,
                      capture_fps=30)  # Default Nvidia Camera Setup

image_widget = ipywidgets.Image(format='jpeg')

display(image_widget)  # display the widget once; the loop below only updates its value

while True:
    image_widget.value = bgr8_to_jpeg(camera_in.read())

# Test motors
import actuators
drive_train = actuators.ServoMotor()
Example #8
import time

import cv2
from jetcam.csi_camera import CSICamera

# create the camera object
camera = CSICamera(width=224,
                   height=224,
                   capture_width=1080,
                   capture_height=720,
                   capture_fps=30)

# variable to store the captured images and the current image
imageList = []
currentImg = None

# read once and set the camera to running, so that camera.value always holds
# the latest frame
img = camera.read()
camera.running = True

print('Start taking frames')

# capture 400 frames
for i in range(400):

    # get an image from the camera
    img = camera.value

    # rotate the image
    currentImg = cv2.rotate(img, cv2.ROTATE_180)

    # add the image to the list of captured images
    imageList.append(currentImg)
Example #9
import cv2
from jetcam.csi_camera import CSICamera

file_path = "/home/nano/Desktop/output/"
#480x320
camera_right = CSICamera(capture_device=0, width=1280, height=720)
camera_left = CSICamera(capture_device=1, width=1280, height=720)
counter = 1

while True:
    image_right = camera_right.read()
    image_left = camera_left.read()
    cv2.imshow("right", image_right)
    cv2.imshow("left", image_left)

    key = cv2.waitKey(1) & 0xFF  # poll the keyboard once per frame
    if key == ord('s'):  # press s to save the current pair
        print("save capture{}".format(counter))
        cv2.imwrite(file_path + "right{}.jpg".format(counter), image_right)
        cv2.imwrite(file_path + "left{}.jpg".format(counter), image_left)
        counter = counter + 1
    elif key == ord('q'):  # press q to stop, or directly Ctrl+C
        break
cv2.destroyAllWindows()
Example #10
fps = 60
vname = "/mnt/cv2video-{}p-{}.avi".format(
    h,
    datetime.now().strftime("%Y%m%d-%H%M%S-%f"))
# Define the codec and create the VideoWriter object; the output is stored in the .avi file named above.
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
#fourcc = cv2.VideoWriter_fourcc(*'X264')
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_writer = cv2.VideoWriter(vname, fourcc, 30, (w, h))
#video_writer = cv2.VideoWriter("/media/ramdisk/output.avi", fourcc, 30, (w, h))
# record video
lFPSbeg = datetime.now()
lFPSfnm = 0  # frames written so far (used for the FPS average below)
while True:
    try:
        frame = capture.read()
        video_writer.write(frame)
        #
        #fps computation
        lFPSrun = (datetime.now() - lFPSbeg).seconds
        lFPSfnm = lFPSfnm + 1
        if lFPSrun > 0:
            FPSavg = lFPSfnm / lFPSrun
        else:
            FPSavg = 0
        cfpst = "FPS: %d t: %dsec %.2ffps" % (lFPSfnm, lFPSrun, FPSavg)
        if use_display:
            cv2.putText(frame, cfpst, (10, 50), cv2.FONT_HERSHEY_SIMPLEX,
                        0.8, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.imshow('Video Stream', frame)
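    # (assumed continuation; the original snippet is truncated here)
    except KeyboardInterrupt:
        break

video_writer.release()
cv2.destroyAllWindows()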
Example #11
#
ESC = 27
Mm = 0   # max matches
cFk = 0  # frame counter

# FPS bookkeeping used in the loop below
lFps_sec = -1  # second of the last sample
lFps_k = 0     # frames within the current second
lFps_c = 0     # frames counted in the previous second
lFps_M = 0     # maximum frames per second observed

tsrfocr = TSRFrameOCR()
tsrfocr.start()

#camera = cv2.VideoCapture ('/home/jetson/Work/dataset/GOPR1415s.mp4')
#camera = cv2.VideoCapture ('/home/jetson/Work/dataset/GP011416s.mp4')
camera = CSICamera(width=1280, height=720)

while True:
    try:
        imgCamColor = camera.read()
        #
        cFk = cFk + 1
        if imgCamColor is not None:
            result = imgCamColor
            check_red_circles(imgCamColor)
            #fps computation
            cFps_sec = datetime.now().second
            lFps_k = lFps_k + 1
            if lFps_sec != cFps_sec:
                lFps_c = lFps_k - 1
                lFps_k = 0
            if lFps_M < lFps_k:
                lFps_M = lFps_k
            lFps_sec = cFps_sec
            if show_fps:
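                # (assumed continuation; the original snippet is truncated here)
                print("FPS: {} (max {})".format(lFps_c, lFps_M))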
Example #12

totalFrames = 0

# initialize the log file
logFile = None

# initialize the list of various points used to calculate the avg of
# the vehicle speed
# points = [("A", "B"), ("B", "C"), ("C", "D")]

# start the frames per second throughput estimator
fps = FPS().start()

# loop over the frames of the stream
while True:
    # read the frames from the camera
    frame_right = camera_right.read()
    frame_left = camera_left.read()
    
    # rectify the frames
    # use left_rectify as detection frame input
    frame_right_rectify = cv2.remap(frame_right, cam_config.right_map1, cam_config.right_map2, cv2.INTER_LINEAR)
    frame_left_rectify = cv2.remap(frame_left, cam_config.left_map1, cam_config.left_map2, cv2.INTER_LINEAR)
    
    # create gray-scale frames for depth graph
    frame_right_gs = cv2.cvtColor(frame_right_rectify, cv2.COLOR_BGR2GRAY)
    frame_left_gs = cv2.cvtColor(frame_left_rectify, cv2.COLOR_BGR2GRAY)
    
    # compute the disparity map "disp" and the 3D coordinate model "threeD"
    num = 10  # num & blockSize can be tuned for a better result
    blockSize = 31
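    # (assumed continuation; the original snippet is truncated here. StereoBM
    # and the cam_config.Q reprojection matrix are assumptions consistent with
    # the comments above.)
    stereo = cv2.StereoBM_create(numDisparities=16 * num, blockSize=blockSize)
    disp = stereo.compute(frame_left_gs, frame_right_gs)
    threeD = cv2.reprojectImageTo3D(disp.astype(np.float32) / 16.0, cam_config.Q)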
Example #13
class MainGUI:

	def __init__(self, root):

		# Context variable declarations and loading		
		self.running = False
		self.WIDTH = 224
		self.HEIGHT = 224
		self.thresh = 127
		self.round = 0
		self.minimum_joints = 4
		self.path = './images/'		
		self.mdelay_sec = 10
		self.mtick = self.mdelay_sec
		self.mask = None
		self.calibrate = True # Flag to show calibration pose over camera feed
		self.calibration_pose = cv2.imread('./images/cal_pose.jpg', cv2.IMREAD_COLOR)
		
		#Loading model and model data
		with open('./tasks/human_pose/human_pose.json', 'r') as f:
			human_pose = json.load(f)

		self.topology = trt_pose.coco.coco_category_to_topology(human_pose)

		self.num_parts = len(human_pose['keypoints'])
		self.num_links = len(human_pose['skeleton'])
		
		self.data = torch.zeros((1, 3, self.HEIGHT, self.WIDTH)).cuda()

		self.OPTIMIZED_MODEL = './tasks/human_pose/resnet18_baseline_att_224x224_A_epoch_249_trt.pth'
		self.model_trt = TRTModule()
		self.model_trt.load_state_dict(torch.load(self.OPTIMIZED_MODEL))

		self.mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
		self.std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
		self.device = torch.device('cuda')
	
		self.parse_objects = ParseObjects(self.topology)
		self.draw_objects = DrawObjects(self.topology)

		# Start camera
		if USBCam:
			self.camera = USBCamera(width=self.WIDTH, height=self.HEIGHT, capture_fps=30)
		else:
			self.camera = CSICamera(width=self.WIDTH, height=self.HEIGHT, capture_fps=30)
		
		self.frame=Tk.Frame(root)
		self.root=root
		# Create editable title 
		self.titleVar = Tk.StringVar()
		self.title= Tk.Label(root, textvariable=self.titleVar, font="Verdana 36")
		self.titleVar.set("Pose Estimation Game")
		self.title.pack(side=Tk.TOP)
		self.frame.pack(side=Tk.LEFT, fill=Tk.BOTH, expand=1)	
		
		# Create image capture figure
		# Set as Frame with three possible images (live feed, mask/pose to make, image captured)
		# Image row
		self.im_row = Tk.Frame(self.frame)
		self.feed_label = Tk.Label(self.im_row)
		self.feed_label.pack(side=Tk.LEFT)
		self.mask_label = Tk.Label(self.im_row)
		self.pose_label = Tk.Label(self.im_row)
		
		# Create editable description label
		self.desTextVar = "Please select an option from the right"
		self.desText = Tk.Label(self.frame, text=self.desTextVar, font="Verdana 12")

		#Create Combobox for selection (Steps are currently in comments)
		#Grab maps from repository
		#Parse map names to develop choices
		#group map names into array
		self.levels = []

		choices = ["Easy", "Medium", "Hard"]
		#Put map names in combo box	
		self.ddVar = Tk.StringVar()
		self.ddVar.set('Select a Choice')
		self.dropDown = Tk.OptionMenu(self.frame, self.ddVar, *choices)
		
		# Bind a callback that loads all images for the level when an option
		# is selected in the dropdown menu
		self.ddVar.trace('w', self.levels_select)

		# Create initial button panel
		self.buttonPanel = ButtonPanel(root)


		self.im_row.pack()
		self.desText.pack()
		self.buttonPanel.pack()
		self.root.after(10, self.camera_loop)
		MainGUI.updateToTitle(self)

	def updateToTitle(self):
		# unpack unused widgets
		self.dropDown.pack_forget()
		self.mask_label.pack_forget()
		self.pose_label.pack_forget()

		# Show wireframe and calibration pose on title screen
		self.calibrate = True
		self.running = False

		# build title frame
		self.titleVar.set("Pose Estimation Game")
		self.desText.configure(text ="Please select an option from the right")
		
		# set button commands
		self.buttonPanel.button1.configure(text="Pose Now!", command=lambda:MainGUI.updateToSelect(self))
		self.buttonPanel.button2.configure(text="Exit",command=self.root.destroy)

	def updateToSelect(self):
		# reset possible levels
		self.levels = []
		self.round = 0
		self.total = 0
		self.ddVar.set('Select a Choice')

		# unpack unused widgets
		self.pose_label.pack_forget()

		# Show only camera feed
		self.calibrate = False
		self.running = False

		# Reset mask object
		self.mask_label.configure(image='')
		self.mask = None

		# Build select frame
		self.titleVar.set("Select Your Pose")
		self.desText.configure(text ="Select an Option from Below")
		self.dropDown.pack()

		# Show mask (or representative image) from selected choice
		self.mask_label.pack(side=Tk.LEFT)

		# set button commands
		self.buttonPanel.button1.configure(text="Select", command=lambda:MainGUI.updateToPose(self))
		self.buttonPanel.button2.configure(text="Main Menu", command=lambda:MainGUI.updateToTitle(self))



	def updateToPose(self):
		# Check for valid difficulty selection and cancel if invalid
		curSelection = self.ddVar.get()
		if(curSelection == "Select a Choice"):
			return
		
		# unpack unused widgets
		self.dropDown.pack_forget()
		self.pose_label.pack_forget()

		# Show only camera feed
		self.calibrate = False

		# Select mask to use for this round
		mask_img=cv2.imread(self.levels[self.round], cv2.IMREAD_GRAYSCALE)
		self.mask=cv2.threshold(mask_img, self.thresh, 255, cv2.THRESH_BINARY)[1]

		# Show mask to user
		img = Image.fromarray(self.mask)
		imgtk = ImageTk.PhotoImage(image=img)
		self.mask_label.imgtk = imgtk
		self.mask_label.configure(image=imgtk)	


		self.titleVar.set("Pose Now")
		self.buttonPanel.button1.configure(text="Main Menu", command=lambda:MainGUI.updateToTitle(self))
		self.buttonPanel.button2.configure(text=" ", command=MainGUI.blankCommand)
		self.running = True
		timer = 10
		self.desText.configure(text = "Time to Evaluation: " + str(timer) + "s")
		self.root.after(1000, lambda:MainGUI.countDown(self, timer))
		
	def pose_estimate(self):
		# Run pose estimation and display overlay
		img = self.img
		data = self.preprocess(img)
		cmap, paf = self.model_trt(data)
		cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
		counts, objects, peaks = self.parse_objects(cmap, paf)#, cmap_threshold=0.15, link_threshold=0.15)
		self.draw_objects(img, counts, objects, peaks)

		# Extract point coordinates
		height = img.shape[0]
		width = img.shape[1]
		objcnt = 0
		count = int(counts[0])
		points = []
		for i in range(count):
			obj = objects[0][i]
			C = obj.shape[0]
			for j in range(C):
				k = int(obj[j])
				if k>= 0:
					peak = peaks[0][j][k]
					x = round(float(peak[1]) * width)
					y = round(float(peak[0]) * height)
					points.insert(objcnt, [x, y])
					objcnt = objcnt+1


		overlay = cv2.cvtColor(self.mask, cv2.COLOR_GRAY2RGB)
		alpha = 0.3
		cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0 , img)
		
		img = Image.fromarray(img)
		imgtk = ImageTk.PhotoImage(image=img)
		self.pose_label.imgtk = imgtk
		self.pose_label.configure(image=imgtk)

		return points

	def pose_score(self, points):
		if len(points) < self.minimum_joints:
			return None # Return no score if no pose detected
		
		correct = 0
		# Locate points in mask and mark if point is over mask or not
		for point in points:
			xi = point[0]
			yi = point[1]
			point_val = self.mask[yi, xi]
			print("Point: "+str(xi)+", "+str(yi))
			# print("OBJx = ",xi)
			# print("OBJy = ",yi)
			# print(point_val)
			if point_val >= 255:
				print ('Correct!')
				correct = correct + 1
			else:
				print ('Wrong!')
		score = (correct / len(points)) * 100
		return score
	
	def updateToEval(self):
		# Show only camera feed
		self.calibrate = False
		
		# Show image from pose estimation
		self.pose_label.pack(side=Tk.LEFT)

		# Run pose estimation and return list of identified coordinates
		calc_points = self.pose_estimate()
		
		# Get a score based on correct points over mask
		score = self.pose_score(calc_points)


		
		if score is not None:		
			pretext="Pose Accuracy: " + str(round(score, 2))+ "%"
			self.total = self.total + score
		else:
			pretext="Didn't detect a pose from player."
			self.total = self.total + 0
		
		#Move to next round counter
		self.round = self.round + 1

		#Calculate average score and add text
		avg_score = self.total/self.round		
		totaltext = pretext+"\nAverage Score: " + str(round(avg_score, 2))+"%"
		self.desText.configure(text=totaltext)		
		
		self.titleVar.set("Pose Evaluation")
		if self.round < len(self.levels):	
			# If we are not on the last image, give option to move to next image	
			self.buttonPanel.button1.configure(text="Next Pose", command=lambda:MainGUI.updateToPose(self))
			self.buttonPanel.button2.configure(text="Main Menu", command=lambda:MainGUI.updateToTitle(self))
		else:
			# If we are done with all images in the level, give option to return to main menu
			self.buttonPanel.button1.configure(text="Main Menu", command=lambda:MainGUI.updateToTitle(self))
			self.buttonPanel.button2.configure(text="Exit",command=self.root.quit)

	def countDown(self, timer):
		if self.running is True:
			self.desText.configure(text = "Time to Evaluation: " + str(timer) + "s")
			timer = timer - 1
			if(timer >= 0):
				self.root.after(1000, lambda:MainGUI.countDown(self, timer))
			else:
				MainGUI.updateToEval(self)

	#Camera Displays Here
	def camera_loop(self):
		img = self.camera.read()
		self.img = img # Load image into context variable
		if self.calibrate is True:
			data = self.preprocess(img)
			cmap, paf = self.model_trt(data)
			cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
			counts, objects, peaks = self.parse_objects(cmap, paf)#, cmap_threshold=0.15, link_threshold=0.15)
			self.draw_objects(img, counts, objects, peaks)
			overlay = self.calibration_pose
			alpha = 0.3
			cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0 , img)
		img = Image.fromarray(img)
		imgtk = ImageTk.PhotoImage(image=img)
		self.feed_label.imgtk = imgtk
		self.feed_label.configure(image=imgtk)		
		self.root.after(10, self.camera_loop)

	def levels_select(self, *args):
		# grab poses from directory
		curPath = self.path + self.ddVar.get()
		print(curPath)
		for r, d, f in os.walk(curPath):
			for file in f:
				if '.jpg' in file or '.png' in file:
					self.levels.append(curPath + '/' + file)
		print(self.levels)		
		
	def preprocess(self, img):
		image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
		image = PIL.Image.fromarray(image)
		image = transforms.functional.to_tensor(image).to(self.device)
		image.sub_(self.mean[:, None, None]).div_(self.std[:, None, None])
		return image[None, ...]	

	@staticmethod
	def blankCommand():
		print("Error: Button should not be pushed")
Example #14
camera = CSICamera(width=480,  # width assumed; the snippet's first line is truncated
                   height=320,
                   capture_width=1080,
                   capture_height=720,
                   capture_fps=30)

PATH_TO_SAVED_MODEL = './fine_tuned_model/saved_model'
PATH_TO_LABELS = '../cig_butts/tf_record/label_map.pbtxt'

detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)

category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)

while True:
    # get the image
    image = camera.read()  # BGR8 numpy array

    # The input needs to be a tensor
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]

    # input_tensor = np.expand_dims(image_np, 0)
    detections = detect_fn(input_tensor)

    # All outputs are batched tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {
        key: value[0, :num_detections].numpy()
        for key, value in detections.items()
    }
    detections['num_detections'] = num_detections
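
    # (assumed continuation, mirroring the standard TF object-detection
    # tutorial; viz_utils refers to object_detection.utils.visualization_utils)
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image,
        detections['detection_boxes'],
        detections['detection_classes'].astype(int),
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        min_score_thresh=0.5)
    cv2.imshow('detections', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break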
Example #15
# For the CSI camera
from jetcam.csi_camera import CSICamera
# For the servo motor
import board
import busio
# Additional imports used below
import time
import cv2
import torch
from PIL import Image

print("Aneu desconectant")
for j in range(40):
    print(40-j)
    time.sleep(1)

torch.cuda.set_device(0)
camera = CSICamera(width=640, height=480)

#print("Camera initialized")
image = camera.read()
cv2.imwrite('/home/dlinano/Pictures/pic1.jpeg', image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img = Image.fromarray(image)
ori_width, ori_height = img.size

# image transform, to torch float tensor 3xHxW
img = img_transform(img)
img = torch.unsqueeze(img, 0)
#print("img size = " + str(img.size()))

with torch.no_grad():
    orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=False)
    net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=8)
    enc_weights = "/home/dlinano/maweights/ade20k-mobilenetv2dilated-c1_deepsup/encoder_epoch_20.pth"
    net_encoder.load_state_dict(torch.load(enc_weights, map_location='cuda:0'), strict=False)
Example #16
from jetcam.csi_camera import CSICamera
import cv2

camera = CSICamera(width=224,
                   height=224,
                   capture_width=1080,
                   capture_height=720,
                   capture_fps=30)

camera.read()  # read() also stores the frame in camera.value

image = camera.value

#camera.running = True

cv2.imshow("image", image)

cv2.waitKey(5000)

#video = cv2.VideoCapture(0)

#check, frame = video.read()

#print(frame)
Example #17
tsr_fs = TSRframeSave()
tsr_fs.start()
#while display.IsOpen():
while True:
    try:
        # capture the image
        if not csi_camera:
            img, width, height = camera.CaptureRGBA(zeroCopy=True)
            jetson.utils.cudaDeviceSynchronize()
            # create a numpy ndarray that references the CUDA memory
            # it won't be copied, but uses the same memory underneath
            aimg = jetson.utils.cudaToNumpy(img, width, height, 4)
            #print ("img shape {}".format (aimg1.shape))
            aimg1 = cv2.cvtColor(aimg.astype(np.uint8), cv2.COLOR_RGBA2BGR)
        else:
            aimg1 = cv2.flip(camera.read(), -1)
        #
        cFk = cFk + 1
        #
        if save_video:
            # add frame to video
            #video_writer.write (aimg1)
            tsr_vs.save(aimg1)
        # do filter and classification
        kTS = "{}".format(datetime.now().strftime("%Y%m%d-%H%M%S-%f"))
        # on 10watt nvpmodel -m0 && jetson_clocks:
        # img_subrange 28fps
        # check_red_circles 28fps
        # subrange + classify 38fps
        # red detect + classify: approx.30fps-60fps
        ###