def main():
    """Record the sensor's fixed-pattern noise and save it for later offset correction.

    Captures a frame from the HT301 thermal camera (lens assumed covered /
    pointed at a uniform target — TODO confirm intended procedure), stores it
    as float32 in ``noise_pattern_calibration.npy``, and returns 0.
    """
    # create camera object. initial calibration will be executed by constructor
    cap = ht301_hacklib.HT301()
    _, frame = cap.read()  # throwaway read: let the sensor settle after startup
    time.sleep(3)
    _, frame = cap.read()  # read the frame actually used for calibration
    offset = frame.astype(np.float32)
    np.save("noise_pattern_calibration.npy", offset)
    print("Calibration successful!")  # fixed typo: was "successfull"
    cap.release()
    return 0
#!/usr/bin/python3
"""Minimal live viewer for the HT301 thermal camera.

Reads frames, normalizes them with a crude min/max auto-exposure, applies the
JET colormap, and overlays min/max/center temperatures in an OpenCV window.
"""
import numpy as np
import cv2
import math
import ht301_hacklib
import utils
import time

draw_temp = True

cap = ht301_hacklib.HT301()
cv2.namedWindow("HT301", cv2.WINDOW_NORMAL)
while True:
    ret, frame = cap.read()  # read frame from thermal camera
    info, lut = cap.info()   # temperature metadata (min/max/center points)
    frame = frame.astype(np.float32)
    # Sketchy auto-exposure: stretch the raw values to the full 0..255 range
    frame -= frame.min()
    frame /= frame.max()
    frame = (np.clip(frame, 0, 1) * 255).astype(np.uint8)
    frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
    if draw_temp:
        utils.drawTemperature(frame, info['Tmin_point'], info['Tmin_C'], (55, 0, 0))
        utils.drawTemperature(frame, info['Tmax_point'], info['Tmax_C'], (0, 0, 85))
        # NOTE(review): source was truncated mid-call here; the color argument
        # (0, 255, 255) is taken from the sibling streamer script's commented
        # center-spot line — confirm against upstream.
        utils.drawTemperature(frame, info['Tcenter_point'], info['Tcenter_C'],
                              (0, 255, 255))
    # NOTE(review): everything below is reconstructed — the original text was
    # cut off after the drawTemperature call. The named window above implies a
    # display/exit loop of this shape; verify against the upstream example.
    cv2.imshow("HT301", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
def main():
    """Stream colorized HT301 thermal frames to a v4l2 loopback output device.

    Applies the saved noise-pattern offset, auto-gain, a selectable colormap
    (or CLAHE-based detail enhancement), optional 180° flip, and temperature
    overlays, then writes RGB24 frames to VIDEO_OUT. Returns 0 on success,
    -1 if the v4l2 format ioctls fail.

    Relies on module-level globals not visible here: VIDEO_OUT, VID_WIDTH,
    VID_HEIGHT, colormaps, selectedmap, calibration_offset, flipped_camera,
    draw_temp, rc_channel, prev_ch_state, ch_state, mavlink, vehicle,
    cyclecolormaps, and the v4l2 ctypes constants/structs.
    """
    ########### camera #############
    # create camera object. initial calibration will be executed by constructor
    cap = ht301_hacklib.HT301()
    # read calibration array from file
    offset = np.load("noise_pattern_calibration.npy")
    # create a Contrast Limited Adaptive Histogram Equalization object. default: 5.0, (6, 6)
    clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(6, 6))
    # open v4l2 output device
    videooutput = os.open(VIDEO_OUT, os.O_RDWR)
    # configure parameters for output device
    vid_format = v4l2_format()
    vid_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT
    if fcntl.ioctl(videooutput, VIDIOC_G_FMT, vid_format) < 0:
        print("ERROR: unable to get video format!")
        return -1
    framesize = VID_WIDTH * VID_HEIGHT * 3  # RGB24: 3 bytes per pixel
    vid_format.fmt.pix.width = VID_WIDTH
    vid_format.fmt.pix.height = VID_HEIGHT
    # pixel format
    vid_format.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24
    vid_format.fmt.pix.sizeimage = framesize
    vid_format.fmt.pix.field = V4L2_FIELD_NONE
    if fcntl.ioctl(videooutput, VIDIOC_S_FMT, vid_format) < 0:
        print("ERROR: unable to set video format!")
        return -1
    ################################
    # video loop
    while (True):
        # cycle only one map per button press
        # NOTE(review): the sibling variant reads vehicle.channels[rc_channel]
        # here; comparing the bare rc_channel against 1800 only makes sense if
        # rc_channel is updated elsewhere to hold the channel VALUE — confirm.
        if rc_channel > 1800:
            ch_state = True
            if prev_ch_state == False:
                cyclecolormaps()
        else:
            ch_state = False
        prev_ch_state = ch_state
        ret, frame = cap.read()  # read frame from thermal camera
        info, lut = cap.info()  # get hottest and coldest spot and its temperatures
        # automatic gain control
        frame = frame.astype(np.float32)
        if calibration_offset == True:
            # apply calibration offset (subtract noise pattern, keep mean level)
            frame = frame - offset + np.mean(offset)
        frame = (255 * ((frame - frame.min()) /
                        (frame.max() - frame.min()))).astype(np.uint8)  # cast frame to values from 0 to 255
        if colormaps[selectedmap] == -1:
            # digital detail enhancement algorithm (CLAHE), then fall back to
            # the previous colormap entry for colorization
            framebuffer = frame
            clahe.apply(framebuffer, frame)
            frame = cv2.applyColorMap(frame, colormaps[selectedmap - 1])
        else:
            # apply colormap
            frame = cv2.applyColorMap(frame, colormaps[selectedmap])
        if flipped_camera == True:
            # rotate the temperature points with the image if the thermal
            # camera is mounted upside down
            (coldx, coldy) = info['Tmin_point']
            (warmx, warmy) = info['Tmax_point']
            coldestpoint = (VID_WIDTH - coldx, VID_HEIGHT - coldy)
            warmestpoint = (VID_WIDTH - warmx, VID_HEIGHT - warmy)
            frame = cv2.flip(frame, -1)  # flip the frame
        else:
            coldestpoint = info['Tmin_point']
            warmestpoint = info['Tmax_point']
        if draw_temp:
            # color: BGR
            utils.drawTemperature(frame, coldestpoint, info['Tmin_C'], (200, 200, 200))  # coldest spot
            utils.drawTemperature(frame, warmestpoint, info['Tmax_C'], (30, 30, 30))  # hottest spot
            #utils.drawTemperature(frame, info['Tcenter_point'], info['Tcenter_C'], (0, 255, 255))  # center spot
        # output
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # convert opencv bgr to rgb for the v4l2 pixelformat
        written = os.write(videooutput, frame.data)  # write frame to output device
    # NOTE(review): the loop above has no break, so this cleanup is unreachable
    # as written — likely intended to run on shutdown.
    cap.release()
    if mavlink:
        vehicle.close()
    return 0
def main():
    """Stream colorized, flipped HT301 thermal frames to a v4l2 loopback device.

    Simplified variant of the streamer: auto-exposure plus a fixed colormap
    selection, always flips the image 180°, and overlays min/max temperatures.
    Returns 0 on success, -1 if the v4l2 format ioctls fail.

    Relies on module-level globals not visible here: VIDEO_OUT, VID_WIDTH,
    VID_HEIGHT, colormaps, selectedmap, draw_temp, and the v4l2 ctypes
    constants/structs.
    """
    ########### camera #############
    cap = ht301_hacklib.HT301()  # create camera object. initial calibration will be executed by constructor
    videooutput = os.open(VIDEO_OUT, os.O_RDWR)  # open v4l2 output device
    # configure params for output device
    vid_format = v4l2_format()
    vid_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT
    if fcntl.ioctl(videooutput, VIDIOC_G_FMT, vid_format) < 0:
        print("ERROR: unable to get video format!")
        return -1
    framesize = VID_WIDTH * VID_HEIGHT * 3  # RGB24: 3 bytes per pixel
    vid_format.fmt.pix.width = VID_WIDTH
    vid_format.fmt.pix.height = VID_HEIGHT
    # NOTE: change this according to below filters...
    # Choose one from the supported formats on Chrome: YUV420, Y16, Z16, INVZ,
    # YUYV, RGB24, MJPEG, JPEG
    vid_format.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24
    vid_format.fmt.pix.sizeimage = framesize
    vid_format.fmt.pix.field = V4L2_FIELD_NONE
    if fcntl.ioctl(videooutput, VIDIOC_S_FMT, vid_format) < 0:
        print("ERROR: unable to set video format!")
        return -1
    ################################
    while (True):
        # cycle only one map per button press (mavlink RC control disabled)
        ##if vehicle.channels[rc_channel] > 1800:
        ##    ch_state = True
        ##    if prev_ch_state == False:
        ##        cyclecolormaps()
        ##else:
        ##    ch_state = False
        ##prev_ch_state = ch_state
        ret, frame = cap.read()  # read frame from thermal camera
        info, lut = cap.info()  # get hottest and coldest spot and its temperatures
        frame = frame.astype(np.float32)
        if selectedmap != 0:
            # do not apply anything when 0 is selected: original frame
            # NOTE(review): block extent reconstructed from mangled source —
            # when selectedmap == 0 the frame is left as float32, whose buffer
            # size won't match the RGB24 framesize written below; confirm the
            # original indentation / intent.
            # Sketchy auto-exposure
            frame -= frame.min()
            frame /= frame.max()
            frame = (np.clip(frame, 0, 1) * 255).astype(np.uint8)
            frame = cv2.applyColorMap(frame, colormaps[selectedmap])  # apply colormap
        # rotate the temperature points with the image
        (coldx, coldy) = info['Tmin_point']
        (warmx, warmy) = info['Tmax_point']
        coldestpoint = (VID_WIDTH - coldx, VID_HEIGHT - coldy)
        warmestpoint = (VID_WIDTH - warmx, VID_HEIGHT - warmy)
        frame = cv2.flip(frame, -1)  # 180° rotation (flip both axes)
        if draw_temp:
            utils.drawTemperature(frame, coldestpoint, info['Tmin_C'], (0, 0, 255))  # coldest spot
            utils.drawTemperature(frame, warmestpoint, info['Tmax_C'], (255, 0, 0))  # hottest spot
            #utils.drawTemperature(frame, info['Tcenter_point'], info['Tcenter_C'], (0,255,255))  # center spot
        # write frame to output device
        written = os.write(videooutput, frame.data)
    # NOTE(review): the loop above has no break, so this cleanup is unreachable
    # as written — likely intended to run on shutdown.
    cap.release()
    ##vehicle.close()
    return 0