def adjust_exposure(goal):
    """Step the sensor exposure until the frame's mean L (LAB) value is
    within 10 of `goal`, or the step size has collapsed.

    The step (`adjuster`) starts at 16384 us and is halved whenever the
    search direction reverses, giving a coarse-to-fine search.
    """
    direction = 0
    UP = 1
    DOWN = -1
    sensor.set_auto_gain(False)  # manual gain so exposure changes are not undone
    img = sensor.snapshot()
    stats = img.get_statistics()
    adjuster = 16384  # initial exposure step in microseconds
    while(abs(stats.l_mean() - goal) > 10 and adjuster > 2):
        if(stats.l_mean() < goal):
            # Image too dark: increase exposure.
            before_exposure_us = sensor.get_exposure_us()
            sensor.set_auto_exposure(False, exposure_us = before_exposure_us + adjuster)
            if (sensor.get_exposure_us() == before_exposure_us):
                # Sensor clamped the value (hit a limit): force loop exit.
                adjuster = 2
            if(direction != UP):
                # Direction reversed: halve the step.
                adjuster = adjuster >> 1
            direction = UP
        else:
            # Image too bright: decrease exposure.
            before_exposure_us = sensor.get_exposure_us()
            sensor.set_auto_exposure(False, exposure_us = before_exposure_us - adjuster)
            if (sensor.get_exposure_us() == before_exposure_us):
                adjuster = 2
            if(direction != DOWN or sensor.get_exposure_us() < adjuster):
                adjuster = adjuster >> 1
            direction = DOWN
        img = sensor.snapshot()
        stats = img.get_statistics()
def initialize_camera():
    """Reset and configure the sensor for QVGA RGB565 capture.

    sensor.reset() may fail transiently, so it is retried indefinitely;
    from the 20th consecutive failure onward an error banner is drawn on
    the LCD while retrying.
    """
    err_counter = 0
    while True:
        try:
            sensor.reset()  # Reset sensor may fail, so try a few times
            break
        except Exception:  # was a bare `except:`; narrowed so KeyboardInterrupt etc. escape
            err_counter = err_counter + 1
            # Fix: was `== 20`, which drew the banner only once; keep showing
            # it on every failed attempt from the 20th onward.
            if err_counter >= 20:
                lcd.draw_string(lcd.width() // 2 - 100, lcd.height() // 2 - 4,
                                "Error: Sensor Init Failed", lcd.WHITE, lcd.RED)
            time.sleep(0.1)
            continue
    sensor.set_pixformat(sensor.RGB565)
    # The memory can't analyze models with resolution higher than QVGA,
    # so better we train the model with QVGA too.
    sensor.set_framesize(sensor.QVGA)  # QVGA=320x240
    #sensor.set_framesize(sensor.VGA)  # VGA=640x480
    # Optimize these settings to get best picture quality.
    sensor.set_auto_exposure(False, exposure_us=500)
    sensor.set_auto_gain(False)  #, gain_db=100) # must turn this off to prevent image washout...
    sensor.set_auto_whitebal(True)  # turn this off for color tracking
    sensor.run(1)
def init(is_debug, pixformat, delay_time):
    """Initialize the camera and both UARTs, then set CompetitionScene.

    Args:
        is_debug: True selects debug mode (CompetitionScene = 0), else 1.
        pixformat: "GRAY" selects grayscale; any other value selects RGB565.
        delay_time: milliseconds of frames to skip so settings take effect.
    """
    # Close the UARTs first to prevent overflow during initialization.
    uart.deinit()
    uart2.deinit()
    sensor.reset()
    # Fix: the `pixformat` parameter was previously ignored and RGB565 was
    # always selected; honor it the same way the sibling init() does.
    if pixformat == "GRAY":
        sensor.set_pixformat(sensor.GRAYSCALE)
    else:
        sensor.set_pixformat(sensor.RGB565)   # RGB565 (previous fixed default)
    sensor.set_framesize(sensor.QVGA)    # 320*240
    sensor.set_gainceiling(128)          # gain ceiling: 2,4,8,16,32,64,128
    sensor.set_contrast(3)               # contrast: -3..3
    sensor.set_brightness(0)             # brightness: -3..+3
    sensor.set_saturation(3)             # saturation: -3..+3
    sensor.set_auto_exposure(True)       # auto exposure
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)          # must be off for color tracking
    sensor.set_auto_whitebal(False)      # must be off for color tracking
    # Re-open the UARTs.
    uart.init(115200, timeout_char=1000)
    uart2.init(115200, timeout_char=1000)
    # Record whether we are in debug mode.
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
def sensor_config(data):
    """Apply remotely supplied sensor settings and echo them back.

    `data` is a struct-packed "<fIfff" payload: gain (dB), exposure (us),
    then the R/G/B white-balance gains (dB). Clears the module-level
    `processing` flag and returns the same settings re-packed.
    """
    global processing
    gain, exposure, *wb = struct.unpack("<fIfff", data)
    sensor.set_auto_gain(False, gain)
    sensor.set_auto_exposure(False, exposure)
    sensor.set_auto_whitebal(False, (wb[0], wb[1], wb[2]))
    processing = False
    # Echo the applied configuration back to the caller.
    return struct.pack("<fIfff", gain, exposure, wb[0], wb[1], wb[2])
def initialize():
    """Reset the sensor and lock it to fixed, fast capture settings."""
    sensor.reset()
    sensor.set_pixformat(
        sensor.RGB565)  # grayscale is faster (160x120 max on OpenMV-M7) #GRAYSCALE, RGB565, BAYER
    sensor.set_framesize(sensor.QVGA)
    sensor.set_auto_whitebal(False)
    sensor.set_auto_gain(False)
    sensor.set_auto_exposure(False, exposure_us=100)  # make smaller to go faster
    sensor.skip_frames(time=2000)  # wait for the settings to take effect
def setLighting(case):
    """Switch to manual gain/exposure, then apply lighting preset `case`.

    Presets: 1=auto, 2=home, 3=night, 4=cloudy, 5=office. Any other value
    only disables auto gain/exposure and applies no preset.
    """
    sensor.set_auto_gain(False)
    sensor.set_auto_exposure(False)
    if case == 1:
        light_mode_auto()
    elif case == 2:
        light_mode_home()
    elif case == 3:
        light_mode_night()
    elif case == 4:
        light_mode_cloudy()
    elif case == 5:
        light_mode_office()
def reset_sensor():
    """Reset and configure the sensor for line following.

    Crops to a band of the frame controlled by FRAME_WIDE / FRAME_REGION /
    BOTTOM_PX_TO_REMOVE, and picks RGB vs grayscale based on
    COLOR_LINE_FOLLOWING.
    """
    sensor.reset()
    sensor.set_pixformat(
        sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
    sensor.set_framesize(FRAME_SIZE)
    sensor.set_vflip(True)
    sensor.set_hmirror(True)
    # NOTE(review): set_windowing takes (x, y, w, h); the 3rd/4th values here
    # look computed like coordinates rather than a width/height -- confirm
    # the intended crop region.
    sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * (1.0 - FRAME_REGION)), \
                          int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * FRAME_REGION) - BOTTOM_PX_TO_REMOVE))
    sensor.set_auto_exposure(True)
    #sensor.set_auto_exposure(False, exposure_us=500)
    sensor.skip_frames(time=1400)  # wait for settings to take effect
    if COLOR_LINE_FOLLOWING: sensor.set_auto_gain(False)      # must be off for color tracking
    if COLOR_LINE_FOLLOWING: sensor.set_auto_whitebal(False)  # must be off for color tracking
def init(self, robot_):
    """Select per-robot color thresholds/crop window, then configure the sensor.

    robot_: ROBOT_O or ROBOT_P2; chooses the LAB thresholds for
    [ball, yellow goal, blue goal] and the sensor windowing.
    """
    self.robot = robot_
    if self.robot == self.ROBOT_O:  # O_bot
        self.thresholds = [ (35, 100, 26, 78, 22, 72),   # Ball
                            (68, 100, -19, 27, 41, 81),  # Yellow Goal
                            (20, 41, -6, 15, -55, -15) ] # Blue Goal
        self.window = (59, 18, 184, 181)
    elif self.robot == self.ROBOT_P2:  # P2_bot
        self.thresholds = [ (39, 100, 26, 78, 22, 72),   # Ball
                            (68, 100, -19, 27, 41, 81),  # Yellow Goal
                            (20, 41, -6, 25, -55, -15) ] # Blue Goal
        self.window = (71, 4, 193, 191)
    # sensor setup
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)  # Resolution
    sensor.set_windowing(self.window)
    sensor.skip_frames(time=1000)
    sensor.set_auto_exposure(False)
    sensor.set_auto_whitebal(False)
    # Need to let the above settings get in...
    sensor.skip_frames(time=250)
    # === GAIN ===
    # Freeze gain at the value auto mode settled on.
    curr_gain = sensor.get_gain_db()
    sensor.set_auto_gain(False, gain_db=curr_gain)
    # === EXPOSURE ===
    # Halve the auto-selected exposure.
    curr_exposure = sensor.get_exposure_us()
    sensor.set_auto_exposure(False, exposure_us=int(curr_exposure * 0.5))
    # === WHITE BAL ===
    sensor.set_auto_whitebal(
        False, rgb_gain_db=(-6.02073, -3.762909, 3.33901))  # Must remain false for blob tracking
    sensor.set_brightness(2)
    sensor.set_contrast(2)
    sensor.set_saturation(2)
    sensor.skip_frames(time=500)  # let the final settings take effect
def init(is_debug, delay_time):
    """Initialize the sensor (grayscale QVGA), UART and LCD; set CompetitionScene.

    Args:
        is_debug: True selects debug mode (CompetitionScene = 0), else 1.
        delay_time: milliseconds of frames to skip so settings take effect.
    """
    uart.deinit()  # keep the UART quiet during sensor bring-up
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_contrast(3)
    sensor.set_brightness(-3)
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time = delay_time)
    sensor.set_auto_whitebal(False)
    uart.init(115200, timeout_char=1000)
    lcd.init()
    # 0 = debug mode, 1 = competition mode
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
def exposure_compensation():
    """
    use P control to set the exposure time according to the clamps brightness
    """
    P = 2000            # proportional gain (softened after a coarse correction)
    pid = P_control(P)
    pid.SetPoint = 40   # target mean L (LAB) over the clamp ROIs
    END = 50            # maximum number of iterations
    expo = sensor.get_exposure_us()
    for i in range(1, END):
        img = sensor.snapshot()
        llist = []
        # Sample the median L value of each clamp region.
        for r in clamp_roi(roi):
            lab = lab_median(img, r)
            img = img.draw_rectangle(r)
            llist.append(lab[0])
        l_mean = int(sum(llist) / len(llist))
        print("l_mean = ", l_mean)
        img = img.draw_rectangle(roi)
        if abs(l_mean - pid.SetPoint) <= 1:
            # Close enough to the target: done.
            sensor.set_saturation(2)
            return
        pid.update(l_mean)
        output = pid.output
        expo += int(output)
        if expo < 10000:
            # Exposure dropped too low: fall back to the sensor's current
            # value and soften the controller gain.
            sensor.set_auto_gain(0)
            expo = sensor.get_exposure_us()
            sensor.set_auto_exposure(0, expo)
            P = P / 2
            continue
        if sensor.get_exposure_us() >= 120190:
            # max exposure time reached
            sensor.set_saturation(2)
            return
        sensor.set_auto_exposure(0, expo)
        sensor.skip_frames(n=60)  # let the new exposure settle
        print("exp time", sensor.get_exposure_us())
def init(self, gain_db=0, shutter_us=500000, framesize=sensor.WQXGA2, force_reset=True, flip=False):
    """Configure the camera (or the simulated one) with fixed gain/shutter.

    Negative gain_db / shutter_us select the corresponding auto mode.
    The hardware is re-initialized only when forced, after an error, or
    when a requested setting differs from the cached one.
    """
    if self.simulate:
        # No hardware: just record the requested settings.
        self.shutter = shutter_us
        self.gain = gain_db
        self.snap_started = False
        return
    if force_reset or self.has_error or self.gain != gain_db or self.shutter != shutter_us or self.framesize != framesize or self.flip != flip:
        sensor.reset()
        sensor.set_pixformat(self.pixfmt)
        sensor.set_framesize(framesize)
        if flip:  # upside down camera
            sensor.set_vflip(True)
            sensor.set_hmirror(True)
        self.flip = flip
        self.framesize = framesize
        if shutter_us < 0:
            sensor.set_auto_exposure(True)
        else:
            if shutter_us > 500000:
                # Long exposures need the sensor PLL slowed down via raw
                # register writes (0x3036/0x3037), in escalating steps.
                sensor.__write_reg(0x3037, 0x08)  # slow down PLL
                if shutter_us > 1000000:
                    pyb.delay(100)
                    sensor.__write_reg(0x3037, 0x18)  # slow down PLL
                    if shutter_us > 1500000:
                        pyb.delay(100)
                        sensor.__write_reg(0x3036, 80)  # slow down PLL
                        # warning: doesn't work well, might crash
                pyb.delay(200)
            sensor.set_auto_exposure(False, shutter_us)
        self.shutter = shutter_us
        if gain_db < 0:
            sensor.set_auto_gain(True)
        else:
            sensor.set_auto_gain(False, gain_db)
        self.gain = gain_db
        self.wait_init = 2  # frames to discard before trusting the output
        self.width = sensor.width()
        self.height = sensor.height()
def init(is_debug, pixformat, delay_time):
    """Bring up the camera at QQVGA, then re-open the UART.

    Args:
        is_debug: True selects debug mode (CompetitionScene = 0), else 1.
        pixformat: "GRAY" selects grayscale, "RGB" selects RGB565; any
            other value leaves the pixel format unset.
        delay_time: milliseconds of frames to skip after enabling auto exposure.
    """
    global CompetitionScene
    uart.deinit()  # keep the UART quiet during sensor bring-up
    sensor.reset()
    formats = {"GRAY": sensor.GRAYSCALE, "RGB": sensor.RGB565}
    if pixformat in formats:
        sensor.set_pixformat(formats[pixformat])
    sensor.set_framesize(sensor.QQVGA)
    sensor.set_gainceiling(128)
    sensor.set_contrast(3)
    sensor.set_brightness(0)
    sensor.set_saturation(3)
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)      # must be off for color tracking
    sensor.set_auto_whitebal(False)  # must be off for color tracking
    uart.init(115200, timeout_char=1000)
    CompetitionScene = 0 if is_debug == True else 1
def adjust_exposure(self, goal):
    """Adjust the exposure of the sensor until the average L (in LAB)
    value reaches the desired goal.

    Uses a halving step (`self.adjuster`) that shrinks whenever the search
    direction reverses or the sensor stops responding to changes; the step
    is reset to 16384 for the next call on exit.
    """
    self.logger.debug("goal=", goal)
    direction = 0
    UP = 1
    DOWN = -1
    sensor.set_auto_gain(False)  # manual gain so exposure changes are not undone
    img = sensor.snapshot() if self.snapshot == None else self.snapshot
    stats = img.get_statistics()
    adjuster = self.adjuster
    while (abs(stats.l_mean() - goal) > 10 and adjuster > 2):
        self.logger.debug("adjuster=", adjuster, ", sensor.get_exposure_us()=", sensor.get_exposure_us())
        if (stats.l_mean() < goal):
            # Image too dark: increase exposure.
            before_exposure_us = sensor.get_exposure_us()
            sensor.set_auto_exposure(False, exposure_us=before_exposure_us + adjuster)
            if (sensor.get_exposure_us() == before_exposure_us):
                # Sensor clamped the value: force the loop to terminate.
                adjuster = 2
            if (direction != UP):
                # Direction reversed: halve the step.
                adjuster = adjuster >> 1
            direction = UP
        else:
            # Image too bright: decrease exposure.
            before_exposure_us = sensor.get_exposure_us()
            sensor.set_auto_exposure(False, exposure_us=before_exposure_us - adjuster)
            if (sensor.get_exposure_us() == before_exposure_us):
                adjuster = 2
            if (direction != DOWN or sensor.get_exposure_us() < adjuster):
                adjuster = adjuster >> 1
            direction = DOWN
        img = self.take()
        stats = img.get_statistics()
    # Reset the step for the next call.
    self.adjuster = 16384
def LENetTest(loopCnt=600, isFull=False, barLen=80):
    """Run the LeNet digit classifier on live frames for ~loopCnt ticks.

    Draws detection boxes and a progress bar on each frame, prints every
    detection, and finally prints the moving average of the per-frame
    network search time.

    Args:
        loopCnt: test duration in time.ticks() units.
        isFull: unused; kept for interface compatibility.
        barLen: progress-bar length, forwarded to DrawPgsBar.
    """
    sensor.reset()  # Reset and initialize the sensor.
    sensor.set_contrast(3)
    # Fix: comments previously claimed RGB565/QVGA; the code actually uses
    # grayscale CIF cropped to a 96x96 window.
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.CIF)
    sensor.set_windowing((96, 96))  # 96x96 window for the network input
    sensor.set_auto_gain(True)
    sensor.set_auto_whitebal(False)
    sensor.set_auto_exposure(False)
    sensor.skip_frames(time=400)  # Wait for settings take effect.
    net = nn.load('/lenet.network')
    labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    clock = time.clock()
    tAvg = 0.0
    startTick = time.ticks()
    while (True):
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        t0 = time.ticks()
        # Binarize a copy so the network sees clean digit shapes.
        tmp_img = img.copy().binary([(120, 255)], invert=True)
        lst = net.search(tmp_img, threshold=0.8, min_scale=1.0, scale_mul=0.8, \
                         x_overlap=-1, y_overlap=-1, contrast_threshold=0.5, softmax=False)
        t1 = time.ticks() - t0
        # Exponential moving average of the search time.
        tAvg = tAvg * 0.9 + t1 * 0.1
        img.draw_string(4, 8, 'LENET', color=(0, 0, 0))
        # Fix: removed the unused `lnLen` computation; DrawPgsBar derives the
        # remaining-time bar itself.
        DrawPgsBar(img, barLen, loopCnt, startTick)
        for obj in lst:
            print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
            img.draw_rectangle(obj.rect(), color=(255, 255, 255))
            img.draw_string(4, 4, labels[obj.index()])
    print('algo time cost : %.2f ms' % (tAvg))
# --- Configure the local camera, then mirror its settings to the remote cam ---
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
# Center-crop to the requested image size when it differs from the sensor's.
if img_width != sensor.width() or img_height != sensor.height():
    sensor.set_windowing((int((sensor.width()-img_width)/2),int((sensor.height()-img_height)/2),img_width,img_height))
sensor.skip_frames(time = 2000)
# get the current exposure and gains and send them to the remote cam so that
# the 2 cams have the same image settings
sensor.snapshot()
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
print("exposure is " + str(exposure_us))
rgb_gain_db = sensor.get_rgb_gain_db()
# Freeze the local camera at the values auto mode settled on.
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)
# Push the same settings to the remote camera.
# Payload "<fIfff": gain (dB), exposure (us), R/G/B white-balance gains (dB).
result = interface.call("sensor_config", struct.pack("<fIfff", gain_db, exposure_us, rgb_gain_db[0], rgb_gain_db[1], rgb_gain_db[2]))
if result is not None:
    gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db = struct.unpack("<fIfff", result)
    print("ret is " + str(exposure_us))
else:
    # apparently something went wrong with the remote cam
    # stopping there in this case
    print("The remote cam did not respond correcly. Stopping here...")
    exit(1)
clock = time.clock()
idx = 0
from pyb import LED
# NOTE(review): `thresholds` below has a single entry, so an index of 4
# would be out of range if used as a list index -- confirm how
# threshold_index is consumed by the rest of the file.
threshold_index = 4  # 0 for red, 1 for green, 2 for blue
# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green/blue things. You may wish to tune them...
thresholds = [(50, 100, -8, 26, -4, 18)]
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((500, 440))
sensor.skip_frames(time = 2000)  # wait for settings to take effect
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
sensor.set_auto_exposure(False, 1200)  # fixed 1200 us exposure
clock = time.clock()
a = 0
led1 = LED(1)
led2 = LED(2)
uart = UART(3, 115200)  # UART 3, baud rate 115200

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.
def send_data_packet(x, y):
    """Send an (x, y) coordinate over the UART framed as 0xAA ... 0xAE."""
    temp = struct.pack(">bHHb",  # one header byte, two uint16 values, one trailer byte
                       0xAA,     # frame header
                       int(x),   # payload 1
                       int(y),   # payload 2
                       0xAE)     # frame trailer
    uart.write(temp)
# NOTE(review): this chunk begins mid-function -- the `def` line and loop
# header of the enclosing max-blob helper are outside this view; the
# indentation below is reconstructed.
        if blob.pixels() > max_size:
            max_blob = blob
            max_size = blob.pixels()
    return max_blob

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((320, 240))
sensor.skip_frames(time = 1000)  # wait for settings to take effect
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
sensor.set_brightness(0)
clock = time.clock()
# Lock exposure at a scaled version of the auto-selected value.
exposure_time = sensor.get_exposure_us()
sensor.set_auto_exposure(False, \
    exposure_us = int(exposure_time * EXPOSURE_TIME_SCALE))
# Tracked position state (current and previous frame).
xPositionNow = 0
yPositionNow = 0
xPositionLast = 0
yPositionLast = 0
imageSize = 128
while(True):
    clock.tick()
    img = sensor.snapshot()
    blobs = img.find_blobs([thresholds], roi=roi1, pixels_threshold=5, area_threshold=5, merge=True)
    for b in blobs:
        # Keep only the largest blob and mark it on the image.
        max_blob = find_max(blobs)
        img.draw_rectangle(max_blob.rect())
# This example shows off how to find circles in the image using the Hough # Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform # # Note that the find_circles() method will only find circles which are completely # inside of the image. Circles which go outside of the image/roi are ignored... import sensor, image, time, pyb import frc_can from pyb import UART from math import sqrt sensor.reset() sensor.set_pixformat(sensor.RGB565) # grayscale is faster sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time=2500) sensor.set_auto_exposure(False) sensor.set_auto_gain(False) sensor.set_auto_whitebal(False) pyb.LED(1).off() pyb.LED(3).off() original_exposure = sensor.get_exposure_us() sensor.set_auto_exposure(False, int(.50 * original_exposure)) clock = time.clock() # Histogram baseline for yellow power-cell #hist = [39, 90, -40, 29, 44, 95] hist = [37, 98, -68, 21, 34, 99]
# ||| UART SETUP ||| uart = UART(3, 9600, timeout_char = 1000) # ||| SENSOR SETUP ||| sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time = 1000) # ||| GAIN ||| curr_gain = sensor.get_gain_db() sensor.set_auto_gain(False, gain_db=curr_gain) # ||| EXPOSURE ||| curr_exposure = sensor.get_exposure_us() sensor.set_auto_exposure(False, exposure_us = int(curr_exposure)) # ||| WHITE BAL ||| sensor.set_auto_whitebal(False, rgb_gain_db=curr_wbal) # ||| SET VALUES & WINDOWING ||| sensor.set_windowing(vwin_val) sensor.set_saturation(3) sensor.set_brightness(3) #Change to -3 sensor.set_contrast(3) # ||| INDICATOR LED ||| LED(1).on() time.sleep(100) LED(1).off()
SEARCHING_PIXEL_THRESHOLD = SEARCHING_AREA_THRESHOLD
TRACKING_RESOLUTION = sensor.QQVGA
TRACKING_AREA_THRESHOLD = 256
TRACKING_PIXEL_THRESHOLD = TRACKING_AREA_THRESHOLD
TRACKING_EDGE_TOLERANCE = 0.05  # Blob can move 5% away from the center.
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(SEARCHING_RESOLUTION)
sensor.skip_frames(time = 1000)  # Wait for settings take effect.
clock = time.clock()  # Create a clock object to track the FPS.
sensor.set_auto_gain(False)  # Turn off as it will oscillate.
sensor.set_auto_exposure(False, exposure_us=EXPOSURE_MICROSECONDS)
sensor.skip_frames(time = 1000)
# sensor_w and sensor_h are the image sensor raw pixels w/h (x/y are 0 initially).
x, y, sensor_w, sensor_h = sensor.ioctl(sensor.IOCTL_GET_READOUT_WINDOW)
while(True):
    clock.tick()
    img = sensor.snapshot()
    # We need to find an IR object to track - it's likely to be really bright.
    blobs = img.find_blobs(TRACKING_THRESHOLDS,
                           area_threshold=SEARCHING_AREA_THRESHOLD,
                           pixels_threshold=SEARCHING_PIXEL_THRESHOLD)
    if len(blobs):
import sensor, image, time
from pyb import *
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(10)
sensor.set_auto_exposure(False)
# Fixed white-balance gains (R, G, B in dB); auto WB must be off for color tracking.
sensor.set_auto_whitebal(False, (-6.317097, -6.02073, -6.774588))
sensor.set_brightness(-3)
uart = UART(3, 115200, timeout_char=1000)
# LAB color thresholds: (L min, L max, A min, A max, B min, B max).
red_threshold = (51, 9, 15, 88, -16, 66)      # (82, 18, 40, 90, 3, 51)
yellow_threshold = (82, 13, -29, 55, 26, 68)  # (82, 36, 2, 53, 29, 71)
blue_threshold = (68, 24, -34, 9, -59, 3)     # (64, 19, -7, 28, -60, -26)
area = 3500            # minimum blob area of interest
time_out = 120 * 1000  # overall timeout in milliseconds
servo1 = Servo(1)
servo2 = Servo(2)

def send_finish_flag():
    """Send the finish marker bytes (0xFF 0x46 0x23) over the UART,
    pausing 1 ms between bytes."""
    uart.writechar(0xFF)
    delay(1)
    uart.writechar(0x46)
    delay(1)
    uart.writechar(0x23)
    delay(1)
# English: labels = ["9", "8", "7", "10", "ACE", "JACK", "QUEEN", "KING"] # German: #labels=["9","8","7","10","Ass","Bube","Dame","Koenig"] sensor.reset() # initialize color images sensor.set_pixformat(sensor.RGB565) # The memory can't analyze models with resolution higher than QVGA # and the model is trained with QVGA too sensor.set_framesize(sensor.QVGA) #QVGA=320x240 # It is a good idea to tune the camera exposure # and disable the auto-gain function. # Please tune this settings to get best picture quality sensor.set_auto_exposure(False, exposure_us=125) # 500 sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(True) # turn this off for color tracking # only use a squared window sensor.set_windowing((224, 224)) sensor.run(1) lcd.clear() while (True): # get an image snapshot img = sensor.snapshot() # let the model find something... fmap = kpu.forward(task, img) plist = fmap[:]
def __init__(self, draw_stats=False, draw_lines=False, draw_lap_times=False,
             draw_timer=False, draw_line_stats=False, save_first_frame=False,
             flip_text=False, mirror_text=False, rotate_text=0,
             flip_travel_direction=False):
    """Set up LEDs, IO pins, the camera, the LCD and all lap-timing state.

    The draw_* flags toggle on-screen overlays; flip/mirror/rotate_text
    control text orientation on the LCD; flip_travel_direction reverses
    the expected top-to-bottom travel direction.
    """
    # Setup hardware
    self.red_led = pyb.LED(1)
    self.green_led = pyb.LED(2)
    self.blue_led = pyb.LED(3)
    self.infra_led = pyb.LED(4)
    self.usb_serial = pyb.USB_VCP()  # Serial Port
    # Auto gain and white balance settings
    #sensor.set_auto_gain(False) # must be turned off for color tracking
    #sensor.set_auto_whitebal(False) # must be turned off for color tracking
    # Set line finding parameters
    self.min_degree = 45
    self.max_degree = 135
    self.threshold = 1000
    self.theta_margin = 25  # Max angle of lines to be merged and considered one
    self.rho_margin = 25    # Max spacing between lines along the rho axis
    self.x_stride = 2
    self.y_stride = 8
    # Configure IO pins for signaling
    self.action_pin = pyb.Pin('P7', pyb.Pin.OUT_OD, pyb.Pin.PULL_NONE)
    self.page_pin = pyb.Pin('P8', pyb.Pin.OUT_OD, pyb.Pin.PULL_NONE)
    self.lap_pin = pyb.Pin('P9', pyb.Pin.OUT_OD, pyb.Pin.PULL_NONE)
    # Configure the imaging sensor
    sensor.reset()  # Initialize the sensor
    sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format
    sensor.set_framesize(sensor.QQQVGA)  # Set frame size
    sensor.set_auto_exposure(True, exposure_us=5000)  # Smaller means faster
    sensor.skip_frames(time=2000)  # Wait for settings take effect
    # Delta value in pixels for x/y for tracking new lines
    self.line_id_max_delta = 40
    # Max frames without a line before line history is cleared
    # should be inverse -> sensor.get_exposure_us / 25
    self.frames_before_line_purge = 200
    # Default travel direction is from top to bottom
    self.flip_travel_direction = flip_travel_direction
    # Configure clock for tracking FPS
    self.clock = time.clock()
    # Configure the lcd screen.
    lcd.init()
    # Initialize image buffer
    self.img = sensor.snapshot()
    # Allocate memory for exceptions in async/timer driven code
    micropython.alloc_emergency_exception_buf(100)  # Debugging only
    # Allocation for interrupt callbacks
    self._timer = pyb.Timer(13)
    self._timer.init(freq=10)
    self._timer.callback(self._cb)
    self._render_ref = self.render
    self._pin_reset_ref = self.pin_reset
    # Scale, sensor to screen
    self.scale = 1.5
    # Show performance/debug statistics/info
    self.draw_stats = draw_stats
    self.draw_lines = draw_lines
    self.draw_lap_times = draw_lap_times
    self.draw_timer = draw_timer
    self.draw_line_stats = draw_line_stats
    self.line_draw_color = (255, 0, 0)
    self._fps = None
    self._known_lines = []
    self._lap_timestamp = None
    self.lap_timestamps = []
    self._lap_notification_timestamp = None
    self.lap_notification_timeout = 5000  # how long the lap banner is shown
    self.save_first_frame = save_first_frame
    self.flip_text = flip_text
    self.mirror_text = mirror_text
    self.rotate_text = rotate_text
# High FPS Example # # This example shows off how to make the frame rate of the global shutter camera extremely # high. To do so you need to set the resolution to a low value such that pixel binning is # activated on the camera and then reduce the maximum exposure time. # # When the resolution is 320x240 or less the camera reads out pixels 2x faster. When the # resolution is 160x120 or less the camera reads out pixels 4x faster. This happens due # to pixel binning which is automatically activated for you to increase the readout speed. # # While the readout speed may increase the camera must still expose the image for the request # time so you will not get the maximum readout speed unless you reduce the exposure time too. # This results in a dark image however so YOU NEED A LOT of lighting for high FPS. import sensor, image, time sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120) - make smaller to go faster sensor.skip_frames(time = 2000) # Wait for settings take effect. clock = time.clock() # Create a clock object to track the FPS. sensor.set_auto_exposure(True, exposure_us=5000) # make smaller to go faster while(True): clock.tick() # Update the FPS clock. img = sensor.snapshot() # Take a picture and return the image. print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected # to the IDE. The FPS should increase once disconnected.
# NOTE(review): the two defs below take `self` and so presumably live inside
# a class whose header is outside this chunk -- confirm the original
# indentation/placement.
def set_b_range(self, b_range):
    """Set the tracked B (LAB) range to [0, b_range]."""
    self.b_min = 0
    self.b_max = b_range

def get_thresholds(self, l_count, a_count, b_count):
    """Return the stored LAB threshold shifted by the given per-channel
    offsets, as a one-element list suitable for find_blobs()."""
    tmp = [(self.l_min + l_count, self.l_max + l_count, \
            self.a_min + a_count, self.a_max + a_count, \
            self.b_min + b_count, self.b_max + b_count)]
    return tmp

# Bump exposure in 200 us steps until the frame's mean L value reaches 60.
img = sensor.snapshot()
while (img.get_statistics().l_mean() < 60):
    sensor.set_auto_exposure(False, exposure_us=sensor.get_exposure_us() + 200)
    img = sensor.snapshot()
thresholdHolder = ThresholdHolder()
list_of_thresholds = []
print("looping")
# Running min/max accumulators, initialized to their opposite extremes.
l_min = 100
l_max = 0
a_min = 120
a_max = -120
b_min = 120
b_max = -120
#(23, 76, -23, 43, -76, 15)
import sensor, image, time, math, pyb
from pyb import LED
from pyb import USB_VCP
blue_led = LED(3)
green_led = LED(2)
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
# Wide, short strip: full 640 px width but only 80 rows (a barcode scan band).
sensor.set_windowing((640, 80))
sensor.skip_frames(30)
sensor.set_auto_gain(True)
sensor.set_auto_whitebal(True)
sensor.set_auto_exposure(True)
sensor.set_vflip(True)
sensor.set_hmirror(True)
clock = time.clock()
temp = ''
result = ''
usb = USB_VCP()

def barcode_name(code):
    """Map an image barcode type constant to its display name.

    NOTE(review): only the EAN2/EAN5/EAN8/UPCE cases are visible in this
    chunk; the remaining types (and any fallback) appear to be truncated.
    """
    if (code.type() == image.EAN2):
        return "EAN2"
    if (code.type() == image.EAN5):
        return "EAN5"
    if (code.type() == image.EAN8):
        return "EAN8"
    if (code.type() == image.UPCE):
        return "UPCE"
def biggest(a, b, c):
    """Return the largest of the three values.

    Simplified from a manual two-step comparison to the builtin max().
    """
    return max(a, b, c)

find_initpoint()  # custom routine: drive the motor back to its origin
sensor.set_auto_exposure(False, 600)  # fixed 600 us exposure
sensor.skip_frames(time=2000)  # wait for settings to take effect
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
sensor.set_saturation(1)  # set saturation
clock = time.clock()
while (True):
    clock.tick()
    img = sensor.snapshot()
    # P7: drawn ball color decided by lot (0 = black, 1 = white)
    color_value = p_in_7.value()  # get value, 0 or 1
    # P8: whether ball sorting should start
    judge_value = p_in_8.value()  # get value, 0 or 1
    # P9: whether the shooter is ready
    Ready_value = p_in_9.value()  # get value, 0 or 1
# CIFAR is a convolutional nueral network designed to classify it's field of view into several # different object types and works on RGB video data. # # In this example we slide the LeNet detector window over the image and get a list of activations # where there might be an object. Note that use a CNN with a sliding window is extremely compute # expensive so for an exhaustive search do not expect the CNN to be real-time. import sensor, image, time, os, nn sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) sensor.set_windowing((128, 128)) # Set 128x128 window. sensor.skip_frames(time=750) # Don't let autogain run very long. sensor.set_auto_gain(False) # Turn off autogain. sensor.set_auto_exposure(False) # Turn off whitebalance. # Load cifar10 network (You can get the network from OpenMV IDE). net = nn.load('/cifar10.network') # Faster, smaller and less accurate. # net = nn.load('/cifar10_fast.network') labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] clock = time.clock() while(True): clock.tick() img = sensor.snapshot() # net.search() will search an roi in the image for the network (or the whole image if the roi is not # specified). At each location to look in the image if one of the classifier outputs is larger than
# You have to turn automatic gain control and automatic white blance off # otherwise they will change the image gains to undo any exposure settings # that you put in place... sensor.set_auto_gain(False) sensor.set_auto_whitebal(False) # Need to let the above settings get in... sensor.skip_frames(time = 500) current_exposure_time_in_microseconds = sensor.get_exposure_us() print("Current Exposure == %d" % current_exposure_time_in_microseconds) # Auto exposure control (AEC) is enabled by default. Calling the below function # disables sensor auto exposure control. The additionally "exposure_us" # argument then overrides the auto exposure value after AEC is disabled. sensor.set_auto_exposure(False, \ exposure_us = int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE)) print("New exposure == %d" % sensor.get_exposure_us()) # sensor.get_exposure_us() returns the exact camera sensor exposure time # in microseconds. However, this may be a different number than what was # commanded because the sensor code converts the exposure time in microseconds # to a row/pixel/clock time which doesn't perfectly match with microseconds... # If you want to turn auto exposure back on do: sensor.set_auto_exposure(True) # Note that the camera sensor will then change the exposure time as it likes. # Doing: sensor.set_auto_exposure(False) # Just disables the exposure value update but does not change the exposure # value the camera sensor determined was good. while(True):
# Selective Search Example import sensor, image, time from random import randint sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) sensor.skip_frames(time = 2000) # Wait for settings take effect. sensor.set_auto_gain(False) sensor.set_auto_exposure(False, exposure_us=10000) clock = time.clock() # Create a clock object to track the FPS. while(True): clock.tick() # Update the FPS clock. img = sensor.snapshot() # Take a picture and return the image. rois = img.selective_search(threshold = 200, size = 20, a1=0.5, a2=1.0, a3=1.0) for r in rois: img.draw_rectangle(r, color=(255, 0, 0)) #img.draw_rectangle(r, color=(randint(100, 255), randint(100, 255), randint(100, 255))) print(clock.fps())
import sensor, time, image, pyb
from pyb import Pin
# Camera setup
sensor.reset()                           # initialize the camera
sensor.set_pixformat(sensor.GRAYSCALE)   # grayscale mode, 8 bits per pixel
sensor.set_framesize(sensor.B128X128)    # frame size used for frame differencing
sensor.set_windowing((92,112))           # set the ROI window
sensor.skip_frames(10)                   # skip frames while the sensor stabilizes
# Reduce the influence of ambient conditions.
sensor.set_auto_gain(True)       # enable auto gain
sensor.set_auto_whitebal(True)   # enable auto white balance
sensor.set_auto_exposure(True)   # enable auto exposure
# IO pin definitions
# Input pins: buttons 1-4, with internal pull-ups enabled.
p_in0 = Pin('P0', Pin.IN, Pin.PULL_UP)
p_in1 = Pin('P1', Pin.IN, Pin.PULL_UP)
p_in2 = Pin('P2', Pin.IN, Pin.PULL_UP)
p_in3 = Pin('P3', Pin.IN, Pin.PULL_UP)
# Output pins
RED_LED_PIN = 1    # red LED output pin
GREEN_LED_PIN = 2  # green LED output pin
BLUE_LED_PIN = 3   # blue LED output pin
p_out0 = Pin('P4', Pin.OUT_PP)  # push-pull output
p_out1 = Pin('P5', Pin.OUT_PP)  # push-pull output
p_out2 = Pin('P6', Pin.OUT_PP)  # push-pull output
p_out3 = Pin('P7', Pin.OUT_PP)  # push-pull output
clock = time.clock() # Create a clock object to track the FPS. # You have to turn automatic gain control and automatic white blance off # otherwise they will change the image gains to undo any exposure settings # that you put in place... sensor.set_auto_gain(False) # Need to let the above settings get in... sensor.skip_frames(time=500) current_exposure_time_in_microseconds = sensor.get_exposure_us() print("Current Exposure == %d" % current_exposure_time_in_microseconds) # Auto exposure control (AEC) is enabled by default. Calling the below function # disables sensor auto exposure control. The additionally "exposure_us" # argument then overrides the auto exposure value after AEC is disabled. sensor.set_auto_exposure(False, \ exposure_us = int(current_exposure_time_in_microseconds * EXPOSURE_TIME_SCALE)) print("New exposure == %d" % sensor.get_exposure_us()) # sensor.get_exposure_us() returns the exact camera sensor exposure time # in microseconds. However, this may be a different number than what was # commanded because the sensor code converts the exposure time in microseconds # to a row/pixel/clock time which doesn't perfectly match with microseconds... # If you want to turn auto exposure back on do: sensor.set_auto_exposure(True) # Note that the camera sensor will then change the exposure time as it likes. # Doing: sensor.set_auto_exposure(False) # Just disables the exposure value update but does not change the exposure # value the camera sensor determined was good. while (True):
GAIN_SCALE = 1.0  # multiplier applied to the auto-selected gain
sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
# Print out the initial gain for comparison.
print("Initial gain == %f db" % sensor.get_gain_db())
sensor.skip_frames(time = 2000)  # Wait for settings take effect.
clock = time.clock()  # Create a clock object to track the FPS.
# You have to turn automatic exposure control and automatic white blance off
# otherwise they will change the image exposure to undo any gain settings
# that you put in place...
sensor.set_auto_exposure(False)
sensor.set_auto_whitebal(False)
# Need to let the above settings get in...
sensor.skip_frames(time = 500)
current_gain_in_decibels = sensor.get_gain_db()
print("Current Gain == %f db" % current_gain_in_decibels)
# Auto gain control (AGC) is enabled by default. Calling the below function
# disables sensor auto gain control. The additionally "gain_db"
# argument then overrides the auto gain value after AGC is disabled.
sensor.set_auto_gain(False, \
    gain_db = current_gain_in_decibels * GAIN_SCALE)
print("New gain == %f db" % sensor.get_gain_db())
# sensor.get_gain_db() returns the exact camera sensor gain decibels.