# Record N_FRAMES grayscale frames into an in-memory stream, then replay them forever.
# Note: While this should work on any board, the board should have an SDRAM to be of any use.
import sensor, image, time

# Number of frames to pre-allocate and record
N_FRAMES = 500

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
# This frame size must match the image size passed to ImageIO
sensor.set_windowing((120, 120))
sensor.skip_frames(time=2000)
clock = time.clock()

# Pre-allocate an in-memory stream sized for N_FRAMES 120x120 grayscale images.
stream = image.ImageIO((120, 120, sensor.GRAYSCALE), N_FRAMES)

for i in range(0, N_FRAMES):
    clock.tick()
    stream.write(sensor.snapshot())
    print(clock.fps())

while True:
    # Rewind stream and play back
    stream.seek(0)
    for i in range(0, N_FRAMES):
        # BUGFIX: original had a stray extra ')' here, which is a SyntaxError.
        # pause=True replays at the recorded frame rate.
        img = stream.read(copy_to_fb=True, pause=True)
        # Do machine vision algorithms on the image here.
# Record N_FRAMES grayscale frames into an in-memory stream, then replay them at ~100 FPS.
import sensor, image, time

# Number of frames to pre-allocate and record
N_FRAMES = 500

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
# This frame size must match the image size passed to ImageIO
sensor.set_windowing((120, 120))
sensor.skip_frames(time=2000)
clock = time.clock()

# Write to memory stream.
# BUGFIX: the pixformat was the magic literal 2, inconsistent with the
# sensor.GRAYSCALE pixformat configured above (sibling examples pass the
# sensor.* constant here) — use the named constant so stream and sensor agree.
stream = image.ImageIO((120, 120, sensor.GRAYSCALE), N_FRAMES)

for i in range(0, N_FRAMES):
    clock.tick()
    stream.write(sensor.snapshot())
    print(clock.fps())

while True:
    # Rewind stream and play back at 100FPS
    stream.seek(0)
    for i in range(0, N_FRAMES):
        # pause=False (default): read immediately; pacing is done by sleep below.
        img = stream.read(copy_to_fb=True)
        # Do machine vision algorithms on the image here.
        time.sleep_ms(10)
# Multi Color Blob Tracking Example # # This example shows off multi color blob tracking using the OpenMV Cam. import sensor, image, time, math img_src = "cam" # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... thresholds = [(36, 77, 40, 79, 33, 81)] # You may pass up to 16 thresholds above. However, it's not really possible to segment any # scene with 16 thresholds before color thresholds start to overlap heavily. if (img_src == "stream"): img_reader = image.ImageIO("/marker_stream.bin", "r") else: sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. Don't set "merge=True" becuase that will merge blobs which we don't want here. blobs = []
# Capture a fixed number of RGB565 frames into RAM, then loop the recording forever.
# Note: While this should work on any board, the board should have an SDRAM to be of any use.
import sensor, image, time

# How many frames to pre-allocate and capture.
N_FRAMES = 500

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
# The window below must match the image size handed to ImageIO.
sensor.set_windowing((120, 120))
sensor.skip_frames(time=2000)
clock = time.clock()

# Pre-allocated in-memory stream: N_FRAMES slots of 120x120 RGB565 images.
stream = image.ImageIO((120, 120, sensor.RGB565), N_FRAMES)

# Recording phase: grab one snapshot per slot and report the capture rate.
for _ in range(N_FRAMES):
    clock.tick()
    frame = sensor.snapshot()
    stream.write(frame)
    print(clock.fps())

# Playback phase: rewind and replay at the recorded frame rate, forever.
while True:
    stream.seek(0)
    for _ in range(N_FRAMES):
        img = stream.read(copy_to_fb=True, pause=True)
        # Do machine vision algorithms on the image here.
# Replay a recorded image stream from flash/SD, drawing test overlays on each frame.
import image, pyb, time

clock = time.clock()

# Open the recorded stream read-only.
img_reader = image.ImageIO("/test_stream.bin", "r")

while True:
    clock.tick()
    # loop=True rewinds automatically at end-of-stream; pause=True replays
    # at the recorded frame rate.
    img = img_reader.read(copy_to_fb=True, loop=True, pause=True)
    # Draw a red diagonal line across the whole frame.
    img.draw_line((0, 0, img.width(), img.height()), color=(255, 0, 0), thickness=10)
    img.draw_rectangle(104, 79, 119, 96)
    time.sleep(1)

# BUGFIX: original ended with `img_write.close` — `img_write` is undefined
# (the reader is `img_reader`) and `.close` was never called.
# NOTE: unreachable while the loop above runs forever; kept so the stream is
# released if the loop is ever given an exit condition.
img_reader.close()
#sensor.set_saturation(1) original_exposure = sensor.get_exposure_us() sensor.set_auto_exposure(False, int(0.15 * original_exposure)) clock = time.clock() uart = UART(1, 115200) color = bytearray(3) color[0] = 0x15 color[1] = 0xFF color[2] = 0x00 img_writer = image.ImageIO("/test_stream.bin", "w") start = pyb.millis() while pyb.elapsed_millis(start) < record_time: clock.tick() img = sensor.snapshot().gamma_corr(gamma=1.4, contrast=1.2, brightness=-0.2) pixie.setColor(color) time.sleep(0.1) #lidar_frame = lidar.readLidar() ## Send out our results.
# USE THIS EXAMPLE WITH A USD CARD!
#
# This example shows how to use the Image Reader object to replay snapshots of what your
# OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.
# Altered to allow full speed reading from SD card for extraction of sequences to the network etc.
# Set the new pause parameter to false
import sensor, image, time

snapshot_source = False # Set to true once finished to pull data from sensor.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)
clock = time.clock()

stream = None
if snapshot_source == False:
    # Open the recorded stream read-only from the SD card.
    stream = image.ImageIO("/stream.bin", "r")

while True:
    clock.tick()
    if snapshot_source:
        img = sensor.snapshot()
    else:
        # BUGFIX: the header states this example was altered for full-speed
        # reading with pause set to false, but the code passed pause=True
        # (which throttles playback to the recorded frame rate).
        # loop=True rewinds automatically at end-of-stream.
        img = stream.read(copy_to_fb=True, loop=True, pause=False)
    # Do machine vision algorithms on the image here.
    print(clock.fps())