# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

import sensor, time

sensor.reset()                        # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)   # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.B64X64)   # Set frame size to 64x64... (or 64x32)...
sensor.skip_frames(time=2000)         # Wait for settings to take effect.
clock = time.clock()                  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run into out-of-RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
    # Put in a z_rotation value below and you should see the r output be equal to that.
    if(0):
        expected_rotation = 20.0
        extra_fb.rotation_corr(z_rotation=(-expected_rotation))

    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
    # Put in a zoom value below and you should see the z output be equal to that.
    if(0):
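# Illustrative sketch (not part of the original example): the header comment
# above mentions getting a 32x32 image by pooling a 64x64 capture with
# "img.pool(2, 2)". That would look roughly like this; "small" is just a
# hypothetical variable name.
small = sensor.snapshot()  # 64x64 capture (given the framesize set above)
small.pool(2, 2)           # scales the image down in place to 32x32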
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

import sensor, time

sensor.reset()                           # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)   # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)    # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time=2000)            # Wait for settings to take effect.
clock = time.clock()                     # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run into out-of-RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img,
                roi=(x, y, BLOCK_W, BLOCK_H),
                template_roi=(x, y, BLOCK_W, BLOCK_H))

            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
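# Illustrative sketch (not part of the original snippet): a small helper that
# could be called inside the "if" body above to visualize each block's motion
# vector on the live image; draw_block_flow() is a hypothetical name.
def draw_block_flow(img, x, y, pixel_x, pixel_y, block_w, block_h):
    cx = x + (block_w // 2)  # block center
    cy = y + (block_h // 2)
    img.draw_line(cx, cy, pixel_x, pixel_y, color=255)  # motion vector
    img.draw_circle(pixel_x, pixel_y, 2, color=255)     # mark the displaced point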
    # (tail of the MAVLink packet-sending helper defined earlier in the original script)
    packet_sequence += 1
    uart.write(temp)

sensor.reset()                        # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)   # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.B64X32)   # Set frame size to 64x32... (or 64x64)...
sensor.skip_frames(time=2000)         # Wait for settings to take effect.
clock = time.clock()                  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run into out-of-RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

while (True):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)

    # Offset results are noisy without filtering so we drop some accuracy.
    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0

    if (displacement.response() > MAV_OPTICAL_FLOW_confidence_threshold):
        send_optical_flow_packet(sub_pixel_x, sub_pixel_y,
                             checksum(temp, MAV_OPTICAL_FLOW_extra_crc))
    # (tail of the MAVLink packet-sending helper defined earlier in the original script)
    packet_sequence += 1
    uart.write(temp)
    update_led()

sensor.reset()                           # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)   # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B64X32)      # Set frame size to 64x32... (or 64x64)...
sensor.skip_frames(time=2000)            # Wait for settings to take effect.
clock = time.clock()                     # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run into out-of-RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)

    # Offset results are noisy without filtering so we drop some accuracy.
    sub_pixel_x = int(-displacement.x_translation() * 35)
    sub_pixel_y = int(displacement.y_translation() * 53)

    send_optical_flow_packet(sub_pixel_x, sub_pixel_y, displacement.response())
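# Illustrative sketch (the helper is not shown in this excerpt): the checksum()
# call above is MAVLink's x25 / CRC-16-MCRF4XX accumulator, seeded with 0xFFFF
# and finished with the message's CRC_EXTRA byte. A minimal version could look
# like this; treat it as an assumption about the helper, not the original code.
def checksum(data, extra):
    output = 0xFFFF
    for b in bytes(data) + bytes([extra]):
        tmp = (b ^ output) & 0xFF
        tmp = (tmp ^ (tmp << 4)) & 0xFF
        output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF
    return output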
    # (body of the timer callback "tick" defined earlier in the original script)
    global flag
    flag = 1

tim = Timer(4, freq=20)  # create a timer object using timer 4 - trigger at 20 Hz
tim.callback(tick)       # set the callback to our tick function

# -------------------------------- start of the main while loop -------------------------------- #
while (True):
    # led.on()
    if (flag == 1):
        img = sensor.snapshot()
        img_old = img.copy()
        img.lens_corr(1.5)  # for 2.8mm lens... correct lens distortion

        # -------------------------------- optical-flow position hold -------------------------------- #
        # NOTE: allocating a new extra frame buffer on every pass through the loop
        # will quickly exhaust frame-buffer RAM; this allocation normally belongs
        # outside the loop.
        old = sensor.alloc_extra_fb(16, 16, sensor.GRAYSCALE)
        old.replace(sensor.snapshot().mean_pooled(4, 4))

        new_img = sensor.snapshot().mean_pooled(4, 4)
        displacement = old.find_displacement(new_img)
        old.replace(new_img)

        delta_x0 = int(displacement.x_translation() * 5) / 5.0
        delta_y0 = int(displacement.y_translation() * 5) / 5.0
        delta_x = 10 * delta_x0
        delta_y = 10 * delta_y0

        # -------------------------------- find where detected lines intersect -------------------------------- #
        lines = img_old.find_lines(threshold=1000, theta_margin=50, rho_margin=50)
        for i in range(0, len(lines) - 1):
            for j in range(i + 1, len(lines)):
                l0x1 = lines[i].x1()
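# Illustrative sketch (not part of the original script): the nested loop above
# starts reading the endpoints of every pair of detected lines. The intersection
# of two (infinite) lines given by their segment endpoints can be computed with
# the standard determinant formula; segment_intersection() is a hypothetical helper name.
def segment_intersection(x1, y1, x2, y2, x3, y3, x4, y4):
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        return None  # the lines are parallel (or coincident)
    px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / denom
    py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / denom
    return int(px), int(py)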
    # (this excerpt starts inside the check that the remote cam accepted the requested settings)
    if rgb_gain_db[0] != remote_r_gain_db or rgb_gain_db[1] != remote_g_gain_db or rgb_gain_db[2] != remote_b_gain_db:
        print("rgb_gain_db is " + str(rgb_gain_db) + " but we got [" +
              str(remote_r_gain_db) + "," + str(remote_g_gain_db) + "," +
              str(remote_b_gain_db) + "]")
        exit(1)
else:
    print("Could not configure the remote cam!")
    exit(1)

# skip frames as this is exactly what the remote is doing on its side
sensor.skip_frames(time=2000)

# save the ref image used for the diff
print("About to save background image...")
ref_img = sensor.alloc_extra_fb(img_width, img_height, sensor_format)
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
ref_img.replace(sensor.snapshot().remap(data_fb, right=False, upside_down=True))
sensor.dealloc_extra_fb()
print("Saved background image - Now frame differencing!")

# now add an additional part that will convey the mask info
sensor.set_windowing((int((sensor.width() - img_width) / 2) - 2,
                      int((sensor.height() - img_height) / 2),
                      img_width, img_height + mask_height))
time.sleep(500)

clock = time.clock()
idx = 0

data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
green_led = LED(2)
green_led.off()
blue_led = LED(3)
blue_led.off()

class DataTX():
    img_width = 320
    img_height = 240
    sent = False
    buff = None

dataTX = DataTX()
dataRX = DataTX()
dataTX.buff = sensor.alloc_extra_fb(DataTX.img_width, DataTX.img_height, sensor.RGB565)
dataRX.buff = sensor.alloc_extra_fb(DataTX.img_width, DataTX.img_height, sensor.RGB565)

usb = USB_VCP()
blue_led.on()

spi_error = False
debug_image = False

control = CameraSlaveControl()
control.column_offset = 10
control.row_offset = 10
control.column_zoom_numerator = 22
control.column_zoom_denominator = 20
        # (tail of a drawing helper defined earlier in the original script)
        if col == begin:
            continue
        img.draw_line(begin, col, end, col, color=120, thickness=1)
    return img

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B64X64)
sensor.set_auto_whitebal(False)  # Turn off white balance.
sensor.set_auto_gain(False, gain_db=8)
sensor.skip_frames(time=2000)
clock = time.clock()

buffer1 = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
buffer2 = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
sensor.skip_frames(time=500)
buffer1.replace(sensor.snapshot())

oddframe = True  # Tracks if the frame number is odd or not.

pyb.LED(BLUE_LED_PIN).on()  # indicator on

for i in range(1000):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    if (oddframe):
        oddframe = False
        buffer2.replace(img)
face_cascade = image.HaarCascade("frontalface", stages=25)
num_faces = 0

while (True):
    # Capture snapshot
    img = sensor.snapshot()

    # Find objects.
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.60, scale_factor=1.15)

    if num_faces == len(objects):
        continue
    else:
        num_faces = len(objects)

    # Send each detected face region over USB
    for r in objects:
        led.on()
        tmp_fb = sensor.alloc_extra_fb(r[2], r[3], sensor.RGB565)
        send_img = img.copy(r, copy_to_fb=tmp_fb).compress()
        usb.send(b"IMGS")
        usb.send(ustruct.pack("<L", send_img.size()))
        usb.send(send_img)
        usb.send(b"IMGE")
        sensor.dealloc_extra_fb()
        img.draw_rectangle(r)
        led.off()
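# Illustrative sketch (not part of the original script): a host-side reader for
# the "IMGS" <little-endian uint32 size> <jpeg bytes> "IMGE" framing used above.
# Assumes desktop Python with the pyserial package and that the camera shows up
# as a CDC serial port; the port name is a placeholder.
import struct
import serial

def read_framed_jpeg(port="/dev/ttyACM0"):
    with serial.Serial(port, baudrate=115200, timeout=5) as ser:
        sync = b""
        while sync != b"IMGS":       # resynchronize byte-by-byte on the start marker
            byte = ser.read(1)
            if not byte:
                raise TimeoutError("no data from camera")
            sync = (sync + byte)[-4:]
        size = struct.unpack("<L", ser.read(4))[0]
        jpeg = ser.read(size)        # JPEG produced by img.copy(...).compress()
        if ser.read(4) != b"IMGE":
            raise ValueError("framing error: missing IMGE trailer")
        return jpeg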
clock = time.clock()  # Create a clock object to track the FPS.

# windowing
window_width = 320   # max 320
window_height = 150  # max 240
roi = ((sensor.width() / 2) - (window_width / 2),
       (sensor.height() / 2) - (window_height / 2),
       window_width, window_height)
sensor.set_windowing((int(roi[0]), int(roi[1]), int(roi[2]), int(roi[3])))

# positional settings
sensor.set_hmirror(True)
sensor.set_vflip(False)

# variables
extra_fb = sensor.alloc_extra_fb(window_width, window_height, sensor.GRAYSCALE)
uart = pyb.UART(3, 9600, timeout_char=1000)
red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)
ir_leds = pyb.LED(4)
trigger_threshold = 5

# debug
show_debug = True
debug_color = (0, 0, 0)
min_mo_life_to_show = 5
debug_detected_duration = 50
debug_detected_counter = debug_detected_duration
def face_recog(pic_name, vi_ip):
    print("~~~~~~~~~~~~~~~~FACE_RECOG~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()

    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))

    pyb.LED(2).on()
    pyb.LED(3).on()

    name_lbp_list = []
    uos.chdir("/CamFaces")
    for filename in uos.listdir("/CamFaces"):
        if filename.endswith(".pgm"):
            try:
                img = None
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)
                sensor.dealloc_extra_fb()
                pname = filename
                und_loc = pname.index('_')
                pname = pname[0:(und_loc)]
                name_lbp_list.append(pname)
                name_lbp_list.append(dist)
                continue
            except Exception as e:
                print(e)
                print("error producing LBP value")
        else:
            print("file found that is not of type pgm")

    print(name_lbp_list)
    gc.collect()

    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] != name_lbp_list[i + 2]):
            end = i + 2
            face = []
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2

    face = []
    face = name_lbp_list[(end):(len(name_lbp_list))]
    print(face)
    gc.collect()

    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)

    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    gc.collect()

    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)

    uos.remove("/snapshot-person.pgm")

    pyb.LED(2).off()
    pyb.LED(3).off()

    chost = vi_ip
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    client.connect((chost, cport))
    print("connected to visually impaired user's smartphone")
    to_send = id_name + "\n"
    client.send(to_send.encode())
    print("sent name to phone")
    client.close()
    gc.collect()
    return
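# Illustrative sketch (not part of the original function): the flat
# [name, dist, name, dist, ...] list built above can also be averaged per person
# with a dictionary, which is easier to follow; average_lbp_by_name() is a
# hypothetical helper name showing the equivalent grouping logic.
def average_lbp_by_name(name_lbp_list):
    scores = {}
    for i in range(0, len(name_lbp_list), 2):
        scores.setdefault(name_lbp_list[i], []).append(name_lbp_list[i + 1])
    averages = {name: sum(d) / len(d) for name, d in scores.items()}
    # person with the smallest average LBP distance is the best match
    return min(averages, key=averages.get), averages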
# Lens Correction

import sensor, image, time

sensor.reset()
sensor.set_framesize(sensor.VGA)
sensor.set_pixformat(sensor.RGB565)
sensor.set_windowing((160, 120, 320, 240))
sensor.skip_frames(time=2000)

data_fb = sensor.alloc_extra_fb(320, 240, sensor.RGB565)

clock = time.clock()
count = 0
remap = False

while (True):
    clock.tick()

    # test without remap, with remap QVGA and with remap QQVGA
    img = sensor.snapshot()
    if remap:
        img.remap(data_fb, right=False, upside_down=False)

    count += 1
    if count == 100:
        print("remap QVGA GRAYSCALE")
        remap = True
        sensor.set_framesize(sensor.VGA)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_windowing((160, 120, 320, 240))
    elif count == 200:
sensor.snapshot()

# get the gains and exposure
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
print("exposure is " + str(exposure_us))
rgb_gain_db = sensor.get_rgb_gain_db()

# deactivate the auto settings, use the given gains and exposure and wait for a bit
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)
sensor.skip_frames(time=2000)

# allocate extra buffers to store the ref image and to hold the remap working data
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
ref_image = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)

# save the ref image used for the diff function
print("About to save background image...")
img = sensor.snapshot()
img.remap(data_fb, right=True)
ref_image.replace(img)
print("Saved background image - Now frame differencing!")

# now add an additional part that will convey the mask info
sensor.set_windowing((int((sensor.width() - img_width) / 2),
                      int((sensor.height() - img_height) / 2),
                      img_width, img_height + mask_height))
time.sleep(500)

clock = time.clock()
sensor.reset()                           # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)   # or sensor.RGB565
sensor.set_framesize(sensor.QVGA)        # or sensor.QQVGA (or others)
sensor.set_windowing((width_frame, height_frame))  # Look at the center width_frame x height_frame pixels.
sensor.skip_frames(time=200)             # Let new settings take effect.
sensor.set_auto_whitebal(False)          # Turn off white balance.
sensor.set_auto_gain(False)
clock = time.clock()                     # Tracks FPS.

# extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb = sensor.alloc_extra_fb(width_frame, height_frame, sensor.GRAYSCALE)

print("About to save background image...")
sensor.skip_frames(time=200)  # Give the user time to get ready.
extra_fb.replace(sensor.snapshot())
print("Saved background image - Now frame differencing!")

while (True):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img = img.difference(extra_fb)

    edges = img.find_edges(image.EDGE_CANNY, threshold=(10, 80))
    blobs = img.find_blobs([(30, 255)],
def face_recog(pic_name, vi_ip):
    print("~~~~~~~~~~~~~~~~FACE_RECOG~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  # garbage collection

    # find LBP value for snapshot saved in face_detect
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))

    # turn on lights signaling facial recognition calculations starting
    pyb.LED(2).on()
    pyb.LED(3).on()

    # find LBP values for each image received in server_recv
    name_lbp_list = []
    uos.chdir("/CamFaces")  # change directory to where all the images from server_recv are stored
    for filename in uos.listdir("/CamFaces"):
        if filename.endswith(".pgm"):
            try:
                img = None
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)  # allocate more space for images
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)  # set threshold lower than 70 to tighten matching algo
                sensor.dealloc_extra_fb()

                # extracting the person's name from the file name
                pname = filename
                und_loc = pname.index('_')
                pname = pname[0:(und_loc)]

                # add the person's name and LBP value for the image to the list
                name_lbp_list.append(pname)
                name_lbp_list.append(dist)
                continue
            except Exception as e:
                print(e)
                print("error producing LBP value")
        else:
            print("file found that is not of type pgm")

    print(name_lbp_list)
    gc.collect()  # garbage collection

    # finding average LBP values for each name
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        # for names 1 thru n-1
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] != name_lbp_list[i + 2]):
            end = i + 2
            face = []
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2

    face = []
    face = name_lbp_list[(end):(len(name_lbp_list))]
    print(face)
    gc.collect()  # garbage collection

    # special case: find average LBP value for last name in list (name n)
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)

    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    gc.collect()  # garbage collection

    # find minimum average LBP and associated person name
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)

    # delete snapshot of person
    uos.remove("/snapshot-person.pgm")

    # turn off lights signaling facial recognition calculations done
    pyb.LED(2).off()
    pyb.LED(3).off()

    # TCP client socket to send the name of the person recognized to the visually impaired user's smartphone
    chost = vi_ip
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)  # TCP client socket with IPv4 addressing
    client.connect((chost, cport))
    print("connected to visually impaired user's smartphone")
    to_send = id_name + "\n"
    client.send(to_send.encode())
    print("sent name to phone")
    client.close()  # client closed
    gc.collect()  # garbage collection
    return