def main(labels=None, model_addr="/sd/m.kmodel", sensor_window=(224, 224),
         lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing(sensor_window)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)

    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)

    if not labels:
        with open('labels.txt', 'r') as f:
            exec(f.read())
    if not labels:
        print("no labels.txt")
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "no labels.txt", color=(255, 0, 0), scale=2)
        lcd.display(img)
        return 1
    try:
        img = image.Image("startup.jpg")
        lcd.display(img)
    except Exception:
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "loading model...", color=(255, 255, 255), scale=2)
        lcd.display(img)

    task = kpu.load(model_addr)
    try:
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            fmap = kpu.forward(task, img)
            t = time.ticks_ms() - t
            plist = fmap[:]
            pmax = max(plist)
            max_index = plist.index(pmax)
            img.draw_string(0, 0, "%.2f : %s" % (pmax, labels[max_index].strip()), scale=2)
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        raise e
    finally:
        kpu.deinit(task)
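# Hedged usage sketch (not from the original file): one way this classifier
# main() might be invoked. The label list and model path below are placeholders;
# on a real board the model could also live at a flash address such as 0x300000.
example_labels = ["class_0", "class_1"]      # placeholder labels
main(labels=example_labels,
     model_addr="/sd/m.kmodel",              # assumed SD-card model path
     lcd_rotation=0,
     sensor_hmirror=False,
     sensor_vflip=False)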
def free():
    try:
        if FaceReco.is_load:
            tmp = kpu.deinit(FaceReco.task_fd)
            tmp = kpu.deinit(FaceReco.task_ld)
            tmp = kpu.deinit(FaceReco.task_fe)
            #t, FaceReco.task_fd = FaceReco.task_fd, None
            #del t
            #t, FaceReco.task_ld = FaceReco.task_ld, None
            #del t
            #t, FaceReco.task_fe = FaceReco.task_fe, None
            #del t
            t, FaceReco.img_face = FaceReco.img_face, None
            del t
            FaceReco.record_ftr = []
            FaceReco.record_ftrs = []
            button_io.home_button.disirq()
            FaceReco.start_processing = False
            FaceReco.is_load = False
            gc.collect()
    except Exception as e:
        print(e)  # see py_kpu_deinit: an error there will mp_raise_TypeError
def main(model_addr=0x300000, lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)

    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)

    task = kpu.load(model_addr)
    anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
               5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)  # threshold: [0,1], nms_value: [0,1]
    try:
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    img.draw_rectangle(obj.rect())
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
def free():
    #print(HowMany.free)
    try:
        if HowMany.is_load:
            kpu.deinit(HowMany.task)
            HowMany.is_load = False
    except Exception as e:
        print(e)  # see py_kpu_deinit: an error there will mp_raise_TypeError
def free():
    #print(MaybeIs.free)
    try:
        if MaybeIs.is_load:
            kpu.deinit(MaybeIs.task)
            MaybeIs.is_load = False
    except Exception as e:
        print(e)  # see py_kpu_deinit: an error there will mp_raise_TypeError
def cleanup(self):
    self.close_recorder()
    try:
        uos.umount(self._ramdisk_mount_point)
    except OSError as e:
        print(e)
    if self._task:
        kpu.deinit(self._task)
        self._task = None
def main(anchors, labels=None, model_addr="/sd/m.kmodel"):
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((224, 224))
    sensor.run(1)

    lcd.init(type=1)
    lcd.clear(lcd.WHITE)

    if not labels:
        with open('labels.txt', 'r') as f:
            exec(f.read())
    if not labels:
        print("no labels.txt")
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "no labels.txt", color=(255, 0, 0), scale=2)
        lcd.display(img)
        return 1
    try:
        img = image.Image("startup.jpg")
        lcd.display(img)
    except Exception:
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "loading model...", color=(255, 255, 255), scale=2)
        lcd.display(img)

    task = kpu.load(model_addr)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)  # threshold: [0,1], nms_value: [0,1]
    try:
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    pos = obj.rect()
                    img.draw_rectangle(pos)
                    img.draw_string(pos[0], pos[1], "%s : %.2f" % (labels[obj.classid()], obj.value()), scale=2)
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
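# Hedged usage sketch (assumption, not in the original snippet): invoking the
# YOLO2 main() above with the face-detection anchors used elsewhere in these
# examples; the labels list and model path are placeholders.
example_anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
                   5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
main(anchors=example_anchors, labels=["face"], model_addr="/sd/m.kmodel")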
def on_draw(self):
    if not self.__initialized:
        self.__lazy_init()
    try:
        while True:
            img = sensor.snapshot()  # Take an image from sensor
            print("progress 4 OK!")
            # Run the detection routine
            bbox = kpu.run_yolo2(self.task, img)
            if bbox:
                for i in bbox:
                    print(i)
                    img.draw_rectangle(i.rect())
            lcd.display(img)

            home_button = self.get_system().home_button  # TODO
            led_w = self.get_system().led_w
            if home_button.value() == 0 and self.but_stu == 1:
                if led_w.value() == 1:
                    led_w.value(0)
                else:
                    led_w.value(1)
                self.but_stu = 0
            if home_button.value() == 1 and self.but_stu == 0:
                self.but_stu = 1
    except KeyboardInterrupt:
        a = kpu.deinit(self.task)  # deinit the instance task (a bare `task` is undefined here)
        sys.exit()
def free():
    try:
        if FaceReco.is_load:
            tmp = kpu.deinit(FaceReco.model)
            FaceReco.is_load = False
    except Exception as e:
        print(e)  # see py_kpu_deinit: an error there will mp_raise_TypeError
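# Hedged sketch (assumption) of the load/free lifecycle these free() helpers
# follow: a module-level holder keeps the KPU task plus an is_load flag so
# free() stays safe to call more than once. The names below are illustrative.
import KPU as kpu

class ExampleHolder:
    is_load = False
    task = None

def example_load(model_addr="/sd/m.kmodel"):
    if not ExampleHolder.is_load:
        ExampleHolder.task = kpu.load(model_addr)  # allocate the KPU task
        ExampleHolder.is_load = True

def example_free():
    try:
        if ExampleHolder.is_load:
            kpu.deinit(ExampleHolder.task)         # release KPU memory
            ExampleHolder.task = None
            ExampleHolder.is_load = False
    except Exception as e:
        print(e)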
def find_face():
    lcd.init()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_vflip(1)
    sensor.run(1)
    task = kpu.load(0x300000)
    anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
              5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
    while (True):
        img = sensor.snapshot()
        code = kpu.run_yolo2(task, img)
        if code:
            for i in code:
                img.draw_rectangle(i.rect())
        lcd.display(img)
    kpu.deinit(task)
def main(model_addr=0x300000, lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    try:
        sensor.reset()
    except Exception as e:
        raise Exception(
            "sensor reset fail, please check hardware connection, or hardware damaged! err: {}"
            .format(e))
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)

    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)

    anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
               5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    try:
        task = None
        task = kpu.load(model_addr)
        kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)  # threshold: [0,1], nms_value: [0,1]
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    img.draw_rectangle(obj.rect())
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        raise e
    finally:
        if task is not None:
            kpu.deinit(task)
def main(labels=None, model_addr="/sd/m.kmodel", lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    gc.collect()

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((224, 224))
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)

    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)

    if not labels:
        raise Exception("no labels.txt")

    task = kpu.load(model_addr)
    try:
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            fmap = kpu.forward(task, img)
            t = time.ticks_ms() - t
            plist = fmap[:]
            pmax = max(plist)
            max_index = plist.index(pmax)
            img.draw_string(0, 0, "%.2f\n%s" % (pmax, labels[max_index].strip()), scale=2, color=(255, 0, 0))
            img.draw_string(0, 200, "t:%dms" % (t), scale=2, color=(255, 0, 0))
            lcd.display(img)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
def measure_fps(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    fps_ = []
    for i in range(20):
        img = sensor.snapshot()
        clock.tick()
        fmap = kpu.forward(task, img)
        lcd.display(img, oft=(0, 0))
        fps_.append(clock.fps())
    average_fps = sum(fps_) / len(fps_)
    print(average_fps)
    global fps_result
    fps_result = average_fps
    _ = kpu.deinit(task)
def main():
    ## main
    task, graph, counter, uart = init(threshold=0.5, patience=4)
    frame_idx = 0
    try:
        while (True):
            # get image
            img = sensor.snapshot().rotation_corr(z_rotation=0.0)
            # detect boxes
            a = img.pix_to_ai()
            code = kpu.run_yolo2(task, img)
            # set frame
            currF = Frame(frame_idx, img, code)
            # calc track
            diff_idx = graph.track(currF)
            # counting
            counter.vanish_update(graph.F_list[-1].bboxes, graph.F_list[-2].bboxes, graph.is_decrease)
            counter.count()
            # display on IDE
            #img = currF.draw_frames()
            #img = img.copy((32, 32, 160, 160))
            #img.draw_string(0, 0, str(counter.counter[LEFT])+","+str(counter.counter[RIGHT]),
            #                color=(0,255,0), scale=3)
            #a = lcd.display(img)
            # to Gray
            msg = str(counter.counter[LEFT]) + DIV + \
                  str(counter.counter[RIGHT]) + DIV + str(currF.num_object)
            _ = uart.write(msg)
            #_ = uart.write(img)
            print(counter.counter)
            # finalize
            frame_idx += 1
            time.sleep(0.05)
    except Exception as e:
        # need to delete the kpu task on keyboard interrupt
        a = kpu.deinit(task)
        del task
        gc.collect()
        print(e)
def seeotheritems():  # 7-second delay
    global taskfe
    global a
    global task
    global yolonum
    global anchor
    classes = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
    anchored = (1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52)
    kpu.deinit(taskfe)
    kpu.deinit(task)
    tasktw = kpu.load("/sd/model/20class.kmodel")
    uart_B.write(" loaded 20 ")
    kpu.init_yolo2(tasktw, 0.5, 0.3, 5, anchored)
    imgother = sensor.snapshot()
    imgother.pix_to_ai()
    detectcode = kpu.run_yolo2(tasktw, imgother)
    if detectcode:
        led_r.value(0)
        led_b.value(0)
        for i in detectcode:
            imgother = imgother.draw_rectangle(i.rect())
        for i in detectcode:
            imgother = imgother.draw_string(i.x(), i.y(), str(classes[i.classid()]), color=(255, 250, 250))
            imgother = imgother.draw_string(i.x(), i.y() + 12, '%1.3f' % i.value(), color=(255, 250, 250))
            imgother.save("/sd/yoloimages/" + str(yolonum) + ".jpg", quality=70)
            utime.sleep_ms(50)
            yolonum += 1
            uart_B.write(" |Yolo|> " + str(classes[i.classid()]) + " <||")
            f = open("/sd/printoutput.txt", "a+")
            f.write("Yolo detected: " + str(classes[i.classid()]) + "\n\r")
            f.close()
    del (imgother)
    kpu.deinit(tasktw)
    del (tasktw)
    gc.collect()
    uart_B.write(" killed ")
    task = kpu.load("/sd/facedetect.kmodel")
    taskfe = kpu.load("/sd/model/FE.smodel")
    utime.sleep_ms(10)
    led_r.value(1)
    led_b.value(1)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
    uart_B.write(" restarted ")
def inference(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    while (True):
        img = sensor.snapshot()
        clock.tick()
        fmap = kpu.forward(task, img)
        fps = clock.fps()
        plist = fmap[:]
        pmax = max(plist)
        max_index = plist.index(pmax)
        a = lcd.display(img, oft=(0, 0))
        lcd.draw_string(0, 128, "%.2f:%s " % (pmax, labels[max_index].strip()))
    _ = kpu.deinit(task)
def measure_latency(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    latency_ = []
    for i in range(20):
        img = sensor.snapshot()
        clock.tick()
        t1 = time.ticks_us()
        fmap = kpu.forward(task, img)
        t2 = time.ticks_diff(time.ticks_us(), t1) / 1000
        lcd.display(img, oft=(0, 0))
        latency_.append(t2)
    average_latency = sum(latency_) / len(latency_)
    print(average_latency)
    global latency_result
    latency_result = average_latency
    _ = kpu.deinit(task)
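# Hedged benchmark driver (assumption, not from the original file): the
# measure_fps()/measure_latency() helpers above expect an initialized sensor
# and LCD plus module-level fps_result/latency_result variables. The model
# path below is a placeholder.
import sensor, lcd

fps_result = 0.0
latency_result = 0.0

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.run(1)

measure_fps("/sd/m.kmodel")      # prints and stores the average FPS over 20 frames
measure_latency("/sd/m.kmodel")  # prints and stores the average forward latency in ms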
                dist_str = "%.1f" % (dist)
                print("[DISTANCE]: " + dist_str)
                img.draw_string(2, 47, dist_str, scale=3)
                lcd.display(img)
                continue
            name, dist = get_nearest(feature_list, plist)
            #print(clock.fps())
            if dist < 50 and name != "exclude":  # 50 is modified from original value 200
                img.draw_rectangle(1, 46, 222, 132, color=br.get_color(0, 255, 0), thickness=3)
                img.draw_string(2, 47 + 30, "%s" % (name), scale=3)
                if old_name != name:
                    namestring = str(name) + "\n"  # UART to StickC
                    uart_Port.write(namestring)    # UART to StickC
                    # print(name)  # modified from original
                    lcd.display(img)
                    br.play_sound("/sd/voice/" + name + ".wav")
                    old_name = name
            else:
                old_name = ''
            # output
            img.draw_string(2, 47, "%.2f " % (dist), scale=3)
            lcd.display(img)
            kpu.fmap_free(fmap)
    except KeyboardInterrupt:
        uart_Port.deinit()  # UART to StickC
        del uart_Port       # UART to StickC
        kpu.deinit(task)
        sys.exit()
# tested with firmware 5-0.22
import sensor, image, lcd
import KPU as kpu

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)

classes = ["racoon"]
task = kpu.load(0x200000)  # change to "/sd/name_of_the_model_file.kmodel" if loading from SD card
a = kpu.set_outputs(task, 0, 7, 7, 30)  # the actual shape needs to match the last layer shape of your model (before Reshape)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
          5.47434, 7.88282, 3.52778, 9.77052, 9.16828)
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)  # tweak the second parameter if you're getting too many false positives

while (True):
    img = sensor.snapshot().rotation_corr(z_rotation=90.0)
    a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(), color=(0, 255, 0))
            a = img.draw_string(i.x(), i.y(), classes[i.classid()], color=(255, 0, 0), scale=3)
        a = lcd.display(img)
    else:
        a = lcd.display(img)
a = kpu.deinit(task)
#print(labels_txt)
labels = tuple([str(i) for i in labels_txt.split(",")])
#print(labels)

import time
last = time.ticks_ms()
while True:
    try:
        #KpuTask = kpu.load(0x5C0000)
        KpuTask = kpu.load("/sd/yolov2.kmodel")
        kpu.init_yolo2(KpuTask, 0.6, 0.3, 5, anchor)
        while True:
            #print(time.ticks_ms() - last)
            last = time.ticks_ms()
            img = camera.get_image()
            things = kpu.run_yolo2(KpuTask, img)
            if things:
                for pos in range(len(things)):
                    i = things[pos]
                    img.draw_rectangle(320 - (i.x() + i.w()), i.y(), i.w(), i.h())
                    img.draw_string(320 - (i.x() + i.w()), i.y(),
                                    '%.2f:%s' % (i.value(), labels[i.classid()]), color=(0, 255, 0))
            ## gc.collect()  # have bug when reply 3
            lcd.display(img)
    except KeyboardInterrupt as e:
        pass
    finally:
        kpu.deinit(KpuTask)
        #break
            color=(255, 0, 0), scale=2))
        greetback = False
        a = 0
        lcd.display(
            imgv.draw_string(0, 5, " pic: " + str(currentImage) + ".jpg " + str(score),
                             color=(255, 250, 250), scale=1))
        img = img.resize(100, 100)
        lcd.display(imgv.draw_image(img, 0, 128))
        #a = img
        del (img)
        pd = False
    if pd == True:
        lcd.display(
            image.Image("/sd/videopic/idle_" + str(n) + ".jpg", copy_to_fb=True))
        utime.sleep_ms(10)
    pd = True
    n += 1
    if n >= 68:
        n = 0
except Exception as x:
    a = kpu.deinit(taskfe)
    a = kpu.deinit(task)
    a = kpu.deinit(taskkp)
    sys.print_exception(x, file="/sd/error.txt")
    uart_B.write(str(x) + " " + str(x.args[0]))
            last = time.ticks_ms()
            img = camera.obj.get_image()
            HowManyThings = kpu.run_yolo2(HowManyTask, img)
            if HowManyThings:
                for pos in range(len(HowManyThings)):
                    i = HowManyThings[pos]
                    img.draw_rectangle(320 - (i.x() + i.w()), i.y(), i.w(), i.h())
                    img.draw_string(320 - (i.x() + i.w()), i.y(),
                                    '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))
            ## gc.collect()  # have bug when reply 3
            lcd.display(img)
    except KeyboardInterrupt as e:
        pass
    finally:
        kpu.deinit(HowManyTask)
        #break

    try:
        FaceRecoModel = kpu.load(0x2C0000)
        kpu.init_yolo2(FaceRecoModel, 0.5, 0.3, 5,
                       (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
                        5.3658, 5.155437, 6.92275, 6.718375, 9.01025))
        while True:
            #print(time.ticks_ms() - last)
            last = time.ticks_ms()
            img = camera.obj.get_image()
            img.pix_to_ai()
            # Run the detection routine
            FaceRecoBbox = kpu.run_yolo2(FaceRecoModel, img)
            if FaceRecoBbox:
                for i in FaceRecoBbox:
                    # print(i)
def main():
    servo_freq = 50  # Hz
    servo_vert = Servo(pin=10, freq=servo_freq, min_duty=7, max_duty=11.5,
                       timer=Timer.TIMER0, channel=Timer.CHANNEL0, initial_pos=0.5)
    servo_hor = Servo(pin=11, freq=servo_freq, min_duty=2.8, max_duty=11.5,
                      timer=Timer.TIMER0, channel=Timer.CHANNEL1)

    lcd.init(freq=15000000)
    lcd.direction(lcd.YX_LRDU)

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_vflip(1)

    # 20-class YOLO
    classes = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
    # target_class = 14  # person
    # target_class = 7   # cat
    target_class = 4     # bottle
    # target_class = 19  # monitor
    task = kpu.load(0x500000)
    anchor = (1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52)

    # For face detector yolo
    # task = kpu.load(0x300000)
    # anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    # target_class = 0

    a = kpu.init_yolo2(task, 0.1, 0.3, 5, anchor)
    sensor.run(1)
    while (True):
        img = sensor.snapshot()
        code = kpu.run_yolo2(task, img)
        target_boxes = []
        if code:
            target_boxes = [d for d in code if d.classid() == target_class]
        if target_boxes:
            # change turret position
            target = max(target_boxes, key=lambda d: d.value())
            a = img.draw_rectangle(target.rect())
            a = lcd.display(img)
            for i in code:
                lcd.draw_string(target.x(), target.y() + 12, '%1.3f' % target.value(), lcd.RED, lcd.WHITE)
            target_center_x = target.x() + target.w() // 2
            target_center_y = target.y() + target.h() // 2
            servo_hor.pos += 0.00015 * (sensor.width() // 2 - target_center_x)
            servo_vert.pos -= 0.0005 * (sensor.height() // 2 - target_center_y)
            # print('hor', servo_hor.pos)
            # print('vert', servo_vert.pos)
        else:
            a = lcd.display(img)
    kpu.deinit(task)
while (True):
    clock.tick()
    img = sensor.snapshot()
    code = kpu.run_yolo2(task, img)
    infostr = str(clock.fps()) + ' : '
    for i in range(len(classes)):
        count[i][idx] = 0
    if code:
        for i in code:
            infostr = infostr + classes[i.classid()][0] + ', '
            count[i.classid()][idx] = 1
    for i in range(len(classes)):
        if sum(count[i]) > avg_len / 2:
            if classes[i][1]:
                uart.write(classes[i][1] + 'gairuyo\r')
    print(infostr)
    #print(count)
    idx = (idx + 1) % avg_len

model = kpu.deinit(task)
uart.deinit()
del uart
def __del__(self):
    _ = kpu.deinit(self._m_fe)
    _ = kpu.deinit(self._m_ld)
    _ = kpu.deinit(self._m_fd)
        sout.value(SOUT)
        print('nothing detected')
    if (FLAG):  # /// face recognition ///
        check_key()   # button check
        check_key2()  # button check
        img = sensor.snapshot()              # grab one frame from the camera
        clock.tick()                         # record the time, used to compute the frame rate
        code = kpu.run_yolo2(task_fd, img)   # run the face detection model, get face coordinates
        if key_pressed2 == 1:                # if a button press was detected
            print("shifted.")
            key_pressed2 = 0                 # reset the button state
            FLAG = 0
            a = kpu.deinit(task_fe)
            a = kpu.deinit(task_ld)
            a = kpu.deinit(task_fd)
            task = kpu.load('/sd/mask.kmodel')
            a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchorMsk)  # initialize the mask detection model
            continue
        if code:  # if a face was detected
            faceVector += 1
            for i in code:  # iterate over the bounding boxes
                # Cut face and resize to 128x128
                a = img.draw_rectangle(i.rect())                # draw the face box on screen
                face_cut = img.cut(i.x(), i.y(), i.w(), i.h())  # crop the face region into face_cut
                face_cut_128 = face_cut.resize(128, 128)        # scale the cropped face to 128x128 pixels
                a = face_cut_128.pix_to_ai()                    # convert the crop to the format the KPU accepts