def measure_fps(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    fps_ = []
    for i in range(20):
        img = sensor.snapshot()
        clock.tick()
        fmap = kpu.forward(task, img)
        lcd.display(img, oft=(0, 0))
        fps_.append(clock.fps())
    average_fps = sum(fps_) / len(fps_)
    print(average_fps)
    global fps_result
    fps_result = average_fps
    _ = kpu.deinit(task)
def __init__(self, detector):
    self.object_detected = None
    self.FaceDetector = detector
    self.landmark_task = kpu.load(0x6BD000)
    a = kpu.set_outputs(self.landmark_task, 0, 1, 1, 10)
def inference(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    while (True):
        img = sensor.snapshot()
        clock.tick()
        fmap = kpu.forward(task, img)
        fps = clock.fps()
        plist = fmap[:]
        pmax = max(plist)
        max_index = plist.index(pmax)
        a = lcd.display(img, oft=(0, 0))
        lcd.draw_string(0, 128, "%.2f:%s " % (pmax, labels[max_index].strip()))
    _ = kpu.deinit(task)
def measure_latency(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    latency_ = []
    for i in range(20):
        img = sensor.snapshot()
        clock.tick()
        t1 = time.ticks_us()
        fmap = kpu.forward(task, img)
        t2 = time.ticks_diff(time.ticks_us(), t1) / 1000
        lcd.display(img, oft=(0, 0))
        latency_.append(t2)
    average_latency = sum(latency_) / len(latency_)
    print(average_latency)
    global latency_result
    latency_result = average_latency
    _ = kpu.deinit(task)
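# A minimal sketch of driving the two benchmark helpers above. The camera/LCD
# setup and the flash address 0x300000 are assumptions for illustration;
# fps_result and latency_result are the module-level globals the helpers write to.
import sensor, lcd, time
import KPU as kpu

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.run(1)

fps_result = 0
latency_result = 0
measure_fps(0x300000)       # or "/sd/name_of_the_model_file.kmodel"
measure_latency(0x300000)
print("avg fps: %.2f, avg latency: %.2f ms" % (fps_result, latency_result))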
def __init__(self, FileName, Label, bool=False):
    self.row = global_value.row
    global_value.row = global_value.row + 1
    self.percent = 0
    self.image_class = 0
    self.file_name = FileName
    self.labels = Label
    if bool:
        self.image_objects_task = kpu.load(self.file_name)
        a = kpu.set_outputs(self.image_objects_task, 0, 1, 1, len(self.labels))
    else:
        pass
def init_kpu(threshold=0.3):
    classes = ["person"]
    task = kpu.load(0x300000)  # change to "/sd/name_of_the_model_file.kmodel" if loading from SD card
    # the actual shape needs to match the last layer shape of your model (before Reshape)
    a = kpu.set_outputs(task, 0, 7, 7, 30)
    anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
              7.88282, 3.52778, 9.77052, 9.16828)
    # tweak the second parameter if you're getting too many false positives
    a = kpu.init_yolo2(task, threshold, 0.3, 5, anchor)
    return task
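# A small helper sketch showing where the 7x7x30 shape above comes from: a
# YOLOv2 head with 5 anchors outputs 5 * (5 + num_classes) channels per grid cell.
def yolo2_output_channels(num_classes, num_anchors=5):
    # each anchor predicts 4 box coordinates + 1 objectness score + one score per class
    return num_anchors * (5 + num_classes)

print(yolo2_output_channels(1))  # 30 -> kpu.set_outputs(task, 0, 7, 7, 30) for the one-class model above
print(yolo2_output_channels(2))  # 35 -> matches the two-class ['face', 'eye'] example further down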
def __init__(self):
    self.object_detected = None
    self.percent = 0
    self.classes = ['face']
    self.anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
                   5.155437, 6.92275, 6.718375, 9.01025)
    self.x_center = 0
    self.y_center = 0
    self.object_detection_task = kpu.load(0x659000)
    a = kpu.set_outputs(self.object_detection_task, 0, 7, 7, 30)
    a = kpu.init_yolo2(self.object_detection_task, 0.1, 0.1, 5, self.anchor)
def __init__(self, FileName, Classes, Anchor, bool=False):
    self.row = global_value.row
    global_value.row = global_value.row + 1
    self.object_detected = None
    self.percent = 0
    self.file_name = FileName
    self.classes = Classes
    self.anchor = Anchor
    self.x_center = 0
    self.y_center = 0
    if bool:
        self.object_detected_task = kpu.load(self.file_name)
        a = kpu.set_outputs(self.object_detected_task, 0, 7, 7,
                            5 * (5 + len(self.classes)))
        a = kpu.init_yolo2(self.object_detected_task, 0.1, 0.3, 5, self.anchor)
    else:
        pass
def __init__(self, lm_detector):
    self.id = -1
    self.max_score = 0
    self.threshold = 85
    self.LandmarkDetector = lm_detector
    self.db = _read_db()
    offset_x = 0
    offset_y = -15
    self.dst_point = [(44 + offset_x, 59 + offset_y),
                      (84 + offset_x, 59 + offset_y),
                      (64 + offset_x, 82 + offset_y),
                      (47 + offset_x, 105),
                      (81 + offset_x, 105)]
    self.img_face = image.Image(size=(128, 128))
    a = self.img_face.pix_to_ai()
    self.fe_task = kpu.load(0x708000)
    a = kpu.set_outputs(self.fe_task, 0, 1, 1, 128)
import sensor, image, lcd, time
import KPU as kpu

lcd.init()
lcd.rotation(2)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_vflip(1)
sensor.run(1)
clock = time.clock()
classes = ['u', 'us']
task = kpu.load(0x500000)
kpu.set_outputs(task, 0, 35, 7, 7)
anchor = (1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52)
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while (True):
    clock.tick()
    img = sensor.snapshot()
    objects = kpu.run_yolo2(task, img)
    print(clock.fps())
    if objects:
        for obj in objects:
            img.draw_rectangle(obj.rect(), color=(0, 255, 0), thickness=3)
            img.draw_string(obj.x(), obj.y(), classes[obj.classid()],
                            color=(0, 255, 0), scale=2)
          6.718375, 9.01025)
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while (True):
    img = sensor.snapshot()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            xx = i.x() - 10
            yy = i.y() - 10
            ww = i.w() + 15
            hh = i.h() + 10
            face_cut = img.cut(xx, yy, ww, hh)
            face_cut = face_cut.resize(128, 128)
            a = face_cut.pix_to_ai()
            a = kpu.set_outputs(tid, 0, 1, 1, 2)
            fmap = kpu.forward(tid, face_cut)
            plist = fmap[:]
            pmax = max(plist)
            max_index = plist.index(pmax)
            print(plist)
            if plist[0] >= 0.94:
                a = img.draw_rectangle(xx, yy, ww, hh, color=(255, 0, 0), thickness=4)
            elif plist[1] >= 0.98:
                a = img.draw_rectangle(xx,
# setup LCD screen
lcd.init()
lcd.rotation(2)

# read label file
f = open("labels.txt", "r")
labels = f.readlines()
f.close()

# setup CNN
task = kpu.load(0x200000)
# Kmodel V4 needs the output shape set manually:
# set_outputs(int idx, int w, int h, int ch)
kpu.set_outputs(task, 0, 6, 1, 1)
timep = 0
while (True):
    fps = 1000 / (time.ticks_ms() - timep)
    timep = time.ticks_ms()
    img = sensor.snapshot()
    img = img.resize(224, 224)
    a = img.pix_to_ai()
    fmap = kpu.forward(task, img)
    plist = fmap[:]
    pmax = max(plist)
    max_index = plist.index(pmax)
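    # Sketch of a possible continuation of the loop above (the snippet stops at
    # max_index): show the top label from labels.txt with the same
    # draw_string/lcd.display pattern used by the other classifier snippets here.
    a = img.draw_string(0, 0, "%.2f %s fps:%.1f" % (pmax, labels[max_index].strip(), fps),
                        color=(255, 0, 0), scale=2)
    a = lcd.display(img)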
# (fragment: tail of the btn_function debounce handler)
if (current_time - btn_time) >= 500:
    btn_time = time.ticks()

fm.register(8, fm.fpioa.GPIOHS0)
btn = GPIO(GPIO.GPIOHS0, GPIO.IN)
lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.run(1)
# you need to put the model (face.kfpkg) in flash at address 0x500000
task = kpu.load(0x500000)
# task = kpu.load("/sd/face.kmodel")
a = kpu.set_outputs(task, 0, 20, 15, 30)
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
          5.155437, 6.92275, 6.718375, 9.01025)
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while (True):
    btn.irq(btn_function, GPIO.IRQ_RISING)
    img = sensor.snapshot()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            print(i)
            a = img.draw_rectangle(i.rect())
            pos(i.x() + i.w() / 2, i.y() + i.h() / 2)
    a = lcd.display(img)
a = kpu.deinit(task)
def append_dataset(file_name, data):
    face_dataset.append(data)
    f = open(file_name, "a")
    str_list_target = ["{:0.4f}".format(x) for x in data]
    str_target = ','.join(str_list_target)
    f.write(str_target)
    f.close()
    print("save to dataset success")

def send_sheet(face_id):
    return

#=== AI Models ===#
task_face_detect = kpu.load(0x200000)
task_face_encode = kpu.load(0x300000)
kpu.set_outputs(task_face_encode, 0, 1, 1, 128)
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
          5.155437, 6.92275, 6.718375, 9.01025)
kpu.init_yolo2(task_face_detect, 0.5, 0.3, 5, anchor)

#====== config ======#
face_threshold = 15
dataset_filename = "faces.csv"
#====================#

#=== SETUP ===#
#clear_dataset(dataset_filename,face_dataset)
face_dataset = read_dataset(dataset_filename)
corgi85.IFTTT_init("corgi_detect", "0hI55mQkUiimG6RIjpWhp")

#=== wait wifi connect ===#
while corgi85.wifi_check() == 0:
import sensor, image, lcd
import KPU as kpu
import uos
import time

lcd.init()
lcd.rotation(2)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
#sensor.set_vflip(1)
sensor.run(1)
classes = ["sakura"]
print(uos.listdir("/sd/"))
task = kpu.load("/sd/sakura_model_v01.kmodel")
# the actual shape needs to match the last layer shape of your model (before Reshape)
kpu.set_outputs(task, 0, 7, 7, 30)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
          7.88282, 3.52778, 9.77052, 9.16828)
kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
#kpu.init_yolo2(task, 0.8, 0.9, 5, anchor)
print("start")
code = ""
while (True):
    img = sensor.snapshot()  #.rotation_corr(z_rotation=90.0)
    #a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(), color=(0, 255, 0))
            a = img.draw_string(i.x(), i.y(), classes[i.classid()],
                                color=(255, 0, 0), scale=3)
    a = lcd.display(img)
sensor.run(1)

# Init KPU
print("init KPU")
lcd.draw_string(10, 5, "init kpu")
lcd.draw_string(170, 5, "Running")
lcd.draw_string(10, 20, "load kmodel")
kpu.memtest()
task = kpu.load(0x600000)
lcd.draw_string(170, 20, "Done")
lcd.draw_string(10, 35, "set outputs")
lcd.draw_string(10, 50, "steer")
load_state = kpu.set_outputs(task, 0, 1, 1, 20)
lcd.draw_string(150, 50, ("Done" if load_state is True else "None"))
lcd.draw_string(10, 65, "throttle")
load_state = kpu.set_outputs(task, 1, 1, 1, 16)
lcd.draw_string(150, 65, ("Done" if load_state is True else "None"))
kpu.memtest()
lcd.draw_string(170, 35, "Done")
print("Done")
time.sleep_ms(1000)
lcd.draw_string(170, 5, "Done ")
time.sleep_ms(500)
lcd.draw_string(60, 119, "Setup Done! :)")
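# One possible way to read both configured outputs after the setup above,
# assuming kpu.get_output() accepts the same output indexes passed to
# kpu.set_outputs() (the variable names here are illustrative only).
img = sensor.snapshot()
a = kpu.forward(task, img)
steer_out = kpu.get_output(task, 0)      # 1x1x20 steering head
throttle_out = kpu.get_output(task, 1)   # 1x1x16 throttle head
steer_idx = steer_out[:].index(max(steer_out[:]))
throttle_idx = throttle_out[:].index(max(throttle_out[:]))
print(steer_idx, throttle_idx)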
import sensor, image, lcd, time
import KPU as kpu

sensor.reset(freq=24000000, dual_buff=True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((320, 224))
sensor.set_vflip(1)
sensor.run(1)
lcd.init(type=2, freq=20000000, color=lcd.BLACK)
#lcd.rotation(2)
classes = ["license_plate"]
task = kpu.load(0x400000)
a = kpu.set_outputs(task, 0, 10, 7, 30)
anchor = (1.81, 0.85, 2.26, 1.07, 3.00, 1.46, 4.56, 1.95, 7.38, 3.45)
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
while (True):
    timestamp = time.ticks_ms()
    img = sensor.snapshot()
    a = img.pix_to_ai()
    plates = kpu.run_yolo2(task, img)
    if plates:
        plate = plates[0]
        a = img.draw_rectangle(plate.rect(), color=(0, 255, 0), thickness=5)
    a = img.draw_string(70, 10, "FPS : %.2f" % (1000 / (time.ticks_ms() - timestamp)),
                        color=(0, 255, 0), scale=2)
    a = lcd.display(img)
a = kpu.deinit(task)
import sensor, image, time, lcd
import KPU as kpu

lcd.init(freq=15000000)
sensor.reset()                       # Reset and initialize the sensor. It runs
                                     # automatically; call sensor.run(0) to stop.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)    # Set frame size to QVGA (320x240)
sensor.set_windowing((224, 224))
sensor.skip_frames(time=2000)        # Wait for settings to take effect.
sensor.set_vflip(1)
sensor.set_hmirror(1)
clock = time.clock()                 # Create a clock object to track the FPS.
task = kpu.load(0x300000)            # mnist
a = kpu.set_outputs(task, 0, 1, 1, 2)
while (True):
    clock.tick()                     # Update the FPS clock.
    img = sensor.snapshot()          # Take a picture and return the image.
    #img.invert()                    # Use with numbers on paper
    img_3 = img.resize(128, 128)
    a = img_3.pix_to_ai()
    fmap = kpu.forward(task, img_3)
    plist = fmap[:]
    pmax = max(plist)
    max_index = plist.index(pmax)
    #img.draw_string(5, 5, ('%s' % (max_index)), color=(255, 255, 255), scale=2)
    lcd.display(img)                 # Display on LCD
    #print("%d: %.3f" % (max_index, pmax))
    print(plist)
import sensor, image, lcd, time
import KPU as kpu

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)
lcd.init(type=2, freq=20000000, color=lcd.BLACK)
lcd.rotation(2)
clock = time.clock()
classes = ['face', 'eye']
task = kpu.load(0x400000)
a = kpu.set_outputs(task, 0, 7, 7, 35)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
          7.88282, 3.52778, 9.77052, 9.16828)
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while (True):
    img = sensor.snapshot()
    #a = img.resize(224, 224)
    a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(), color=(0, 255, 0), thickness=4)
    a = lcd.display(img)
a = kpu.deinit(task)
sensor.set_framesize(sensor.QVGA)   # Set the capture size to QVGA (320x240)
sensor.set_windowing((224, 224))    # Set the image size the network was trained with
sensor.set_vflip(1)                 # Vertical flip of the image
sensor.set_saturation(-3)           # Saturation
sensor.set_brightness(-3)           # Brightness
sensor.set_contrast(-3)             # Contrast
lcd.clear()                         # Clear the screen to black

# Model description and loading
labels = ['Acaro', 'Bueno', 'Manchado']  # Labels of the network's last layer
task = kpu.load('/sd/3clases.kmodel')    # Location of the .kmodel file (load)
kpu.set_outputs(task, 0, 1, 1, 3)        # Dimensions of the network's last layer

while (True):
    tick1 = utime.ticks_ms()         # Run the model in real time
    kpu.memtest()                    # Check the available memory
    img = sensor.snapshot()          # Capture an image
    fmap = kpu.forward(task, img)    # Run the neural network on the captured image
    plist = fmap[:]                  # Extract the probabilities into a list
    pmax = max(plist)                # Pick the highest probability
    max_index = plist.index(pmax)    # Index of the class with the highest probability
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
lcd.clear()
# the number of labels should match the number of labels the model was trained with
labels = ['arduino_uno', 'santa_claus']
# change to "/sd/name_of_the_model_file.kmodel" if loading from SD card
task = kpu.load(0x200000)
# the actual shape needs to match the last layer shape of your model
kpu.set_outputs(task, 0, 1, 1, 2)
while (True):
    kpu.memtest()
    img = sensor.snapshot()
    # img = img.rotation_corr(z_rotation=90.0)  # uncomment if rotation correction is needed - only present in full maixpy firmware
    a = img.pix_to_ai()
    fmap = kpu.forward(task, img)
    plist = fmap[:]
    pmax = max(plist)
    max_index = plist.index(pmax)
    a = img.draw_string(0, 0, str(labels[max_index].strip()),
                        color=(255, 0, 0),
tinyYolo = "/sd/models/tinyYoloMerged.kmodel"  # 16 fps
mbnet75 = "/sd/models/mbnet75Merged.kmodel"    # 12 fps
mbnet50 = "/sd/models/mbnet50Merged.kmodel"    # 15 fps
mbnet25 = "/sd/models/mbnet25Merged.kmodel"    # 16 fps

task = kpu.load(mbnet50)
try:
    kpu.netinfo(task)
except:
    print("cannot get net info, kmodel v4 not supported for netinfo!")

# per the documentation, Kmodel V4 needs the output shape set manually
lastLayerShape = (len(classes) + 5) * 5
a = kpu.set_outputs(task, 0, 7, 7, lastLayerShape)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
          7.88282, 3.52778, 9.77052, 9.16828)  # yolo v2 tiny

def doOverlap(boxDict, box1Dict):
    box = boxDict['rect']
    box1 = box1Dict['rect']
    print("checking overlapping boxes:", box, " ", box1)
    box_x = box[0]
    box_y = box[1]
    box_x2 = box[0] + box[2]
    box_y2 = box[1] + box[3]
    box1_x = box1[0]
    box1_y = box1[1]
Scale = 2
sensor.set_windowing((window_height, window_width))
sensor.skip_frames(100)
sensor.run(1)

print("init kpu")
lcd.draw_string(10, 10, "init kpu")
lcd.draw_string(170, 10, "Running")
lcd.draw_string(10, 30, "load kmodel")
kpu.memtest()
task = kpu.load(0x500000)
lcd.draw_string(170, 30, "Done")
lcd.draw_string(10, 50, "set outputs")
fmap = kpu.set_outputs(task, 0, window_height, window_width, classes)
kpu.memtest()
lcd.draw_string(170, 50, "Done")
print("Done")
time.sleep_ms(1000)
lcd.draw_string(170, 10, "Done ")
time.sleep_ms(500)
lcd.draw_string(60, 70, "Setup Done! :)")
clock = time.clock()
seg_img = image.Image()
output_array = np.array([0, 0, 0, 0, 0])
color_dict = {0: (0, 0, 0),     # 0 in VOC2012: BG, VOID
              1: (192, 0, 0),   # 9 in VOC2012: CHAIR, SOFA
              2: (64, 128, 0),  # 11 in VOC2012: TABLE
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)    # Set frame size to QVGA (320x240)
#sensor.set_windowing((320, 240))
sensor.skip_frames(time=2000)        # Wait for settings to take effect.
sensor.set_vflip(1)
sensor.set_hmirror(1)
clock = time.clock()                 # Create a clock object to track the FPS.
print('loading face detect model')
task_detect_face = kpu.load(0x300000)    # Load the face detection model into the KPU
print('loading face expression classify model')
task_classify_face = kpu.load(0x500000)  # Load the face classification model into the KPU
a = kpu.set_outputs(task_classify_face, 0, 1, 1, 2)
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
          5.155437, 6.92275, 6.718375, 9.01025)
a = kpu.init_yolo2(task_detect_face, 0.5, 0.3, 5, anchor)
labels = ['happy', 'sad']            # Facial expression labels
print('configuration complete')
while (True):
    clock.tick()                     # Update the FPS clock.
    img = sensor.snapshot()          # Take a picture and return the image.
    detected_face = kpu.run_yolo2(task_detect_face, img)
    if detected_face:
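        # Sketch of how the classification step could continue from here (the
        # snippet ends at the detection check): crop each detected face and run
        # the expression classifier, following the crop/resize/pix_to_ai/forward
        # pattern used by the face-mask snippet above. The 128x128 input size is
        # an assumption about the classifier model.
        for face in detected_face:
            face_img = img.cut(face.x(), face.y(), face.w(), face.h())
            face_img = face_img.resize(128, 128)
            a = face_img.pix_to_ai()
            fmap = kpu.forward(task_classify_face, face_img)
            plist = fmap[:]
            max_index = plist.index(max(plist))
            a = img.draw_rectangle(face.rect())
            a = img.draw_string(face.x(), face.y(), labels[max_index], scale=2)
    a = lcd.display(img)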
import KPU as kpu

task = kpu.load(0x400000)
kpu.set_outputs(task, 0, 1, 3, 1)
info = kpu.netinfo(task)
layerbottom = info[-1]
print(info, layerbottom)
    return data_packet

#classes = ["smallfish","mebaru","azi","hugu","ishidai","kasago","bera","kawahagi","tai","hamati","kurodai","kisu","gure","sayori","suzuki"]
classes = ["sakura"]
print(uos.listdir("/sd/"))

# KPU setting
#kpu.deinit(task)
try:
    task = kpu.load(modelpath)
except:
    kpu.deinit(task)
    task = kpu.load(modelpath)
# the actual shape needs to match the last layer shape of your model (before Reshape)
#kpu.set_outputs(task, 0, 7, 7, 100)
kpu.set_outputs(task, 0, 7, 7, 30)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
          7.88282, 3.52778, 9.77052, 9.16828)
kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
pic_filepath = "/sd/sakura_pic"
try:
    uos.mkdir(pic_filepath)
except:
    print(uos.listdir(pic_filepath))
cnt = 0
while (True):
    try:
        img = sensor.snapshot()
        code = kpu.run_yolo2(task, img)
i2c.scan()

# INITIALIZE CAMERA
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.run(1)

# INITIALIZE LCD
lcd.init()
lcd.rotation(2)
lcd.clear()

# INITIALIZE MODEL
task = kpu.load(0x200000)
kpu.set_outputs(task, 0, 1, 1, 5)

# INITIALIZE FILE HANDLER
# create the file and include column headers if it doesn't exist
if FILE_NAME not in os.listdir():
    with open(FILE_NAME, "w") as fout:
        column_headers = "name|contact_number|email|address|date_time|temperature|classification|confidence_level|cough_others|fever_others|headache_others|difficulty_breathing_others|cough|fever|headache|difficulty_breathing\n"
        fout.write(column_headers)

# create the image directory if it doesn't exist
if IMAGE_DIRECTORY not in os.listdir():
    os.mkdir(IMAGE_DIRECTORY)

# RESET WHEN WIFI OR SOCKET CONNECTION IS LOST
def reset():
    lcd.clear()
# tested with firmware 5-0.22
import sensor, image, lcd
import KPU as kpu

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)
classes = ["racoon"]
task = kpu.load(0x200000)  # change to "/sd/name_of_the_model_file.kmodel" if loading from SD card
# the actual shape needs to match the last layer shape of your model (before Reshape)
a = kpu.set_outputs(task, 0, 7, 7, 30)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
          7.88282, 3.52778, 9.77052, 9.16828)
# tweak the second parameter if you're getting too many false positives
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
while (True):
    img = sensor.snapshot().rotation_corr(z_rotation=90.0)
    a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(), color=(0, 255, 0))
            a = img.draw_string(i.x(), i.y(), classes[i.classid()],
                                color=(255, 0, 0), scale=3)
        a = lcd.display(img)
    else:
        a = lcd.display(img)
a = kpu.deinit(task)
import os, image, time
import KPU as kpu

md = kpu.load(0x400000)
kpu.set_outputs(md, 0, 1, 3, 1)
img = image.Image("/sd/3.jpg")
print(img)
#img = img.rgb_to_grayscale()
img.pix_to_ai()
a = kpu.forward(md, img)
fmap = kpu.get_output(md, 0)
print("fmap", fmap[:])
#print(os.listdir())
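# A follow-on sketch: turning the raw feature map above into a predicted class
# with the same max/index pattern the classifier snippets in this section use.
# The label list is a hypothetical placeholder.
labels = ["class_0", "class_1", "class_2"]
plist = fmap[:]
pmax = max(plist)
max_index = plist.index(pmax)
print("predicted: %s (%.3f)" % (labels[max_index], pmax))
_ = kpu.deinit(md)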