def main(model_addr=0x300000, lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)

    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)

    task = kpu.load(model_addr)
    anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchors) # threshold:[0,1], nms_value: [0, 1]
    try:
        while(True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    img.draw_rectangle(obj.rect())
            img.draw_string(0, 200, "t:%dms" %(t), scale=2)
            lcd.display(img)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
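The snippet above assumes the standard MaixPy modules are already imported at the top of the script; a minimal sketch of the assumed boilerplate and a typical invocation (values are illustrative):

import sensor, lcd, time, sys
import KPU as kpu

# Run the detection demo; rotation and mirror/flip flags depend on the board variant.
main(model_addr=0x300000, lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False)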
Example #2
        def get_target_err(self):
            img = sensor.snapshot()
            code = kpu.run_yolo2(self.task_fd, img)
            if code:
                max_area = 0
                max_i = 0
                for i, j in enumerate(code):
                    a = j.w() * j.h()
                    if a > max_area:
                        max_i = i
                        max_area = a

                img = img.draw_rectangle(code[max_i].rect())
                self.pitch = (code[max_i].y() + code[max_i].h() /
                              2) / 240 * self.out_range * 2 - self.out_range
                self.roll = (code[max_i].x() + code[max_i].w() /
                             2) / 320 * self.out_range * 2 - self.out_range
                # limit
                if abs(self.pitch) < self.out_range * self.ignore:
                    self.pitch = 0
                if abs(self.roll) < self.out_range * self.ignore:
                    self.roll = 0
                img = img.draw_cross(160, 120)
                lcd.display(img)
                return (self.pitch, self.roll)
            else:
                img = img.draw_cross(160, 120)
                lcd.display(img)
                return (0, 0)
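get_target_err() relies on a face-detection task in self.task_fd plus out_range and ignore attributes set in the constructor, none of which are shown here. A minimal sketch of that assumed initialization (class name and default values are illustrative):

import KPU as kpu

class FaceFollower:
    def __init__(self, out_range=10, ignore=0.1, model_addr=0x300000):
        self.out_range = out_range        # maximum |pitch| / |roll| error returned
        self.ignore = ignore              # dead-band as a fraction of out_range
        self.pitch = 0
        self.roll = 0
        anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
                   5.155437, 6.92275, 6.718375, 9.01025)
        self.task_fd = kpu.load(model_addr)
        kpu.init_yolo2(self.task_fd, 0.5, 0.3, 5, anchors)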
Example #3
    def on_draw(self):
        if not self.__initialized:
            self.__lazy_init()
        try:
            while True:
                img = sensor.snapshot()  # Take an image from sensor
                print("progress 4 OK!")
                # Run the detection routine
                bbox = kpu.run_yolo2(self.task, img)
                if bbox:
                    for i in bbox:
                        print(i)
                        img.draw_rectangle(i.rect())
                lcd.display(img)
                home_button = self.get_system().home_button
                # TODO
                led_w = self.get_system().led_w
                if home_button.value() == 0 and self.but_stu == 1:
                    if led_w.value() == 1:
                        led_w.value(0)
                    else:
                        led_w.value(1)
                    self.but_stu = 0
                if home_button.value() == 1 and self.but_stu == 0:
                    self.but_stu = 1

        except KeyboardInterrupt:
            a = kpu.deinit(self.task)
            sys.exit()
Example #4
 def work(img):
     img.pix_to_ai()
     # Run the detection routine
     FaceReco.bbox = kpu.run_yolo2(FaceReco.model, img)
     if FaceReco.bbox:
         for i in FaceReco.bbox:
             # print(i)
             img.draw_rectangle(i.rect())
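work() expects FaceReco.model to hold a YOLOv2 task that was loaded and initialized elsewhere. A sketch of that assumed one-time setup (model address is illustrative; anchors are the face-detection values used throughout these examples):

import KPU as kpu

anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
           5.155437, 6.92275, 6.718375, 9.01025)
FaceReco.model = kpu.load(0x300000)           # face-detection model in flash
kpu.init_yolo2(FaceReco.model, 0.5, 0.3, 5, anchors)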
Example #5
def updateKpu():
    global g_cFiler
    global g_selCnt
    global g_cWav
    global g_task
    global g_powArr

    info = g_cFiler.getInfoList()[g_selCnt]

    if (g_task == None):
        g_task = _resetTask()
    if (g_task == None):
        g_cWav.play('/sd/snd/sys_ng.wav')
        g_cWav.wait()

    img = sensor.snapshot()

    if (info.modelType == 'yolo2'):
        plist = []
        for id in range(0, len(info.classList)):
            plist.append(0.0)

        code_obj = kpu.run_yolo2(g_task, img)
        if code_obj:  # object detected
            for i in code_obj:
                rect_size = i.w() * i.h()
                if rect_size > 10:
                    print(len(plist))
                    print(i.classid())
                    plist[i.classid()] = 0.95

    else:
        fmap = kpu.forward(g_task, img, False)
        plist = fmap[:]

    colArr = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (5, 5, 5), (0, 255, 255),
              (255, 255, 0), (128, 128, 128), (50, 200, 50)]
    for id in range(0, len(plist)):
        if (plist[id] > 0.9):
            g_powArr[id] = min((g_powArr[id] + plist[id] - 0.9) * 5.0, 100.0)
        else:
            g_powArr[id] -= 10.0
            g_powArr[id] = max(g_powArr[id], 0.0)
        img.draw_rectangle((10, 50 + 10 * id, int(g_powArr[id] * 1), 8),
                           colArr[id & 7], 10, True)

        if (g_powArr[id] >= 100.0):
            g_powArr[id] = 0.0
            info = g_cFiler.getInfoList()[g_selCnt]
            labels = info.classList
            wavPath = info.dirName + '/' + labels[id] + '.wav'
            lcd.draw_string(0, 20, wavPath)
            g_cWav.play('/sd/models/' + wavPath)
            g_cWav.wait()
    a = lcd.display(img)
    g_cButton.update()
Example #6
def Object_Detection(img, obj):
    global labels
    code = kpu.run_yolo2(obj, img)
    if code != None:
        for i in code:
            a = img.draw_rectangle(i.rect(), (0, 255, 0), 2)
        print(i.classid())
        stuff.ID = i.classid()
        #uart_A.write(pack_obj_data())
        print(pack_obj_data())
Example #7
def findmasks():
    objects = kpu.run_yolo2(task, img)
    if objects:
        for obj in objects:
            pos = obj.rect()
            if obj.classid() == 0:
                img.draw_rectangle(pos, color=(255, 0, 0))
            if obj.classid() == 1:
                img.draw_rectangle(pos, color=(0, 255, 0))
            return pos
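findmasks() reads the module-level task and img, so it only makes sense after a mask-detection model has been loaded and a frame captured. A sketch of the assumed setup and calling loop (model path and anchors are illustrative, borrowed from the face_mask example further down):

import sensor, lcd
import KPU as kpu

task = kpu.load("/sd/mask_detect.kmodel")    # hypothetical model path
anchors = (0.212104, 0.261834, 0.630488, 0.706821, 1.264643, 1.396262,
           2.360058, 2.507915, 4.348460, 4.007944)
kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)

while True:
    img = sensor.snapshot()
    pos = findmasks()    # draws a red/green box per class, returns the first box or None
    lcd.display(img)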
Example #8
def main(anchors, labels=None, model_addr="/sd/m.kmodel"):
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((224, 224))
    sensor.run(1)

    lcd.init(type=1)
    lcd.clear(lcd.WHITE)

    if not labels:
        with open('labels.txt', 'r') as f:
            exec(f.read())
    if not labels:
        print("no labels.txt")
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "no labels.txt", color=(255, 0, 0), scale=2)
        lcd.display(img)
        return 1
    try:
        img = image.Image("startup.jpg")
        lcd.display(img)
    except Exception:
        img = image.Image(size=(320, 240))
        img.draw_string(90,
                        110,
                        "loading model...",
                        color=(255, 255, 255),
                        scale=2)
        lcd.display(img)

    task = kpu.load(model_addr)
    kpu.init_yolo2(task, 0.5, 0.3, 5,
                   anchors)  # threshold:[0,1], nms_value: [0, 1]
    try:
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    pos = obj.rect()
                    img.draw_rectangle(pos)
                    img.draw_string(pos[0],
                                    pos[1],
                                    "%s : %.2f" %
                                    (labels[obj.classid()], obj.value()),
                                    scale=2)
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
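The exec(f.read()) call expects labels.txt on the file system to contain a single Python assignment; an illustrative example of its assumed contents:

# labels.txt -- read and exec'd by main() above (contents are illustrative)
labels = ['person', 'cat', 'dog']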
Example #9
    def work(img):

        HowMany.things = kpu.run_yolo2(HowMany.task, img)
        if HowMany.things:

            for pos in range(len(HowMany.things)):
                i = HowMany.things[pos]
                img.draw_rectangle(ui.weight - (i.x() + i.w()), i.y(), i.w(), i.h())
                img.draw_string(ui.weight - (i.x() + i.w()), i.y(), '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))

        return img
Example #10
def seeotheritems():  #7second delay
    global taskfe
    global a
    global task
    global yolonum
    global anchor
    classes = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
    anchored = (1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52)
    kpu.deinit(taskfe)
    kpu.deinit(task)
    tasktw = kpu.load("/sd/model/20class.kmodel")
    uart_B.write(" loaded 20 ")
    kpu.init_yolo2(tasktw, 0.5, 0.3, 5, anchored)
    imgother = sensor.snapshot()
    imgother.pix_to_ai()
    detectcode = kpu.run_yolo2(tasktw, imgother)
    if detectcode:
        led_r.value(0)
        led_b.value(0)
        for i in detectcode:
            imgother = imgother.draw_rectangle(i.rect())
            imgother = imgother.draw_string(i.x(),
                                            i.y(),
                                            str(classes[i.classid()]),
                                            color=(255, 250, 250))
            imgother = imgother.draw_string(i.x(),
                                            i.y() + 12,
                                            '%1.3f' % i.value(),
                                            color=(255, 250, 250))
            imgother.save("/sd/yoloimages/" + str(yolonum) + ".jpg",
                          quality=70)
            utime.sleep_ms(50)
            yolonum += 1
            uart_B.write(" |Yolo|> " + str(classes[i.classid()]) + " <||")
            f = open("/sd/printoutput.txt", "a+")
            f.write("Yolo detected: " + str(classes[i.classid()]) + "\n\r")
            f.close()
    del (imgother)
    kpu.deinit(tasktw)
    del (tasktw)
    gc.collect()
    uart_B.write(" killed ")
    task = kpu.load("/sd/facedetect.kmodel")
    taskfe = kpu.load("/sd/model/FE.smodel")
    utime.sleep_ms(10)
    led_r.value(1)
    led_b.value(1)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
    uart_B.write(" restarted ")
Example #11
 def set_face_detection(self):
     img = sensor.snapshot()
     code = kpu.run_yolo2(self.task, img)
     if code:
         for i in code:
             #print(i)
             a = img.draw_rectangle(i.rect())
             print("find face")
             self.face_detection = 1
     else:
         print("no face")
         self.face_detection = 0
Example #12
    def work(img):

        MaybeIs.things = kpu.run_yolo2(MaybeIs.task, img)
        if MaybeIs.things:

            value, obj = 0, None
            for k in range(len(MaybeIs.things)):
                if value < MaybeIs.things[k].value():
                    value, obj = MaybeIs.things[k].value(), MaybeIs.things[k]

            i = obj  # the highest-confidence detection found above
            MaybeIs.result = classes[i.classid()]
            img.draw_rectangle(ui.weight - (i.x() + i.w()), i.y(), i.w(), i.h())
            img.draw_string(ui.weight - (i.x() + i.w()), i.y(), '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))

        return img
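The highest-confidence detection can also be selected directly with max(), which avoids the manual index bookkeeping; a sketch assuming the same MaybeIs.things list and classes labels:

# Equivalent selection of the highest-confidence detection.
i = max(MaybeIs.things, key=lambda t: t.value())
MaybeIs.result = classes[i.classid()]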
Example #13
def main():
    ## main
    task, graph, counter, uart = init(threshold=0.5, patience=4)
    frame_idx = 0
    try:
        while (True):
            # get image
            img = sensor.snapshot().rotation_corr(z_rotation=0.0)

            # detect boxes
            a = img.pix_to_ai()
            code = kpu.run_yolo2(task, img)

            # set frame
            currF = Frame(frame_idx, img, code)

            # calc track
            diff_idx = graph.track(currF)

            # counting
            counter.vanish_update(graph.F_list[-1].bboxes,
                                  graph.F_list[-2].bboxes, graph.is_decrease)
            counter.count()

            # display on IDE
            #img = currF.draw_frames()
            #img = img.copy((32, 32, 160, 160))
            #img.draw_string(0 ,0, str(counter.counter[LEFT])+","+str(counter.counter[RIGHT]),
            #color=(0,255,0), scale=3)
            #a = lcd.display(img)

            # to Gray
            msg = str(counter.counter[LEFT])+ DIV + \
                  str(counter.counter[RIGHT])+ DIV + str(currF.num_object)
            _ = uart.write(msg)
            #_ = uart.write(img)
            print(counter.counter)

            # finalize
            frame_idx += 1
            time.sleep(0.05)
    except Exception as e:
        # need delete kpu_task when keyboard interrupt
        a = kpu.deinit(task)
        del task
        gc.collect()
        print(e)
Example #14
    def detect_objects(self, threshold, return_img=False):
        detected = False
        img = snapshot()

        img_copy = img.resize(224, 224)
        a = img_copy.pix_to_ai()
        code = kpu.run_yolo2(self.object_detection_task, img_copy)

        if code:
            for i in code:
                if i.value() >= threshold:
                    detected = True

                    new_x, new_y = int(i.x() * 1.07), int(i.y() * 1.42)
                    roi = (new_x, new_y, int(i.w() * 1.07), int(i.h() * 1.42))
                    percent = i.value()
                    object_detected = self.classes[i.classid()]

                    if not return_img:
                        a = img.draw_rectangle(roi,
                                               color=(0x1c, 0xa2, 0xff),
                                               thickness=2)
                        a = img.draw_string(new_x,
                                            new_y - 14,
                                            ("%s %%: %.2f" %
                                             (object_detected, percent)),
                                            color=(255, 255, 255),
                                            scale=1.5,
                                            mono_space=False)

            self.x_center, self.y_center, self.area, self.percent, roi = _find_max(
                code)

        del (img_copy)

        if not detected:
            self.object_detected = None
            self.percent = -1
            if return_img:
                return img, None

        if return_img:
            return img, roi
        else:
            a = lcd.display(img)

        del (img)
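The helpers snapshot() and _find_max() are not shown in this excerpt; judging from how the return value of _find_max() is unpacked, it reports the centre, area, confidence and bounding box of the largest detection. A hypothetical sketch:

def _find_max(code):
    # Hypothetical helper: pick the largest detection and return its
    # (x_center, y_center, area, confidence, rect).
    best = max(code, key=lambda d: d.w() * d.h())
    x_center = best.x() + best.w() // 2
    y_center = best.y() + best.h() // 2
    return x_center, y_center, best.w() * best.h(), best.value(), best.rect()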
Example #15
def find_face():
    lcd.init()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_vflip(1)
    sensor.run(1)
    task = kpu.load(0x300000)
    anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437,
              6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
    while (True):
        img = sensor.snapshot()
        code = kpu.run_yolo2(task, img)
        if code:
            for i in code:
                img.draw_rectangle(i.rect())
        lcd.display(img)
    kpu.deinit(task)
Example #16
def main(model_addr=0x300000,
         lcd_rotation=0,
         sensor_hmirror=False,
         sensor_vflip=False):
    try:
        sensor.reset()
    except Exception as e:
        raise Exception(
            "sensor reset fail, please check hardware connection, or hardware damaged! err: {}"
            .format(e))
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)

    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)

    anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437,
               6.92275, 6.718375, 9.01025)
    try:
        task = None
        task = kpu.load(model_addr)
        kpu.init_yolo2(task, 0.5, 0.3, 5,
                       anchors)  # threshold:[0,1], nms_value: [0, 1]
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    img.draw_rectangle(obj.rect())
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        raise e
    finally:
        if task is not None:
            kpu.deinit(task)
Example #17
def checkCamera():
    global x, y
    x = 0
    y = 0
    showInfo("camera starting", True)
    img = image.Image()
    try:
        sensor.reset()
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_framesize(sensor.QVGA)
        sensor.skip_frames(time=2000)
        lcd.rotation(2)
    except:
        showInfo("camera init failed", False)
        return
    else:
        showInfo("camera init done", True)

    try:
        task = kpu.load(0x300000)
        anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437,
                  6.92275, 6.718375, 9.01025)
        a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
        while True:
            img = sensor.snapshot()
            code = kpu.run_yolo2(task, img)
            if code:
                for i in code:
                    a = img.draw_rectangle(i.rect(), color=lcd.BLUE)
            new = img.copy(roi=(0, 0, 239, 239))
            lcd.display(new)
    except:
        print('kpu load failed')
        while True:
            img = sensor.snapshot()
            new = img.copy(roi=(0, 0, 239, 239))
            lcd.display(new)
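showInfo() is not defined in this excerpt; it evidently writes a status message to the LCD. A hypothetical sketch consistent with how it is called:

def showInfo(msg, ok):
    # Hypothetical helper: green text for success, red for failure.
    lcd.clear()
    lcd.draw_string(10, 110, msg, lcd.GREEN if ok else lcd.RED, lcd.BLACK)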
Example #18
img_lcd = image.Image()
img_face = image.Image(size=(128, 128))
a = img_face.pix_to_ai()
record_ftr = []
record_ftrs = []
names = [
    'Mr.1', 'Mr.2', 'Mr.3', 'Mr.4', 'Mr.5', 'Mr.6', 'Mr.7', 'Mr.8', 'Mr.9',
    'Mr.10'
]

ACCURACY = 85

while (1):
    img = sensor.snapshot()
    clock.tick()
    code = kpu.run_yolo2(task_fd, img)
    if code:
        for i in code:
            # Cut face and resize to 128x128
            a = img.draw_rectangle(i.rect())
            face_cut = img.cut(i.x(), i.y(), i.w(), i.h())
            face_cut_128 = face_cut.resize(128, 128)
            a = face_cut_128.pix_to_ai()
            # a = img.draw_image(face_cut_128, (0,0))
            # Landmark for face 5 points
            fmap = kpu.forward(task_ld, face_cut_128)
            plist = fmap[:]
            le = (i.x() + int(plist[0] * i.w() - 10),
                  i.y() + int(plist[1] * i.h()))
            re = (i.x() + int(plist[2] * i.w()), i.y() + int(plist[3] * i.h()))
            nose = (i.x() + int(plist[4] * i.w()),
Example #19
classes = ['airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
task = kpu.load("/sd/irasutoya/20class.kmodel")

anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)

irasutoya_icons = [image.Image("/sd/irasutoya/icons_jpeg/{}.jpeg".format(icon_class)) for icon_class in classes]
wall_img = image.Image("/sd/irasutoya/wall.jpeg")

print('[info]: Started.')
but_stu = 1

try:
    while(True):
        #gc.collect()
        img = sensor.snapshot()
        code_obj = kpu.run_yolo2(task, img)
        img2show = wall_img.copy()

        if code_obj: # object detected
            for i in code_obj:
                icon = irasutoya_icons[i.classid()]
                img2show.draw_image(icon, i.x(), i.y(), x_scale=i.w()/icon.width(), y_scale=i.h()/icon.height())

        lcd.display(img2show)
except KeyboardInterrupt:
    kpu.deinit(task)
    sys.exit()
Example #20
import sensor,image,lcd,time
import KPU as kpu
sensor.reset(freq=24000000,dual_buff=True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((320, 224))
sensor.set_vflip(1)
sensor.run(1)

lcd.init(type=2, freq=20000000, color=lcd.BLACK)
#lcd.rotation(2)
classes = ["face", "face_mask"]
task = kpu.load(0x400000)
a = kpu.set_outputs(task, 0, 10,7,35)
anchor = (0.212104,0.261834, 0.630488,0.706821, 1.264643,1.396262, 2.360058,2.507915, 4.348460,4.007944)
a = kpu.init_yolo2(task, 0.5, 0.5, 5, anchor)
while(True):
    timestamp = time.ticks_ms()
    img = sensor.snapshot()
    a = img.pix_to_ai()
    faces = kpu.run_yolo2(task, img)
    if faces:
        for face in faces:
            if face.classid() == 0 :
                a=img.draw_rectangle(face.rect(),color = (255, 0, 0),thickness=5)
            elif face.classid() == 1 :
                a=img.draw_rectangle(face.rect(),color = (0,255, 0),thickness=5)
    a = img.draw_string(70,10,"FPS : %.2f" % (1000/(time.ticks_ms()-timestamp)),color=(0,255,0),scale=2)
    a = lcd.display(img)
a = kpu.deinit(task)
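The shape passed to kpu.set_outputs() follows from the YOLOv2 head: a 320x224 input downsampled by 32 gives a 10x7 grid, and each cell predicts 5 anchors x (5 box terms + 2 classes) = 35 channels. A quick sanity check of that arithmetic:

# Sanity check of the output shape used in kpu.set_outputs(task, 0, 10, 7, 35).
num_anchors = 5
num_classes = len(classes)                  # ["face", "face_mask"] -> 2
grid_w, grid_h = 320 // 32, 224 // 32       # -> 10, 7
channels = num_anchors * (5 + num_classes)  # -> 35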
Example #21
#====================#

#=== SETUP ===#
#clear_dataset(dataset_filename,face_dataset)
face_dataset = read_dataset(dataset_filename);
corgi85.IFTTT_init("corgi_detect","0hI55mQkUiimG6RIjpWhp")

#=== wait wifi connect ===#
while corgi85.wifi_check() == 0:
    print("WIFI Connecting ...")
    time.sleep(1)

while(True):
    img = sensor.snapshot()
    #--- face detect ---#
    faces = kpu.run_yolo2(task_face_detect, img)
    if faces:
        #--- check face size ---#
        x1 = faces[0].x() - 10
        y1 = faces[0].y() - 10
        w = faces[0].w() + 20
        h = faces[0].h() + 10
        if w > 80 and h > 80:
            #--- crop target face ---#
            face = img.cut(x1,y1,w,h)
            face = face.resize(112,112)
            a = img.draw_rectangle(x1,y1,w,h,color = (255,0,0), thickness=2)
            a = face.pix_to_ai()

            #--- encode face ---#
            fmap = kpu.forward(task_face_encode,face)
Example #22
print('loading face detect model')
task_detect_face = kpu.load(0x300000)  # Load face detect model into KPU
print('loading face expression classify model')
task_classify_face = kpu.load(
    0x500000)  # Load face classification model into KPU

a = kpu.set_outputs(task_classify_face, 0, 1, 1, 2)

anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)
a = kpu.init_yolo2(task_detect_face, 0.5, 0.3, 5, anchor)

labels = ['happy', 'sad']  # Facial expression labels

print('configuration complete')

while (True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    detected_face = kpu.run_yolo2(task_detect_face, img)

    if detected_face:
        for i in detected_face:
            face = img.cut(i.x(), i.y(), i.w(), i.h())
            face_128 = face.resize(128, 128)
            a = face_128.pix_to_ai()
            fmap = kpu.forward(task_classify_face, face_128)
            plist = fmap[:]
            pmax = max(plist)
            print("%s: %s" % (labels[plist.index(pmax)], pmax))
Example #23
# Put the model file on the SD card.
task = kpu.load("/sd/20class.kmodel")  # model stored on the SD card

# network parameters (anchors)
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)

# initialize the yolo2 network; detection confidence threshold is 0.7 (70%)
a = kpu.init_yolo2(task, 0.7, 0.3, 5, anchor)

while (True):

    clock.tick()

    img = sensor.snapshot()
    code = kpu.run_yolo2(task, img)  # run the yolo2 network

    if code:
        for i in code:
            a = img.draw_rectangle(i.rect())
            a = lcd.display(img)

            lcd.draw_string(i.x(), i.y(), classes[i.classid()], lcd.RED,
                            lcd.WHITE)
            lcd.draw_string(i.x(),
                            i.y() + 12, '%1.3f' % i.value(), lcd.RED,
                            lcd.WHITE)
            #lcd.draw_string(i.x(), i.y()+12, 'test,0.618', lcd.RED, lcd.WHITE)
    else:
        a = lcd.display(img)
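This snippet indexes a classes list that is defined earlier in the original script; for the 20-class model it is presumably the usual VOC label set used by the other 20-class examples above:

classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
           'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
           'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']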
Example #24
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
kpu.init_yolo2(task_face, 0.5, 0.3, 5, anchor)

print('[info]: Started.')
but_stu = 1

isButtonPressedA = 0

try:
    while (True):
        #gc.collect()
        img = sensor.snapshot()
        code_obj = kpu.run_yolo2(task, img)
        code_face = kpu.run_yolo2(task_face, img)

        if code_obj:  # object detected
            max_id = 0
            max_rect = 0
            for i in code_obj:
                img.draw_rectangle(i.rect())
                text = ' ' + classes[i.classid()] + ' (' + str(
                    int(i.value() * 100)) + '%) '
                for x in range(-1, 2):
                    for y in range(-1, 2):
                        img.draw_string(x + i.x(),
                                        y + i.y() + (i.h() >> 1),
                                        text,
                                        color=(250, 205, 137),
Example #25
record_ftrs = []  # empty list that stores face features recorded via the button; save them to a txt file on the SD card and read them back into this list to keep faces across power cycles
names = []  # name labels, one-to-one with the feature list above
with open("/sd/recordftr3.txt", "r") as f:
    while (1):
        line = f.readline()
        if not line:
            break
        name = line[0:line.index('#')]
        line = line[line.index('#') + 1:]
        record_ftrs.append(eval(line))
        names.append(name)
while (1):  # main loop
    check_key()  # check for key presses
    img = sensor.snapshot()  # grab a frame from the camera
    clock.tick()  # record the time, used for FPS calculation
    code = kpu.run_yolo2(task_fd, img)  # run the face detection model to get face bounding boxes
    read_data = uart.read()
    if read_data:
        with open("/sd/recordftr3.txt", "a") as f:
            f.write(read_data[2])
    if code:  # if a face was detected
        for i in code:  # iterate over the detected boxes
            # Cut face and resize to 128x128
            a = img.draw_rectangle(i.rect())  # draw the face box on screen
            face_cut = img.cut(i.x(), i.y(), i.w(),
                               i.h())  # crop the face region into face_cut
            face_cut_128 = face_cut.resize(128, 128)  # resize the cropped face to 128 x 128 pixels
            a = face_cut_128.pix_to_ai()  # convert the cropped image to the format the KPU accepts
            #a = img.draw_image(face_cut_128, (0,0))
            # Landmark for face 5 points
            fmap = kpu.forward(task_ld, face_cut_128)  # run the 5-point face landmark model
Example #26
import sensor, image, lcd, time
import KPU as kpu
sensor.reset(freq=24000000, dual_buff=True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((320, 224))
sensor.set_vflip(1)
sensor.run(1)
lcd.init(type=2, freq=20000000, color=lcd.BLACK)
#lcd.rotation(2)
classes = ["license_plate"]
task = kpu.load(0x400000)
a = kpu.set_outputs(task, 0, 10, 7, 30)
anchor = (1.81, 0.85, 2.26, 1.07, 3.00, 1.46, 4.56, 1.95, 7.38, 3.45)
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
while (True):
    timestamp = time.ticks_ms()
    img = sensor.snapshot()
    a = img.pix_to_ai()
    plates = kpu.run_yolo2(task, img)
    if plates:
        plate = plates[0]
        a = img.draw_rectangle(plate.rect(), color=(0, 255, 0), thickness=5)
    a = img.draw_string(70,
                        10,
                        "FPS : %.2f" % (1000 / (time.ticks_ms() - timestamp)),
                        color=(0, 255, 0),
                        scale=2)
    a = lcd.display(img)
a = kpu.deinit(task)
Example #27
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)  #QVGA=320x240
sensor.run(1)

task = kpu.load(0x300000)  # Load Model File from Flash
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)

but_stu = 1

try:
    while (True):
        img = sensor.snapshot()  # Take an image from sensor
        bbox = kpu.run_yolo2(task, img)  # Run the detection routine
        if bbox:
            bbox.sort(reverse=True, key=lambda x: x.rect()[2] * x.rect()[3])
            c = bbox[0].rect()[0] + (bbox[0].rect()[2] / 2)
            print(str(c) + ' ' + str(len(bbox)))
            if c < 120:
                print('>>>')
                u_send(200)
            if c > 160:
                print('<<<')
                u_send(201)
            first = True
            for i in bbox:
                color = (255, 0, 0) if first else (255, 255, 255)
                thick = 5 if first else 1
                first = False
Example #28
 def run(self,
         on_detect,
         on_img,
         on_clear,
         on_people=None,
         always_show_img=False):
     img = sensor.snapshot()
     try:
         code = kpu.run_yolo2(self._m_fd, img)
     except Exception:
         return
     if code:
         for i in code:
             face_cut = img.cut(i.x(), i.y(), i.w(), i.h())
             face_cut_128 = face_cut.resize(128, 128)
             a = face_cut_128.pix_to_ai()
             #a = img.draw_image(face_cut_128, (0,0))
             # Landmark for face 5 points
             try:
                 fmap = kpu.forward(self._m_ld, face_cut_128)
             except Exception:
                 continue
             plist = fmap[:]
             le = (i.x() + int(plist[0] * i.w() - 10),
                   i.y() + int(plist[1] * i.h()))
             re = (i.x() + int(plist[2] * i.w()),
                   i.y() + int(plist[3] * i.h()))
             nose = (i.x() + int(plist[4] * i.w()),
                     i.y() + int(plist[5] * i.h()))
             lm = (i.x() + int(plist[6] * i.w()),
                   i.y() + int(plist[7] * i.h()))
             rm = (i.x() + int(plist[8] * i.w()),
                   i.y() + int(plist[9] * i.h()))
             a = img.draw_circle(le[0], le[1], 4)
             a = img.draw_circle(re[0], re[1], 4)
             a = img.draw_circle(nose[0], nose[1], 4)
             a = img.draw_circle(lm[0], lm[1], 4)
             a = img.draw_circle(rm[0], rm[1], 4)
             # align face to standard position
             src_point = [le, re, nose, lm, rm]
             T = image.get_affine_transform(src_point, self._dst_point)
             a = image.warp_affine_ai(img, self.img_face, T)
             a = self.img_face.ai_to_pix()
             #a = img.draw_image(img_face, (128,0))
             del (face_cut_128)
             # calculate face feature vector
             try:
                 fmap = kpu.forward(self._m_fe, self.img_face)
             except Exception:
                 continue
             feature = kpu.face_encode(fmap[:])
             scores = []
             for j in range(len(self.features)):
                 score = kpu.face_compare(self.features[j], feature)
                 scores.append(score)
             max_score = 0
             index = 0
             for k in range(len(scores)):
                 if max_score < scores[k]:
                     max_score = scores[k]
                     index = k
             if max_score > 85:
                 a = img.draw_rectangle(i.rect(), color=(0, 255, 0))
                 a = img.draw_string(
                     i.x(),
                     i.y(), ("%s :%2.1f" % (self.names[index], max_score)),
                     color=(0, 255, 0),
                     scale=2)
                 on_detect(self.names[index], feature, max_score, img)
             else:
                 a = img.draw_rectangle(i.rect(), color=(255, 0, 0))
                 # a = img.draw_string(i.x(),i.y(), ("X :%2.1f" % (max_score)), color=(255,0,0),scale=2)
                 on_img(img)
             if on_people:
                 on_people(feature, img)
             self._show_img_t = time.ticks_ms() / 1000.0
     else:
         if always_show_img:
             on_img(img)
         else:
             if time.ticks_ms() - self._show_img_t * 1000 < self.show_img_timeout * 1000:
                 on_img(img)
             else:
                 on_clear()
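run() is driven entirely through callbacks. A minimal sketch of how it might be wired up, assuming an already-constructed recognizer instance (the class itself is not shown; names and callback bodies are illustrative):

def on_detect(name, feature, score, img):
    print('recognized', name, score)
    lcd.display(img)

def on_img(img):
    lcd.display(img)

def on_clear():
    lcd.clear()

while True:
    recognizer.run(on_detect, on_img, on_clear, always_show_img=False)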
Example #29
# tested with firmware 5-0.22
import sensor,image,lcd
import KPU as kpu

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)
classes = ["racoon"]
task = kpu.load(0x200000) #change to "/sd/name_of_the_model_file.kmodel" if loading from SD card
a = kpu.set_outputs(task, 0, 7,7,30)   #the actual shape needs to match the last layer shape of your model(before Reshape)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828)
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor) #tweak the second parameter if you're getting too many false positives
while(True):
    img = sensor.snapshot().rotation_corr(z_rotation=90.0)
    a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a=img.draw_rectangle(i.rect(),color = (0, 255, 0))
            a = img.draw_string(i.x(),i.y(), classes[i.classid()], color=(255,0,0), scale=3)
        a = lcd.display(img)
    else:
        a = lcd.display(img)
a = kpu.deinit(task)
Example #30
 def yolo(self):
     self.yoloObj = kpu.run_yolo2(task, self.img)
     if self.yoloObj:
         for object in self.yoloObj:
             self.img.draw_rectangle(object.rect(), self.color, self.border,
                                     self.fill)  # setCamera((320,240))
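yolo() reads a module-level task and the instance attributes img, color, border and fill, none of which are shown. A hypothetical sketch of that assumed setup:

import sensor
import KPU as kpu

task = kpu.load(0x300000)       # hypothetical model address
kpu.init_yolo2(task, 0.5, 0.3, 5, (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
                                   5.3658, 5.155437, 6.92275, 6.718375, 9.01025))

class Viewer:
    def __init__(self):
        self.color = (0, 255, 0)      # rectangle colour
        self.border = 2               # line thickness
        self.fill = False             # outline only
        self.img = sensor.snapshot()  # frame to run detection on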