Example #1
def draw_result(dev_idx, det_res, captured_frames):
    # Draw a bounding box for each detection
    for res in det_res:
        x1 = int(res[0])
        y1 = int(res[1])
        x2 = int(res[2])
        y2 = int(res[3])
        class_num = res[5]
        score = res[4]
        # print(x1,x2,class_num,score)

        if class_num == 0:
            captured_frames[0] = cv2.rectangle(captured_frames[0], (x1, y1), (x2, y2), (0, 0, 255), 3)
            # print("score of person: ", score)
        else:
            captured_frames[0] = cv2.rectangle(captured_frames[0], (x1, y1), (x2, y2), (255, 0, 0), 3)
            # print("score of others: ", score)

    cv2.imshow('detection', captured_frames[0])
    del captured_frames[0]
    key = cv2.waitKey(1)

    if key == ord('q'):
        kdp_wrapper.kdp_exit_dme(dev_idx)
        sys.exit()

    return
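The snippets in these examples all rely on the same module-level setup. A minimal sketch of the imports they assume is shown below; the exact paths for `constants` and the postprocess helpers depend on how the host_lib examples are laid out, so treat those as assumptions.

# Imports assumed by the examples in this section (paths are assumptions
# and may differ in your checkout of the host_lib examples).
import sys

import cv2

import kdp_wrapper
import constants  # assumption: module providing the APP_* application IDs
# postprocess_, yolo_postprocess_, top_indexes and overlap are example-specific
# helpers whose import paths are not shown in these snippets.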
Example #2
def user_test_single_dme(dev_idx, loop):
    """Test single dme."""
    # load model into Kneron device
    model_path = "../test_images/dme_ssd_fd"
    is_raw_output = True

    kdp_wrapper.kdp_dme_load_ssd_model(dev_idx, model_path, is_raw_output)
    image_source_h = 480
    image_source_w = 640
    image_size = image_source_w * image_source_h * 2  # 2 bytes per pixel
    frames = []
    app_id = 0 # if app_id is 0, output raw data for kdp_wrapper.kdp_dme_inference

    # the parameters for postprocess
    anchor_path       = './examples/fdssd/models/anchor_face.npy'
    model_input_shape = (200, 200)
    score_thres       = 0.5
    nms_thres         = 0.35
    only_max          = False

    # Setup video capture device.
    capture = kdp_wrapper.setup_capture(0, image_source_w, image_source_h)
    if capture is None:
        return -1

    while loop:
        raw_res = kdp_wrapper.kdp_dme_inference(dev_idx, app_id, capture, image_size, frames)

        det_res = postprocess_(raw_res, anchor_path, model_input_shape,
                               image_source_w, image_source_h, score_thres, only_max, nms_thres)

        draw_result(dev_idx, det_res, frames)
        loop -= 1

    kdp_wrapper.kdp_exit_dme(dev_idx)
Example #3
def user_test_single_dme(dev_idx, loop):
    """Test single dme."""
    # load model into Kneron device
    model_path = "../test_images/dme_yolo_224"
    kdp_wrapper.kdp_dme_load_yolo_model(dev_idx, model_path)

    image_source_h = 480
    image_source_w = 640
    image_size = image_source_w * image_source_h * 2
    frames = []
    app_id = constants.APP_TINY_YOLO3

    # Setup video capture device.
    capture = kdp_wrapper.setup_capture(0, image_source_w, image_source_h)
    if capture is None:
        return -1

    # Send 1 image to the DME image buffers.
    ret, ssid = kdp_wrapper.dme_fill_buffer(dev_idx, capture, image_size,
                                            frames)
    if ret:
        return -1

    # Stream frames through the loaded model; handle_result (shown in a later
    # example) postprocesses each raw output and draws the detections.
    kdp_wrapper.dme_pipeline_inference(dev_idx, app_id, loop, image_size,
                                       capture, ssid, frames, handle_result)

    kdp_wrapper.kdp_exit_dme(dev_idx)
Example #4
def user_test_single_dme(dev_idx):
    """Test single dme."""
    # load model into Kneron device
    model_path = "../test_images/dme_mobilenet"
    kdp_wrapper.kdp_dme_load_model(dev_idx, model_path)

    # Get the test images ready
    img_path = './data/images/cat.jpg'
    img_path2 = './data/images/fox.jpg'

    npraw_data = kdp_wrapper.kdp_inference(dev_idx, img_path)

    # Do postprocessing with keras
    preds = kdp_wrapper.softmax(npraw_data[0]).reshape(1, 1000)
    top_indexes(preds, 3)
    #print('\nPredicted:', decode_predictions(preds, top=3)[0])

    npraw_data = kdp_wrapper.kdp_inference(dev_idx, img_path2)

    # Do postprocessing with keras
    preds = kdp_wrapper.softmax(npraw_data[0]).reshape(1, 1000)
    top_indexes(preds, 3)
    #print('\nPredicted:', decode_predictions(preds, top=3)[0])

    kdp_wrapper.kdp_exit_dme(dev_idx)
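Example #4 calls a `top_indexes` helper that is not defined in the snippet. A minimal sketch of what such a helper might do (print the top-k class indices with their softmax scores) is given below; the name and exact behavior are assumptions, not part of kdp_wrapper.

import numpy as np

def top_indexes(preds, top=3):
    # Hypothetical helper: print the top-k class indices with their softmax scores.
    # preds is expected to have shape (1, 1000), as produced above.
    order = np.argsort(preds[0])[::-1][:top]
    for idx in order:
        print("class {}: {:.4f}".format(idx, preds[0][idx]))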
Example #5
def user_test_single_dme(dev_idx, loop):
    """Test single dme."""
    # load model into Kneron device
    model_path = "../test_images/dme_ssd_fd"
    is_raw_output = False
    kdp_wrapper.kdp_dme_load_ssd_model(dev_idx, model_path, is_raw_output)

    image_source_h = 480
    image_source_w = 640
    image_size = image_source_w * image_source_h * 2
    frames = []
    app_id = constants.APP_FD_LM

    # Setup video capture device.
    capture = kdp_wrapper.setup_capture(0, image_source_w, image_source_h)
    if capture is None:
        return -1

    while loop:
        det_res = kdp_wrapper.kdp_dme_inference(dev_idx, app_id, capture, image_size, frames)
        draw_result(dev_idx, det_res, frames)
        loop -= 1
        # print("Total class {}: total detection {}".format(det_res[0], det_res[1]))
        # for i in range(det_res[1]):
        #     print("x1,y1,x2,y2:", det_res[4*i+2],det_res[4*i+3],det_res[4*i+4],det_res[4*i+5])

    kdp_wrapper.kdp_exit_dme(dev_idx)
Example #6
def handle_result(dev_idx, raw_res, captured_frames):
    """Callback for dme_pipeline_inference: postprocess the raw output and draw detections."""
    # the parameters for postprocess
    anchor_path = './examples/yolo/models/anchors.txt'
    class_path = './common/coco_name_lists'
    model_input_shape = (224, 224)
    score_thres = 0.2
    nms_thres = 0.45
    keep_aspect_ratio = True
    image_source_h = 480
    image_source_w = 640

    det_res = yolo_postprocess_(raw_res, anchor_path, class_path,
                                image_source_h, image_source_w,
                                model_input_shape, score_thres, nms_thres,
                                keep_aspect_ratio)
    # Draw a bounding box for each detection
    for res in det_res:
        x1 = int(res[0])
        y1 = int(res[1])
        x2 = int(res[2])
        y2 = int(res[3])
        class_num = res[5]
        score = res[4]
        # print(x1,x2,class_num,score)

        if class_num == 0:
            captured_frames[0] = cv2.rectangle(captured_frames[0], (x1, y1),
                                               (x2, y2), (0, 0, 255), 3)
            # print("score of person: ", score)
        else:
            captured_frames[0] = cv2.rectangle(captured_frames[0], (x1, y1),
                                               (x2, y2), (255, 0, 0), 3)
            # print("score of others: ", score)

    cv2.imshow('detection', captured_frames[0])
    del captured_frames[0]
    key = cv2.waitKey(1)

    if key == ord('q'):
        kdp_wrapper.kdp_exit_dme(dev_idx)
        sys.exit()

    return
Example #7
def draw_result(dev_idx, det_res, captured_frames):
    """Draw face detection boxes, skipping boxes that heavily overlap one already drawn."""
    x1_0 = 0
    y1_0 = 0
    x2_0 = 0
    y2_0 = 0
    score_0 = 0
    # Draw a box for each detected face
    for res in det_res:
        #print(type(res))
        x1 = int(res[0])
        y1 = int(res[1])
        # res[2] and res[3] are width/height; convert to the bottom-right corner
        x2 = int(res[2]+res[0])
        y2 = int(res[3]+res[1])
        class_num = res[5]
        score = res[4]
        o_l = overlap(x1, y1, x2, y2, x1_0, y1_0, x2_0, y2_0)
        # only draw a box if it does not heavily overlap the previously drawn one
        if o_l < 0.6:
            x1_0 = x1
            y1_0 = y1
            x2_0 = x2
            y2_0 = y2
            score_0 = score
            if class_num == 2:
                captured_frames[0] = cv2.rectangle(captured_frames[0], (x1, y1), (x2, y2), (0, 0, 255), 3)
                # print("score of mask fd: ", score)
            elif class_num == 1:
                captured_frames[0] = cv2.rectangle(captured_frames[0], (x1, y1), (x2, y2), (255, 0, 0), 3)
                # print("score of fd: ", score)

    cv2.imshow('detection', captured_frames[0])
    del captured_frames[0]
    key = cv2.waitKey(1)

    if key == ord('q'):
        kdp_wrapper.kdp_exit_dme(dev_idx)
        sys.exit()

    return
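Example #7 filters near-duplicate boxes with an `overlap` helper that is not shown. A minimal intersection-over-union sketch is given below; the real helper may measure overlap differently, so treat this as an assumption.

def overlap(x1, y1, x2, y2, x1_0, y1_0, x2_0, y2_0):
    # Hypothetical helper: intersection-over-union of two boxes given as corner coordinates.
    inter_w = min(x2, x2_0) - max(x1, x1_0)
    inter_h = min(y2, y2_0) - max(y1, y1_0)
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter_area = inter_w * inter_h
    union_area = (x2 - x1) * (y2 - y1) + (x2_0 - x1_0) * (y2_0 - y1_0) - inter_area
    return inter_area / union_area if union_area > 0 else 0.0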
Example #8
def user_test_single_dme(dev_idx, loop):
    """Test single dme."""
    # load model into Kneron device
    model_path = "../test_images/dme_yolo_224"
    kdp_wrapper.kdp_dme_load_yolo_model(dev_idx, model_path)

    image_source_h = 480
    image_source_w = 640
    image_size = image_source_w * image_source_h * 2
    frames = []
    app_id = constants.APP_TINY_YOLO3

    # the parameters for postprocess
    anchor_path       = './examples/yolo/models/anchors.txt'
    class_path        = './common/coco_name_lists'
    model_input_shape = (224, 224)
    score_thres       = 0.2
    nms_thres         = 0.45
    keep_aspect_ratio = True

    # Setup video capture device.
    capture = kdp_wrapper.setup_capture(0, image_source_w, image_source_h)
    if capture is None:
        return -1

    while loop:
        raw_res = kdp_wrapper.kdp_dme_inference(dev_idx, app_id, capture, image_size, frames)

        dets = yolo_postprocess_(raw_res, anchor_path, class_path, image_source_h, image_source_w, model_input_shape,
                                 score_thres, nms_thres, keep_aspect_ratio)

        # print("dets: ", dets)

        draw_result(dev_idx, dets, frames)
        loop -= 1

    kdp_wrapper.kdp_exit_dme(dev_idx)
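Each `user_test_single_dme` variant assumes the Kneron device has already been opened and that `dev_idx` identifies it. A minimal, assumed entry point might look like this (the device index and loop count are placeholders):

if __name__ == '__main__':
    # Assumption: device initialization/connection has already been handled
    # elsewhere, and index 0 refers to the first connected Kneron device.
    user_test_single_dme(dev_idx=0, loop=1000)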