import os

from PIL import Image
from darknet import Darknet  # repo-local modules; import paths assumed
from utils import load_class_names, do_detect, plot_boxes, save_boxes


def eval_widerface(cfgfile, weightfile, valdir, savedir):
    m = Darknet(cfgfile)
    m.load_weights(weightfile)
    use_cuda = 1
    if use_cuda:
        m.cuda()

    scale_size = 16  # network stride; resize each side to a multiple of it
    class_names = load_class_names('data/names')
    for parent, dirnames, filenames in os.walk(valdir):
        if parent != valdir:
            targetdir = os.path.join(savedir, os.path.basename(parent))
            if not os.path.isdir(targetdir):
                os.mkdir(targetdir)
            for filename in filenames:
                imgfile = os.path.join(parent,filename)
                img = Image.open(imgfile).convert('RGB')
                sized_width = int(round(img.width / scale_size) * scale_size)
                sized_height = int(round(img.height / scale_size) * scale_size)
                sized = img.resize((sized_width, sized_height))
                print(filename, img.width, img.height, sized_width, sized_height)
                if sized_width * sized_height > 1024 * 2560:
                    print('omit %s' % filename)
                    continue
                boxes = do_detect(m, sized, 0.05, 0.4, use_cuda)
                savename = os.path.join(targetdir, filename)
                print('save to %s' % savename)
                plot_boxes(img, boxes, savename, class_names)
                savename = os.path.join(targetdir, os.path.splitext(filename)[0] + ".txt")
                print('save to %s' % savename)
                save_boxes(img, boxes, savename)
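
save_boxes is a repo-local helper not shown here; a minimal sketch of what it might do, assuming do_detect returns boxes as normalized [cx, cy, w, h, conf, ...] (the convention this codebase's plot_boxes uses):

def save_boxes(img, boxes, savename):
    # Sketch only: write one detection per line as "x1 y1 x2 y2 conf",
    # converting normalized center-size boxes to pixel corner coordinates.
    width, height = img.width, img.height
    with open(savename, 'w') as f:
        for box in boxes:
            x1 = (box[0] - box[2] / 2.0) * width
            y1 = (box[1] - box[3] / 2.0) * height
            x2 = (box[0] + box[2] / 2.0) * width
            y2 = (box[1] + box[3] / 2.0) * height
            f.write('%f %f %f %f %f\n' % (x1, y1, x2, y2, box[4]))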
Example #2
import collections

import cv2
import numpy as np
import pyrealsense2 as rs

import uyolo  # repo-local YOLO helpers
from darknet import Darknet  # import path assumed


def demo(cfgfile, weightfile):
    model_hand = Darknet(cfgfile)
    model_hand.print_network()
    model_hand.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    namesfile = 'data/hands.names'
    class_names = uyolo.load_class_names(namesfile)

    use_cuda = 1
    if use_cuda:
        model_hand.cuda()

    # RealSense Start
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    profile = pipeline.start(config)
    # Set exposure (exposure_val is assumed to be a module-level setting);
    # on D400-series cameras, sensor index 1 is typically the RGB camera.
    s = profile.get_device().query_sensors()[1]
    s.set_option(rs.option.exposure, exposure_val)

    # Rolling window of per-frame detection hits, for a continuity metric
    movingList = collections.deque(maxlen=100)

    while True:
        # Reading image from camera
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue
        img = np.asanyarray(color_frame.get_data())

        # Optional gamma correction (gamma_correction and gamma_val are
        # assumed module-level settings)
        if gamma_correction:
            img = adjust_gamma(img, gamma=gamma_val)

        # yolo stuff
        sized = cv2.resize(img, (model_hand.width, model_hand.height))
        bboxes = uyolo.do_detect(model_hand, sized, 0.5, 0.4, use_cuda)
        print('------')
        draw_img = uyolo.plot_boxes_cv2(img, bboxes, None, class_names)

        # Evaluation: fraction of the last 100 frames with at least one detection
        movingList.append(any(bboxes))
        print('Continuity : {}'.format(np.mean(movingList)))

        cv2.imshow(cfgfile, draw_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
            break

    pipeline.stop()
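
adjust_gamma is not defined in this snippet; a minimal sketch using the standard OpenCV lookup-table approach (an assumption about what the helper does):

def adjust_gamma(image, gamma=1.0):
    # Map each 8-bit value v to 255 * (v / 255) ** (1 / gamma),
    # then apply the table to the whole image in one pass.
    inv = 1.0 / gamma
    table = np.array([((i / 255.0) ** inv) * 255 for i in range(256)],
                     dtype=np.uint8)
    return cv2.LUT(image, table)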
Example #3
        # Tail of the network's forward pass: return the last blob produced.
        output_name = list(blobs.keys())[-1]
        print('output_name', output_name)
        return blobs[output_name]



import time

import cv2
from PIL import Image
# CaffeNet, do_detect and plot_boxes come from this repo's own modules;
# their import paths are not shown in this snippet.

if __name__ == '__main__':
    prototxt = 'tiny_yolo_nbn_reluface.prototxt'
    caffemodel = '/nfs/xiaohang/for_chenchao/tiny_yolo_nbn_reluface.caffemodel'
    imgfile = 'data/face.jpg'
    
    m = CaffeNet(prototxt, caffemodel)
    use_cuda = 1
    if use_cuda:
        m.cuda()

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))
    #if m.num_classes == 20:
    #    namesfile = '../data/voc.names'
    #class_names = load_class_names(namesfile)
    class_names = ['face']
    # Run detection twice: the first pass warms up CUDA, the second is timed.
    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes(img, boxes, 'predictions.jpg', class_names)
    # --- Separate fragment: the main capture loop of a webcam hand-tracking
    # demo. gamma_correction, gamma_val, adjust_gamma, use_hand_tracking,
    # model_hand, hand_conf_thresh, test_width, test_height, hand_crop_size,
    # crop_image, uyolo and use_cuda are assumed to be defined or imported
    # at module level.
    cap = cv2.VideoCapture(0)  # assumed camera source
    while True:
        # Reading image from camera
        t_start = time.time()
        ret, img = cap.read()
        if ret:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        else:
            continue

        # Gamma correction (optional)
        if gamma_correction:
            img = adjust_gamma(img, gamma=gamma_val)

        # YOLO hand detection: crop the frame around the first detected hand
        if use_hand_tracking:
            sized = cv2.resize(img, (model_hand.width, model_hand.height))
            bboxes = uyolo.do_detect(model_hand, sized, hand_conf_thresh, 0.4,
                                     use_cuda)
            if any(bboxes):
                # Box centers are normalized; scale to pixel coordinates.
                center = [
                    int(bboxes[0][0] * test_width),
                    int(bboxes[0][1] * test_height)
                ]
                img_hand_cropped, crop_box = crop_image(img, center,
                                                        hand_crop_size)
                img_detection = img_hand_cropped
            else:
                img_detection = img
        else:
            img_detection = img

        # DOPE pose detection
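
crop_image is likewise external to the fragment; a minimal sketch, assuming it clamps a square window of side crop_size around the detected hand center and returns the crop together with its [x1, y1, x2, y2] box:

def crop_image(img, center, crop_size):
    # Sketch only: keep the window inside the image bounds.
    h, w = img.shape[:2]
    half = crop_size // 2
    x1 = max(0, min(center[0] - half, w - crop_size))
    y1 = max(0, min(center[1] - half, h - crop_size))
    return (img[y1:y1 + crop_size, x1:x1 + crop_size],
            [x1, y1, x1 + crop_size, y1 + crop_size])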