import math

import cv2 as cv
import numpy as np

# Project-level helpers (ut.*), normalize_depth_image, get_img_silhouette,
# draw_contour_on_IDT, and the SILH_PIXEL / BG_PIXEL / IMG_WIDTH / IMG_HEIGHT
# constants are defined elsewhere in the project.
def test_silhouette_aux(depth_image, prev_silhouette=[], threshold=0, amplify=True):
    normalized = normalize_depth_image(depth_image, threshold, amplify)
    silhouette = get_img_silhouette(normalized, prev_silhouette)

    # display results
    result = np.hstack((normalized, silhouette))
    title_result = 'result: normalized, silhouette with threshold ' + \
        str(threshold)
    ut.imgDisplayCV(result, title_result)
    return silhouette
def testContourAux(createContour):
    cap = cv.VideoCapture(0)
    _, frame = cap.read()
    height, width, _ = frame.shape
    contour = createContour(width, height)

    while True:
        _, frame = cap.read()
        img = ut.drawContour(frame, contour)
        ut.imgDisplayCV(img, "img")
        if cv.waitKey(1) & 0xFF == ord('q'):    # press 'q' to quit
            break
    cap.release()
def test_img_gradient():
    img = np.full((512, 512), 255, np.uint8)

    # cv.circle(img, (256, 256), radius=3, color=0, thickness=-1)
    # cv.line(img, (0, 0), (0, 511), color=0, thickness=3)
    # draw diagonal on image
    cv.line(img, (0, 0), (511, 511), color=0, thickness=3)

    IDT = ut.getIDT(img)
    contour = ut.createCircleContour(IMG_WIDTH, IMG_HEIGHT)
    IDT_gradient = ut.getImgGradient(IDT)
    normals = ut.calc_normal(contour, normalize=False).astype(int)
    IDT = draw_contour_on_IDT(IDT, contour, normals, IDT_gradient)

    # convert IDT to 8-bit so cv.imshow can display it
    IDT = cv.convertScaleAbs(IDT)

    ut.imgDisplayCV(img, "img")
    ut.imgDisplayCV(IDT, "IDT")
    return
def test_IDT_aux(depth_image, color_image, prev_silhouette, use_bbox=False, draw_contour=True,
                 threshold=0, real_time=True):
    normalized = normalize_depth_image(depth_image, threshold, amplify=True)

    bbox = []
    if use_bbox:
        # ut.bbox expects a sequence of frames, so wrap the single frame in a batch of one
        color_image = np.expand_dims(color_image, axis=0)
        (frames_idx, bbox) = ut.bbox(color_image)
        if 0 not in frames_idx:
            # no person was detected in the frame
            bbox = []
        else:
            bbox = bbox[0]

    silhouette = get_img_silhouette(normalized, prev_silhouette, bbox)
    IDT = ut.getIDT(silhouette)

    if draw_contour:
        contour = ut.createCircleContour(IMG_WIDTH, IMG_HEIGHT)
        IDT_gradient = ut.getImgGradient(IDT)
        normals = ut.calc_normal(contour, normalize=False).astype(int)
        IDT_BGR = draw_contour_on_IDT(IDT, contour, normals, IDT_gradient)

        # convert IDT to 8-bit (uint8) so that we can display it with cv.imshow
        IDT_BGR = cv.convertScaleAbs(IDT_BGR)

        silhouette_BGR = cv.cvtColor(silhouette, cv.COLOR_GRAY2BGR)
        normalized_BGR = cv.cvtColor(normalized, cv.COLOR_GRAY2BGR)
        result = np.hstack((normalized_BGR, silhouette_BGR, IDT_BGR))
    else:
        IDT = cv.convertScaleAbs(IDT)
        result = np.hstack((normalized, silhouette, IDT))

    # display result
    title_result = 'result: normalized, silhouette, IDT with threshold ' + \
        str(threshold)
    ut.imgDisplayCV(result, title_result)

    if not real_time:
        cv.waitKey(0)

    return silhouette
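# A minimal sketch of how test_IDT_aux might be driven in real time, feeding
# each frame's silhouette back in as prev_silhouette. get_frames() is a
# hypothetical stand-in for the project's depth/color frame source.
def demo_IDT_loop():
    prev_silhouette = []
    while True:
        depth_image, color_image = get_frames()     # hypothetical source
        prev_silhouette = test_IDT_aux(depth_image, color_image, prev_silhouette,
                                       use_bbox=True, threshold=0, real_time=True)
        if cv.waitKey(1) & 0xFF == ord('q'):        # press 'q' to quit
            break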
def prev_silhouette_buffer(prev_silhouette):
    """
    Calculate an approximation of valid silhouette buffer based on the silhouette from the previous frame.
    Usage: set pixels out of the buffer as background pixels:
        silhouette[buffer != SILH_PIXEL] = BG_PIXEL
    Notice that it's a bit tricky because when the person moves fast, (some of) the real silhouette might be found out
    of this buffer, and therefore be mistakenly classified as background. When using this in real time, the silhouette
    buffer gets "fixed". The bigger the kernel is, the quicker it gets fixed. But bigger kernel also means less
    accurate buffer.
    Also notice: if the person moves *too* fast, the buffer might disappear, and then we would reset it.
    """
    global out_of_buffer
    sqrt_factor = 2
    x = int(sqrt_factor * math.sqrt(out_of_buffer))
    x = min(max(x, 5), 200)                 # clamp so that 5 <= x <= 200
    kernel = (x, x)
    # kernel = (5,5)

    # flip silhouette pixels and background pixels so we can do smoothing (filter)
    flipped = np.where(prev_silhouette == SILH_PIXEL, BG_PIXEL, SILH_PIXEL)

    # smoothing
    filtered = ut.img_get_box_filter(flipped, kernel)

    # flip again to get the buffer
    buffer = np.where(filtered == 0, BG_PIXEL, SILH_PIXEL)

    # check if buffer is empty (meaning, empty of SILH_PIXEL / filled only with BG_PIXEL)
    if np.array_equal(buffer, full_background):
        # reset buffer
        buffer = np.copy(full_silhouette)

    # display result
    buffer = cv.convertScaleAbs(buffer)
    buffer_display = cv.normalize(
        buffer, None, 255, 0, cv.NORM_MINMAX, cv.CV_8UC1)
    ut.imgDisplayCV(buffer_display, 'previous silhouette buffer')

    return buffer
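# The docstring's usage note, as a sketch: mask the current frame's raw
# silhouette with the buffer computed from the previous frame's silhouette.
# SILH_PIXEL and BG_PIXEL are the module-level constants the function above
# already relies on; updating the out_of_buffer global is assumed to happen
# elsewhere in the project.
def apply_silhouette_buffer(silhouette, prev_silhouette):
    buffer = prev_silhouette_buffer(prev_silhouette)
    silhouette[buffer != SILH_PIXEL] = BG_PIXEL     # outside the buffer -> background
    return silhouette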
def testBbox(n=10):
    cap = cv.VideoCapture(0)
    img_seq = []
    for i in range(n):
        _, frame = cap.read()
        img_seq.append(frame)
    img_seq = np.stack(img_seq)
    cap.release()

    (frames_idx, bbox) = ut.bbox(img_seq)

    skipped = 0
    for i, img in enumerate(img_seq):
        if i not in frames_idx:
            print('could not find any person in frame number ' + str(i) + '!')
            skipped += 1
            continue
        # bbox only holds entries for frames where a person was detected,
        # so offset the index by the number of skipped frames
        b = bbox[i - skipped]
        (y0, y1, x0, x1) = [int(p) for p in b]
        cut = img[y0:y1, x0:x1]

        ut.imgDisplayCV(cut, "cut")
        cv.waitKey(0)
    return
def testShiftImg(color_img):
    dst = ut.shiftImg(color_img, x_shift=320, y_shift=240)
    ut.imgDisplayCV(dst, "shifted image")
    return
def testGetRect():
    img = ut.getRect()
    ut.imgDisplayCV(img, "Rectangle")
    cv.waitKey(0)
    return