from collections import OrderedDict

import cv2
import numpy as np
import pandas as pd
import pyrealsense2 as rs


def find_c_height(img):
    # frame1 keeps only the upper checkerboard, frame2 only the lower one.
    frame1 = img.copy()
    frame2 = frame1.copy()
    frame2[0:450, :] = (0, 0, 0)  #black out the top rows -> lower board remains
    frame1[230:719, :] = (0, 0, 0)  #black out the bottom rows -> upper board remains
    cv2.imshow("img2", rescale(frame1, 50))
    cv2.imshow("img3", rescale(frame2, 50))
    a1, check1 = findCheckerboardCoordinates(frame1)
    a2, check2 = findCheckerboardCoordinates(frame2)
    #    print([check1,check2])
    if check1 and check2:
        y1 = min(p[1] for p in a1)  #top-most corner of the upper board
        y2 = max(p[1] for p in a2)  #bottom-most corner of the lower board
        cv2.line(img, (0, y1), (img.shape[1], y1), (0, 255, 255), 2)
        cv2.line(img, (0, y2), (img.shape[1], y2), (0, 255, 255), 2)
        #        cv2.imshow("img",rescale(img,20))
        height = y2 - y1  #pixel distance between the two boards
        #        print(height)
        return True, height
    return False, 0
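

# Helpers such as rescale, find_red, find_mean, findCheckerboardCoordinates,
# iterate, apply_cascade, find_face_points and get_area are used by these
# examples but not shown. A minimal sketch of rescale, assuming `percent`
# scales both axes:
def rescale(img, percent):
    w = int(img.shape[1] * percent / 100)
    h = int(img.shape[0] * percent / 100)
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)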
        aligned_depth_frame = aligned_frames.get_depth_frame()  # depth frame aligned to the color stream
        color_frame = aligned_frames.get_color_frame()
        
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        image1, [x, y, w, h] = find_red(color_image.copy())  #get the rectangle coordinates
        cv2.imshow('Found Red', rescale(image1, 50))

        cv2.rectangle(color_image, (x, y), (x + w, y + h), (0, 255, 0), 5)

        Area = depth_image[y:y + h, x:x + w]
        distance = find_mean(Area) * depth_scale * 39.3701  #meters -> inches
        #        distance -= (4.2*39.3701/1000)  #depth measurement starts from 4.2 mm behind the camera
        answer.append(distance)

        
        check, height = find_c_height(color_image.copy())

        real_height = distance * height / 916.364501953125
        #        print("Height is : {} inches".format(real_height))
        print([distance, height, real_height])
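        # A note on the constant above (an assumption, not read from the SDK
        # here): 916.364501953125 is taken to be the color stream's focal
        # length in pixels, giving the similar-triangles relation
        #   real_height = distance * pixel_height / focal_length_px.
        # e.g. at distance = 60 in and height = 900 px, real_height ~ 58.9 in.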
        color_image1, check, values = apply_cascade(
            color_image.copy(), 'models/cascades/haarcascade_eye.xml')

        #USE THE FIRST DETECTED AREA
        if check and len(values) > 0:
            (x,y,w,h)=values[0]
#            x*=1.02
#            w*=0.5
#            y*=1.02
#            h*=0.5
            x,y,w,h = int(x),int(y),int(w),int(h)
            cv2.rectangle(color_image1, (x, y), (x + w, y + h), (255, 0, 0), 2)
            Area = depth_image[y:y + h, x:x + w]
            d = iterate(Area)
            d = d * depth_scale * 100  #actual distance in cm
            cv2.putText(color_image1, 'distance {} cm'.format(d), (100, 250),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.imshow('W', rescale(color_image1,90))
            out.write(color_image1)
            print(d)
            answer.append(d)

        else:
            cv2.putText(color_image1, "NOT FOUND", (100, 250),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.imshow('W', rescale(color_image1, 90))

finally:
    cv2.destroyAllWindows() #CLOSE EVERY WINDOW

#%%
    
#FULL BODY ITERATION    
#LOCAL VARIABLE
# Example #4
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame(
        )  # depth frame aligned to the color stream
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        image1, [x, y, w, h] = find_red(color_image.copy())
        cv2.imshow('Found Red', rescale(image1, 50))

        cv2.rectangle(color_image, (x, y), (x + w, y + h), (0, 255, 0), 5)

        Area = depth_image[y:y + h, x:x + w]
        distance = find_mean(Area) * depth_scale * 39.3701  #inches
        #        distance -= (4.2*39.3701/1000)  #depth measurement starts from 4.2 mm behind the camera
        answer.append(distance)

        print("distance is : {} inches".format(distance))

        # Remove background - Set pixels further than clipping_distance to grey
        grey_color = 153
        depth_image_3d = np.dstack(
            (depth_image, depth_image,
             depth_image))  #depth image is 1 channel, color is 3 channels
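        # grey_color and depth_image_3d set up the standard rs-align background
        # masking, which this excerpt never applies. A minimal sketch of that
        # step (clipping_distance as defined in record_15 below):
        # bg_removed = np.where(
        #     (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
        #     grey_color, color_image)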
        colorized_depth = np.asanyarray(
            colorizer.colorize(aligned_depth_frame).get_data())

        #        #See the depth image
        #        cv2.imshow("colorized depth",rescale(colorized_depth,50))

        color_image = np.asanyarray(
            color_frame.get_data())  #Getting final RGB frame

        #UPPER CHECKER BOARD
        img1 = color_image.copy()  #Create Copy
        img1[230:719, :] = (0, 0, 0)  #Threshold remaining area
        a1, check1 = findCheckerboardCoordinates(img1)  #Find the coordinates
        if not check1:  #if we don't find the checkerboard
            print("Check 1")
            cv2.imshow('Found Red', rescale(color_image, 50))
            continue
#        #Visualise the color image with a drawn rectangle. NOTE: Turn it off when processing the RGB Image. Only for debugging.
        cv2.rectangle(color_image, tuple(a1[53]), tuple(a1[43]), (0, 255, 0),
                      5)
        cv2.rectangle(color_image, tuple(a1[46]), tuple(a1[36]), (0, 255, 0),
                      5)

        # Repeat the procedure for the other checkerboard

        #BOTTOM Checkerboard
        img2 = color_image.copy()
        img2[0:450, :] = (0, 0, 0)
        a2, check2 = findCheckerboardCoordinates(img2)
        if not check2:
            print("Check 2")
# Example #6
        if count == 0:
            # Frame 0 is the first image; it can be used for background subtraction.
            answer[count] = [depth_image, color_image]
            count += 1
            continue

        #TO START STORING FRAMES WHEN THE SUBJECT IS IN THE DESIRED POSITION. YOU HAVE TO PRESS LEFT CLICK TO SET IX FLAG.
        if ix:
            color_image1, check, values = apply_cascade(
                color_image.copy(),
                'models/cascades/haarcascade_lefteye_2splits.xml')
            if check:
                answer[count] = [depth_image, color_image]
                count += 1
        cv2.imshow('W', rescale(color_image, 90))

        #IF 15 FRAMES STORED BREAK THE LOOP. IF WE INCREASE THE VALUE TO SAY 100, THE FILE WOULD BE VERY LARGE AND WILL CRASH THE COMPUTER
        if len(answer) == 15:
            break

finally:
    pipeline.stop()  #Turn off the camera
    cv2.destroyAllWindows()  #Remove all the windows
    out.release()

#STORE THE  RECORDED DATA INTO PICKLE FILE
import pickle
output = open('data/record_15.pkl', 'wb')
pickle.dump(answer, output)
output.close()
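

# `mouse_callback` is referenced by record_15 below but not shown in these
# examples. A minimal sketch, assuming a left click sets the global `ix`
# flag that starts frame capture:
def mouse_callback(event, x, y, flags, param):
    global ix
    if event == cv2.EVENT_LBUTTONDOWN:
        ix = True
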
def record_15():
    global ix
    # Create a pipeline
    pipeline = rs.pipeline()

    #Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()

    #Enabling High Accuracy Preset
    depth_sensor.set_option(rs.option.visual_preset, 3.0)
    preset_name = depth_sensor.get_option_value_description(
        rs.option.visual_preset, 3.0)

    #Enabling Emitter To increase accuracy
    enable_ir_emitter = True
    depth_sensor.set_option(rs.option.emitter_enabled,
                            1 if enable_ir_emitter else 0)

    #GET the depth scale from the SDK
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 6  # 6 meters
    clipping_distance = clipping_distance_in_meters / depth_scale
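    # depth_scale converts raw z16 depth units to meters, so dividing a metric
    # threshold by depth_scale converts it back into raw depth units.
    # e.g. with depth_scale = 0.001 (1 mm units): 6 m / 0.001 = 6000 raw units.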

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    colorizer = rs.colorizer()  # TO colorize the depth image

    #FILTERS
    dec_filter = rs.decimation_filter(
    )  # Decimation - reduces depth frame density
    dec_filter.set_option(rs.option.filter_magnitude, 4)

    spat_filter = rs.spatial_filter(
    )  # Spatial    - edge-preserving spatial smoothing
    spat_filter.set_option(rs.option.filter_magnitude, 5)
    spat_filter.set_option(rs.option.filter_smooth_alpha, 1)
    spat_filter.set_option(rs.option.filter_smooth_delta, 50)
    spat_filter.set_option(rs.option.holes_fill, 3)

    temp_filter = rs.temporal_filter()  # Temporal   - reduces temporal noise

    hole_filling = rs.hole_filling_filter()  #FILL ALL THE HOLES
    #FILTERS END
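    # (Assumed from the librealsense post-processing guidance: filters are
    # applied in order decimation -> spatial -> temporal -> hole filling;
    # the loop below uses spatial -> temporal -> hole filling, with
    # decimation left commented out.)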

    out = cv2.VideoWriter('data/read.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,
                          (1280, 720))  #must match the size of frames written

    #Some Variables
    count = 0
    answer = {}

    cv2.namedWindow("W")
    cv2.setMouseCallback("W", mouse_callback)
    ix = False

    # Streaming loop
    try:
        while True:
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
            # frames.get_depth_frame() is a 1280x720 depth image

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame(
            )  # depth frame aligned to the 1280x720 color stream
            color_frame = aligned_frames.get_color_frame()  #Getting RGB frame

            # Validate that both frames are valid before filtering
            if not aligned_depth_frame or not color_frame:
                continue

            #        aligned_depth_frame = dec_filter.process(aligned_depth_frame)
            aligned_depth_frame = spat_filter.process(
                aligned_depth_frame)  #Applying Filter
            aligned_depth_frame = temp_filter.process(
                aligned_depth_frame)  #Applying Filter
            aligned_depth_frame = hole_filling.process(
                aligned_depth_frame)  #Applying Filter

            depth_image = np.asanyarray(
                aligned_depth_frame.get_data())  #Getting final depth image

            #Colorize the depth image
            colorized_depth = np.asanyarray(
                colorizer.colorize(aligned_depth_frame).get_data())

            #        #See the depth image
            #        cv2.imshow("colorized depth",rescale(colorized_depth,50))

            depth_image[depth_image > 5000] = 0  #drop returns beyond 5 m (1 mm units)

            color_image = np.asanyarray(
                color_frame.get_data())  #Getting final RGB frame

            if count == 0:
                # Frame 0 is the first image; it can be used for background subtraction.
                answer[count] = [depth_image, color_image]
                count += 1
                continue
            # Left click sets ix (see mouse_callback) and starts storing frames
            if ix:
                color_image1, check, values = apply_cascade(
                    color_image.copy(),
                    'models/cascades/haarcascade_lefteye_2splits.xml')
                if check:
                    answer[count] = [depth_image, color_image]
                    count += 1
            cv2.imshow('W', rescale(color_image, 90))
            #Stop once 15 frames are stored
            if len(answer) == 15:
                break

    finally:
        pipeline.stop()  #Turn off the camera
        cv2.destroyAllWindows()  #Remove all the windows
        out.release()

    import pickle
    output = open('data/record_15.pkl', 'wb')
    pickle.dump(answer, output)
    output.close()
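

# Typical usage of the pair: record_15() captures and pickles 15 frames to
# data/record_15.pkl, then detect() (defined next) replays them offline.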
def detect():
    pkl_file = open('data/record_15.pkl', 'rb')
    record_15 = pickle.load(pkl_file)
    pkl_file.close()

    #Some Variables
    ix = False
    depth_scale = 0.0010000000474974513  #depth scale recorded from the camera (~1 mm per unit)

    first_depth, first_color = record_15[0]

    FACIAL_LANDMARKS_IDXS = OrderedDict([("mouth", (48, 68)),
                                         ("right_eyebrow", (17, 22)),
                                         ("left_eyebrow", (22, 27)),
                                         ("right_eye", (36, 42)),
                                         ("left_eye", (42, 48)),
                                         ("nose", (27, 35)), ("jaw", (0, 17))])
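    # The tuples above are half-open (start, end) index ranges into dlib's
    # 68-point facial landmark array; only the two eye ranges are used below.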

    answer = []

    df = pd.DataFrame()
    temp = {}
    try:

        for i in range(1, len(record_15)):
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

            depth, color = record_15[i]
            dic1, image1 = find_face_points(color.copy())
            if len(dic1) < 1:
                continue

            image, area, loc1 = get_area(color.copy(), dic1[0],
                                         FACIAL_LANDMARKS_IDXS["right_eye"],
                                         depth)
            size = area.shape[0] * area.shape[1]
            re = iterate(area) * depth_scale * 100

            image, area, loc2 = get_area(color.copy(), dic1[0],
                                         FACIAL_LANDMARKS_IDXS["left_eye"],
                                         depth)
            size2 = area.shape[0] * area.shape[1]
            le = iterate(area) * depth_scale * 100

            if min([size, size2]) / max([size, size2]) < 0.8:
                continue
#            print([size,size2])

            if np.isnan(le) or np.isnan(re):
                continue

            loc = min(loc1, loc2)
            error = -1  #constant offset (cm) applied to both eye distances
            le += error
            re += error

            # Estimate pixel height by background subtraction: diff against the
            # empty first frame, threshold, then take the extreme foreground rows.
            threshold = 70
            image3 = cv2.subtract(first_color, color)
            ret, a = cv2.threshold(image3, threshold, 255, cv2.THRESH_BINARY)
            grayImage = cv2.cvtColor(a, cv2.COLOR_BGR2GRAY)
            grayImage[grayImage > threshold] = 255
            grayImage[grayImage <= threshold] = 0
            [x1, y1] = find_highest(grayImage)
            [x2, y2] = find_lowest(grayImage)
            height = abs(y2 - y1)
            cv2.line(color, (0, y1), (1280, y1), (0, 255, 255), 2)
            cv2.line(color, (0, y2), (1280, y2), (0, 255, 255), 2)
            real_height1 = le * height / 916.364501953125
            real_height2 = re * height / 916.364501953125
            expected = 177.8 * 916.364501953125 / height
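            # Pinhole-model note (assumptions, not values read from the SDK):
            # 916.364501953125 is taken to be the color stream's focal length
            # in pixels and 177.8 cm the subject's known height, so
            #   real_height = eye_distance * pixel_height / f
            # and `expected` inverts this: the distance (cm) at which a
            # 177.8 cm subject would span `height` pixels.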
            cv2.imshow("image", rescale(color, 75))
            temp = {
                "Eye_l": le,
                "Eye_R": re,
                "pixel_height": height,
                'expected': expected,
                'location': loc,
                'E_R': real_height2,
                'E_L': real_height1
            }
            df = pd.concat([df, pd.DataFrame([temp])],
                           ignore_index=True)  #DataFrame.append is gone in modern pandas
            print(temp)

    finally:
        cv2.destroyAllWindows()

    return df
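

# `find_highest` and `find_lowest` are used in detect() but not shown in these
# examples. Minimal sketches, assuming each returns the [x, y] of the top-most
# (resp. bottom-most) foreground pixel of a binary mask:
def find_highest(binary):
    ys, xs = np.nonzero(binary)  #rows/cols of all white pixels
    if ys.size == 0:
        return [0, 0]  #no foreground found
    i = np.argmin(ys)  #smallest row index = highest point in the image
    return [xs[i], ys[i]]


def find_lowest(binary):
    ys, xs = np.nonzero(binary)
    if ys.size == 0:
        return [0, 0]
    i = np.argmax(ys)  #largest row index = lowest point in the image
    return [xs[i], ys[i]]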