Example #1
def draw_circle(event,x,y,flags,param):
    global ix,iy,drawing,mode

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix,iy = x,y

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        x0, y0 = x, y
        print('ixiy=' + str(ix) + ',' + str(iy))
        print('x0y0=' + str(x0) + ',' + str(y0))
        udist = depth_frame.get_distance(ix, iy)
        vdist = depth_frame.get_distance(x0, y0)
        print(udist, vdist)

        point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
        point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x0, y0], vdist)
        print(str(point1) + str(point2))
        dist = math.sqrt(
            math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1],2) + math.pow(
                point1[2] - point2[2], 2))
        print('distance: ' + str(dist))
        if mode:
            cv2.line(images, (ix, iy), (x, y), (0, 255, 0), 100)
        else:
            cv2.circle(images, (x, y), 5, (0, 0, 255), 100)
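A minimal wiring sketch for the callback above (assumptions: a running RealSense pipeline supplying the globals the callback reads -- drawing, mode, ix, iy, depth_frame, color_intrin and images):

import cv2

cv2.namedWindow('RealSense')
# OpenCV invokes draw_circle with (event, x, y, flags, param) on mouse activity.
cv2.setMouseCallback('RealSense', draw_circle)
while cv2.waitKey(1) != 27:  # Esc quits
    cv2.imshow('RealSense', images)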
Example #2
def map_location(point: list,
                 eyes: list,
                 video_provider,
                 depth_frame,
                 ref,
                 r: int = 3):
    da = get_depth(eyes, depth_frame, 4)  # Depth of the eye

    # Depth of the point (use the supplied reference depth when given)
    db = get_depth(point, depth_frame, r) if ref == 0 else ref

    xa, ya, za = rs.rs2_deproject_pixel_to_point(
        video_provider.depth_intrinsics, eyes, da)
    xb, yb, zb = rs.rs2_deproject_pixel_to_point(
        video_provider.color_intrinsics, point, db)

    # Correct for the camera tilt: rotate both points about the x-axis by the
    # tilt angle theta (a module-level global in this snippet).
    ya = ya * math.cos(theta) + za * math.sin(theta)
    yb = yb * math.cos(theta) + zb * math.sin(theta)

    dz = db + da
    dy = yb - ya
    dx = xb - xa
    if dz != 0:
        yi = ya + (da / dz) * dy
        xi = xa + (da / dz) * dx
        if not math.isnan(xi) and not math.isnan(yi):
            return [round(xi), round(yi)]

    return [-1, -1]
Example #3
def displacement(colorized_depth):
    img1 = cv2.imread('images/d435colored_4.png')
    img = cv2.imread('images/d435depth_4.png')

    loop = True
    while loop:

        obj = Grabcut()
        imageGC = obj.GC(img1)

        plt.imshow(imageGC[..., ::-1])
        plt.show()

        x, y = input('Enter xy values: ').split()
        x1, y1 = input('Enter other xy values: ').split()
        depth = aligned_depth_frame.get_distance(int(x), int(y))
        dx, dy, dz = rs.rs2_deproject_pixel_to_point(color_intrin,
                                                     [int(x), int(y)], depth)
        depth1 = aligned_depth_frame.get_distance(int(x1), int(y1))
        dx1, dy1, dz1 = rs.rs2_deproject_pixel_to_point(
            color_intrin, [int(x1), int(y1)], depth1)
        # euclid = cv2.norm((dx, dy, dz), (dx1, dy1, dz1), cv2.NORM_L2) # another way to calculate displacement, should be the same result as the distance variable
        distance = math.sqrt(((dx1 - dx)**2) + ((dy1 - dy)**2) +
                             ((dz1 - dz)**2))

        print('Displacement: ', distance, 'meters')
        print('Displacement: ', distance * 100, 'cm')

        again = input('Go again? (y or n)')
        if again == 'y':
            continue
        else:
            loop = False
Example #4
def computeXYZ(profile, mask, depth_map, color):
    prof = profile.get_stream(rs.stream.depth)
    intrin = prof.as_video_stream_profile().get_intrinsics()
    #print("Camera Parameters:\n {}".format(intrin))

    w, h = np.argwhere(mask).T
    pts = []
    rgb = []
    for x, y in zip(w, h):
        # x is a row index and y a column index, so the pixel must be passed
        # to the deprojection as [u, v] = [column, row].
        pts.append(
            np.array(
                rs.rs2_deproject_pixel_to_point(intrin, [y, x],
                                                depth_map[x, y])))
        rgb.append(color[x, y, :])
    pts = np.array(pts)
    rgb = np.array(rgb)

    # mask.size is assumed to be (width, height), i.e. a PIL image; for a
    # NumPy array use mask.shape[::-1] instead.
    a, b = mask.size
    xx, yy = np.meshgrid(np.arange(a), np.arange(b))
    xx = xx.flatten()
    yy = yy.flatten()

    todos_pts = []
    todos_rgb = []
    for y, x in zip(xx, yy):
        # Here y is the column index and x the row index; again pass the
        # pixel as [column, row].
        todos_pts.append(
            np.array(
                rs.rs2_deproject_pixel_to_point(intrin, [y, x],
                                                depth_map[x, y])))
        todos_rgb.append(color[x, y, :])
    todos_pts = np.array(todos_pts)
    return todos_pts, pts, todos_rgb, rgb
Example #5
def getnewAvgDepth(aligned_depth_frame, Point, depth_intrin, initPoint=None):
    depthval = aligned_depth_frame.get_distance(int(Point[0]), int(Point[1]))
    depth_pointCtr = rs.rs2_deproject_pixel_to_point(
        depth_intrin, [int(Point[0]), int(Point[1])], depthval)

    depth_values = []

    # Sample a 7x7 window centered on the point (range end is exclusive).
    for i in range(-3, 4):
        for j in range(-3, 4):
            depthval = aligned_depth_frame.get_distance(
                int(Point[0] + i), int(Point[1] + j))
            depth_point = rs.rs2_deproject_pixel_to_point(
                depth_intrin,
                [int(Point[0] + i), int(Point[1] + j)], depthval)

            # Skip invalid (zero-depth) samples.
            if depth_point[2] == 0:
                continue

            depth_values.append(depth_point[2])

    # Use the median so single outliers do not skew the result; fall back to
    # zero if every sample in the window was invalid.
    med = np.median(depth_values) if depth_values else 0

    # Reject jumps of more than 0.3 m relative to the initial point.
    if initPoint is not None and abs(initPoint[2] - med) > 0.3:
        med = 0

    depth_pointCtr[2] = med

    return depth_pointCtr
Example #6
    def xyz(self,
            pxl,
            depth_frame,
            depth_int,
            wnd=(0, 0),
            z_min=10,
            z_max=1000):
        dim = np.asanyarray(depth_frame.get_data()).shape

        # make pixel to int
        pxl = np.array(pxl)
        pxl = pxl.astype(int)
        # Half-lattice in y; the mirrored sample pxl - l covers the other side.
        lattice = np.array([[x, y] for x in range(-wnd[0], wnd[0] + 1)
                            for y in range(wnd[1] + 1)])
        avg = np.array([0. for i in range(3)])

        i = 0
        for l in lattice:
            try:
                pxl_p = pxl + l
                pxl_n = pxl - l
                # make sure pixeles are not out of range
                if any([
                        any([
                            p[0] < 0, p[0] >= dim[1], p[1] < 0, p[1] >= dim[0]
                        ]) for p in [pxl_p, pxl_n]
                ]):
                    continue

                xyz_p = rs.rs2_deproject_pixel_to_point(
                    depth_int, pxl_p.tolist(),
                    depth_frame.get_distance(pxl_p[0], pxl_p[1]))
                xyz_p = 1000 * np.array(xyz_p)
                xyz_n = rs.rs2_deproject_pixel_to_point(
                    depth_int, pxl_n.tolist(),
                    depth_frame.get_distance(pxl_n[0], pxl_n[1]))
                xyz_n = 1000 * np.array(xyz_n)

                valid_point = [
                    z_min < xyz_p[2] < z_max,
                    z_min < xyz_n[2] < z_max,
                ]
                """
                avg += valid_point[0]*xyz_p + valid_point[1]*xyz_n
                i += sum(valid_point)
                """
                if sum(valid_point) == 2:
                    avg += xyz_p + xyz_n
                    i += 2
            except Exception as ex:
                print(ex)

        if i == 0:
            return avg

        return avg / i
Example #7
def render_result(skeletons, img, confidence_threshold, depth_frame,
                  depth_scale):
    skeleton_color = (100, 254, 213)
    skeles_drawn = np.zeros(len(skeletons))
    for i, skeleton in enumerate(skeletons):
        if skeles_drawn[i]:  # skip if already drawn
            continue

        # draw_skeleton(img, skeleton, confidence_threshold, skeleton_color)
        # determine distance violations - mh
        chest1_keypoint = skeleton.joints[1]
        # skip skeletons whose chest keypoint falls outside the 640x480 frame
        if (chest1_keypoint[0] < 0 or chest1_keypoint[0] >= 640) or (
                chest1_keypoint[1] < 0 or chest1_keypoint[1] >= 480):
            continue

        # chest1_distance = depth_frame.get_distance(int(chest1_keypoint[0]), int(chest1_keypoint[1]))
        depth = depth_image[int(chest1_keypoint[1]),
                            int(chest1_keypoint[0])] * depth_scale
        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        # The pixel is passed in (x, y) order, as the deprojection expects.
        p1 = rs.rs2_deproject_pixel_to_point(
            depth_intrin, [int(chest1_keypoint[0]),
                           int(chest1_keypoint[1])], depth)
        # p1 = [chest1_keypoint[0], chest1_keypoint[1], chest1_distance]

        for j in range(i + 1, len(skeletons)):
            skeleton2 = skeletons[j]
            chest2_keypoint = skeleton2.joints[1]
            if (chest2_keypoint[0] < 0 or chest2_keypoint[0] >= 640) or (
                    chest2_keypoint[1] < 0 or chest2_keypoint[1] >= 480):
                continue
            # chest2_distance = depth_frame.get_distance(int(chest2_keypoint[0]), int(chest2_keypoint[1]))
            depth = depth_image[int(chest2_keypoint[1]),
                                int(chest2_keypoint[0])] * depth_scale
            depth_intrin = depth_frame.profile.as_video_stream_profile(
            ).intrinsics
            p2 = rs.rs2_deproject_pixel_to_point(
                depth_intrin,
                [int(chest2_keypoint[0]),
                 int(chest2_keypoint[1])], depth)
            # p2 = [chest2_keypoint[0], chest2_keypoint[1], chest1_distance]

            distance = math.sqrt(((p1[0] - p2[0])**2) + ((p1[1] - p2[1])**2) +
                                 ((p1[2] - p2[2])**2))
            # print(distance)

            if distance < 1.5:  # draw box red
                draw_skeleton(img, skeleton, confidence_threshold, (0, 0, 255))
                draw_skeleton(img, skeleton2, confidence_threshold,
                              (0, 0, 255))

                skeles_drawn[i] = 1
                skeles_drawn[j] = 1
                break

        if not skeles_drawn[i]:
            draw_skeleton(img, skeleton, confidence_threshold, skeleton_color)
Example #8
def convert_depth_to_phys_coord_using_realsense(depth_coordinates, depth):
    result = rs.rs2_deproject_pixel_to_point(
        depth_intrinsics, [depth_coordinates[0], depth_coordinates[1]], depth)

    # Use the optical center (principal point), in [x, y] pixel order, as the
    # reference point.
    center_pixel = [depth_intrinsics.ppx, depth_intrinsics.ppy]
    result_center = rs.rs2_deproject_pixel_to_point(depth_intrinsics,
                                                    center_pixel, depth)

    # Re-order the axes relative to the center ray: forward, vertical offset,
    # and (negated) lateral offset.
    return result[2], (result[1] -
                       result_center[1]), -(result[0] - result_center[0])
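Hypothetical usage of the function above (depth_intrinsics is a module-level global in this snippet; the pixel and depth would normally come from a detection):

# e.g. the pixel (320, 240) observed at a depth of 1.0 m
forward, vertical, lateral = convert_depth_to_phys_coord_using_realsense(
    [320, 240], 1.0)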
Example #9
    def calculate_distance(self, ix, iy, x, y):

        udist = self.get_depth_xy(ix, iy)
        vdist = self.get_depth_xy(x, y)

        point1 = rs.rs2_deproject_pixel_to_point(self.intrin, [ix, iy], udist)
        point2 = rs.rs2_deproject_pixel_to_point(self.intrin, [x, y], vdist)

        dist = math.sqrt(
            math.pow(point1[0] - point2[0], 2) +
            math.pow(point1[1] - point2[1], 2) +
            math.pow(point1[2] - point2[2], 2))
        return dist
Example #10
def pixels_to_landmarks(intrinsics, color_frame, depth_frame, depth_scale):
    """
    Transforms pixel landmarks to point space
    :param intrinsics: Intrinsic parameters of the camera
    :param color_frame: color frame
    :param depth_frame: pyrealsense2.pyrealsense2.composite_frame -> rs2::depth_frame
    :param depth_scale: depth scale information
    :return: a list of landmarks with signature -> dict(str(name), tuple((x, y, z)))
    """
    print("[INFO] Depth Scale is: ", depth_scale)
    print("[INFO] Multiplying points by:", 1 / depth_scale)
    scale = 1 / depth_scale
    color_image = np.asanyarray(color_frame.get_data())
    lm = landmark_detector(color_image)
    points = list()
    for n, px in lm.items():
        z = depth_frame.get_distance(px[0], px[1])
        # One deprojection call yields all three coordinates.
        xx, yy, zz = rs.rs2_deproject_pixel_to_point(intrinsics,
                                                     [px[0], px[1]], z)

        # No scaling, no shift - landmarks are still off so it's not scaling issue
        # lm.update({"%s" % n: (xx, yy, zz)})

        # No shift introduced
        lm.update({"%s" % n: (xx * scale, yy * scale, zz * scale)})

        # When frames are aligned
        # lm.update({"%s" % n: ((xx + 0.006) * scale, (yy + 0.003) * scale, zz * scale)})
        # lm.update({"%s" % n: ((xx) * scale, (yy) * scale, zz * scale)})

        # When frames are not aligned
        # lm.update({"%s" % n: ((xx - 0.005) * scale, (yy + 0.007) * scale, zz * scale)})

        # Test points alone
        points.append([xx * scale, yy * scale, zz * scale])

        if LM_DEBUG:
            with open(
                    os.path.join(SAVE_PATH, '3D_landmarks_rounded.numpy.txt'),
                    'w+') as f:
                np.savetxt(f,
                           np.asanyarray(points),
                           fmt='%-7.8f',
                           delimiter=' ')
            print("Landmark Pixels: ", px[0], px[1],
                  "Corresponding 3D Points: ", xx, yy, zz, "Scaled: ",
                  xx * scale, yy * scale, zz * scale)

        # with open(os.path.join(SAVE_PATH, "3D_landmarks_raw.txt"), "w+") as f:
        #     csv_writer = csv.writer(f)
        #     csv_writer.writerows(lm)

    return lm
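A minimal sketch of how the arguments above can be obtained (assumptions: an attached RealSense camera with default streams; landmark_detector is this example's own helper):

import numpy as np
import pyrealsense2 as rs

pipeline = rs.pipeline()
profile = pipeline.start()
# The depth scale converts raw depth units to meters.
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
intrinsics = depth_frame.profile.as_video_stream_profile().intrinsics
lm = pixels_to_landmarks(intrinsics, color_frame, depth_frame, depth_scale)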
Example #11
def calc(x0, y0, conf0, x1, y1, conf1, aligned_depth, frame_intrin, frame_name,
         depth_scale, is_right_shoulder, time_stamp):
    global meter_coordinates
    global point_coordinates
    pixel_1 = [x0, y0]
    pixel_2 = [x1, y1]
    depth_1 = aligned_depth[int(x0), int(y0)]
    depth_2 = aligned_depth[int(x1), int(y1)]
    # depth * depth scale = physical distance between the camera and the baby
    phys_depth_1 = depth_1 * depth_scale
    phys_depth_2 = depth_2 * depth_scale

    # Getting the depth via this API returns wrong values; the cause is unknown.
    # phys_depth_1 = depth_frame.get_distance(pixel_1[0], pixel_1[1])
    # phys_depth_2 = depth_frame.get_distance(pixel_2[0], pixel_2[1])
    # depth_1 = phys_depth_1 / depth_scale
    # depth_2 = phys_depth_2 / depth_scale
    # A negative depth is invalid, since it would place the baby behind the
    # camera. The upper limit is the furthest expected distance between the
    # baby and the camera, which may vary between recordings.
    if (-1 < phys_depth_1 < 1.9) and (-1 < phys_depth_2 < 1.9):
        point1 = rse.rs2_deproject_pixel_to_point(frame_intrin, pixel_1,
                                                  phys_depth_1)
        point2 = rse.rs2_deproject_pixel_to_point(frame_intrin, pixel_2,
                                                  phys_depth_2)
        # This get the distance in meters, we multiply by 100 to get cm
        length = distance(point1[1], point1[0], point1[2], point2[1],
                          point2[0], point2[2]) * 100
        if frame_name not in meter_coordinates:
            meter_coordinates += '\n' + str(frame_name) + ',' + str(point1[0]) + ',' + str(point1[1]) + ',' \
                                 + str(point1[2]) + ',' + str(conf0) + ',' + str(point2[0]) + ',' + str(point2[1]) \
                                 + ',' + str(point2[2]) + ',' + str(conf1)
        else:
            if is_right_shoulder:
                meter_coordinates += ',' + str(point1[0]) + ',' + str(point1[1]) + ',' + str(point1[2]) + ',' \
                                     + str(conf0) + ',' + str(point2[0]) + ',' + str(point2[1]) + ',' + str(point2[2]) \
                                     + ',' + str(conf1)
            else:
                meter_coordinates += ',' + str(point2[0]) + ',' + str(point2[1]) + ',' + str(point2[2]) + ',' \
                                     + str(conf1)
        if frame_name not in point_coordinates:
            point_coordinates += '\n' + str(frame_name) + ',' + str(y0) + ',' + str(x0) + ',' + str(depth_1) + ',' \
                                 + str(conf0) + ',' + str(y1) + ',' + str(x1) + ',' + str(depth_2) + ',' + str(conf1)
        else:
            if is_right_shoulder:
                point_coordinates += ',' + str(y0) + ',' + str(x0) + ',' + str(depth_1) + ',' + str(conf0) + ',' \
                                     + str(y1) + ',' + str(x1) + ',' + str(depth_2) + ',' + str(conf1)
            else:
                point_coordinates += ',' + str(y1) + ',' + str(x1) + ',' + str(
                    depth_2) + ',' + str(conf1)
        return length
    else:
        return None
Example #12
def calculate_distance(depth_frame, intrin, x1, y1, x2, y2):
    udist = depth_frame.get_distance(x1, y1)
    vdist = depth_frame.get_distance(x2, y2)
    # print(udist, vdist)

    point1 = rs.rs2_deproject_pixel_to_point(intrin, [x1, y1], udist)
    point2 = rs.rs2_deproject_pixel_to_point(intrin, [x2, y2], vdist)
    # print(point1, point2)

    # Planar distance only: the z component is deliberately ignored.
    dist = math.sqrt(
        math.pow(point1[0] - point2[0], 2) +
        math.pow(point1[1] - point2[1], 2))

    return dist
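Deprojection has an exact inverse, rs2_project_point_to_pixel, which makes a handy sanity check inside functions like the one above (a sketch placed after point1 is computed, assuming udist is a valid non-zero reading):

# Projecting a deprojected point back should land on the original pixel.
pixel = rs.rs2_project_point_to_pixel(intrin, point1)
assert abs(pixel[0] - x1) < 1 and abs(pixel[1] - y1) < 1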
Example #13
 def get_list(self):
     """This method get the list A and B by rs.deproject function"""
     for match in self.matches:
         img_pixel = [int(self.featureA[match.queryIdx].pt[0]), int(self.featureA[match.queryIdx].pt[1])]
         depth = aligned_depth_frame.get_distance(img_pixel[0], img_pixel[1])
         point_a = rs.rs2_deproject_pixel_to_point(self.intrin, img_pixel, depth)
         # Keep (x, z) as a homogeneous ground-plane point (same for point_b).
         point_a = [point_a[0], point_a[2], 1]
         img_pixel = [int(self.featureB[match.trainIdx].pt[0]), int(self.featureB[match.trainIdx].pt[1])]
         depth = aligned_depth_frame.get_distance(img_pixel[0], img_pixel[1])
         point_b = rs.rs2_deproject_pixel_to_point(self.intrin, img_pixel, depth)
         point_b = [point_b[0], point_b[2], 1]
         self.listA.append(point_a)
         self.listB.append(point_b)
Example #14
 def get_width_objs(self, x, y, w, h, var_limits_inside):
     # Sample the depth at both horizontal edges of the box at mid-height;
     # dividing by 10 presumably converts the raw depth units to centimeters.
     iy1 = iy2 = y + round(h / 2)
     ix1 = x + var_limits_inside
     ix2 = x + w - var_limits_inside
     iz1 = self.depth_image[iy1, ix1] / 10
     iz2 = self.depth_image[iy2, ix2] / 10
     point1 = rs.rs2_deproject_pixel_to_point(self.color_intrin, [ix1, iy1],
                                              iz1)
     point2 = rs.rs2_deproject_pixel_to_point(self.color_intrin, [ix2, iy2],
                                              iz2)
     return math.sqrt(
         math.pow(point1[0] - point2[0], 2) +
         math.pow(point1[1] - point2[1], 2) +
         math.pow(point1[2] - point2[2], 2))
Example #15
 def calculate_3D(xi, yi, x0, y0):
     udist = depth_frame.get_distance(xi, yi)
     vdist = depth_frame.get_distance(x0, y0)
     print(udist, vdist)
     point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [xi, yi], udist)
     point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x0, y0], vdist)
     print('start(x,y,z): ' + str(point1) + '\n' + 'end(x,y,z): ' + str(point2))
     dist = math.sqrt(
         math.pow(point1[0] - point2[0], 2) +
         math.pow(point1[1] - point2[1], 2) +
         math.pow(point1[2] - point2[2], 2))
     cm = dist * 100
     decimal2 = "%.2f" % cm
     print('Measurement: ' + decimal2 + ' cm')
     MessageBox(text='Measurement: ' + decimal2 + ' cm')
Example #16
def compute_distance(profile,depth_map,refPt):
    prof = profile.get_stream(rs.stream.depth)
    intrin = prof.as_video_stream_profile().get_intrinsics()

    print("Camera Parameters:\n {}".format(intrin))

    # refPt holds the two selected pixels; take the (x, y) part of each.
    px0, px1 = refPt[0][:2], refPt[1][:2]
    a = np.array(
        rs.rs2_deproject_pixel_to_point(intrin, px0, depth_map[px0[0],
                                                               px0[1]]))
    b = np.array(
        rs.rs2_deproject_pixel_to_point(intrin, px1, depth_map[px1[0],
                                                               px1[1]]))

    D = np.sqrt((a - b).dot(a - b))

    d = "distance between dots = {}mm".format(D)
    print(d)
    return D
Example #17
def cmPerPx(depth_frame, x1, y1, x2, y2, scale):
    depth1 = depth_frame.get_distance(x1, y1)
    depth2 = depth_frame.get_distance(x2, y2)
    # fx,fy = rs.rs2_fov(depth_frame.profile.as_video_stream_profile().intrinsics)
    # print(fx,fy)
    # realWdith = (abs(x1-x2) ) * 2 * math.tan(math.radians(fx / 2)) * depth1 / depth_frame.width
    # realHeight = (abs(y1-y2) )  * 2 * math.tan(math.radians(fy / 2)) * depth1 / depth_frame.width
    # print(realWdith,realHeight,depth1 - depth2)
    intrin = depth_frame.profile.as_video_stream_profile().intrinsics
    x = rs.rs2_deproject_pixel_to_point(intrin,[x1,y1],depth1)
    y = rs.rs2_deproject_pixel_to_point(intrin,[x2,y2],depth2)
    print("depth1 : ",depth1)
    print("depth1 : ",depth2)
    print(((x[0]-y[0])**2 +(x[1]-y[1])**2 +(x[2]-y[2])**2)**0.5 )
    # return (realWdith ** 2 + realHeight ** 2 + (depth1 - depth2)**2)**(0.5)
Example #18
def mapGenerator(detections, intr):
    plt.ion()
    plt.grid(color='r', linestyle='-', linewidth=2)
    plt.axis([-2.5, 2.5, 0, 5])

    for i in range(len(detections[0])):
        # Transform pixel (x, y) plus depth into a new xyz:
        cxy = [detections[0][i], detections[1][i]]
        depth = detections[2][i]
        point = rs.rs2_deproject_pixel_to_point(
            intr, cxy,
            depth)  # Outputs xyz in the camera's 3-D reference frame

        # Display the map:
        if len(point) != 0:
            p = plt.plot(point[1],
                         point[2],
                         marker='o',
                         label=detections[4][i])
            plt.text(point[1], point[2], detections[4][i])
            fig.canvas.draw()

    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    img = cv.cvtColor(img, cv.COLOR_RGB2BGR)
    cv.imshow("Labelled Map", img)
    plt.clf()
    plt.ioff()
Example #19
    def getRelativePos(self, pixCoord, meshSize=3):
        # Using the center pixel coordinates of a detected target, generate a
        # meshSize x meshSize window around it and deproject the aligned
        # depth frame over that window.

        assert meshSize % 2 == 1, "meshSize must be odd"
        pixCoord = np.round(pixCoord)
        #xyz_array = ros_numpy.point_cloud2.get_xyz_points(self.ptCloud)
        x = np.linspace(pixCoord[0] - (meshSize - 1) / 2,
                        pixCoord[0] + (meshSize - 1) / 2,
                        meshSize,
                        dtype=np.int32)
        y = np.linspace(pixCoord[1] - (meshSize - 1) / 2,
                        pixCoord[1] + (meshSize - 1) / 2,
                        meshSize,
                        dtype=np.int32)

        # deproject the depth image
        pts = np.zeros([meshSize * meshSize, 3])
        n = 0
        for xv in x:
            for yv in y:
                pts[n, :] = rs2.rs2_deproject_pixel_to_point(
                    self.intrinsics, [xv, yv], self.Dframe[yv, xv])
                n += 1
        # Average the deprojected points, then return a planar (forward, left)
        # position in meters (raw depth assumed to be in millimeters).
        relPos = np.sum(pts, 0) / (meshSize**2)
        relPosT = np.array([relPos[2], -relPos[0]]) * 0.001
        return relPosT
Example #20
def calculate_relative_pose(box, depth_frame, depth_intrin):
    x_mid = int((box[0]+box[2])/2)
    y_mid = int((box[1]+box[3])/2)
    object_depth, object_angle = detect_object_depth_orientation(box, depth_frame)
    if object_depth is not None:
        # The camera intrinsics couldn't be flipped so the other data has to be temporarily flipped
        if flip_camera_ver:
            y_mid = RESOLUTION_HEIGHT-y_mid
        if flip_camera_hor:
            x_mid = RESOLUTION_WIDTH-x_mid
        object_point = rs.rs2_deproject_pixel_to_point(depth_intrin, [x_mid, y_mid], object_depth)
        # Makes sure all objects are above ground-level
        object_point[1] = -object_point[1]
        if object_point[1] < 0:
            object_point[1] = -object_point[1]
        else:
            object_point[1] = object_point[1]*2
        if flip_camera_ver:
            object_point[1] = -object_point[1]
        if flip_camera_hor:
            object_point[0] = -object_point[0]
        object_xyz = [object_point[0], object_point[2], object_point[1]]
        return object_xyz, object_depth, object_angle
    else:
        return None, None, None
Example #21
def depthmatrix_to_pointcloud(
    matrix: np.ndarray,
    intrin: rs2.intrinsics,
    max_dist=float('Inf'),
    min_dist=float('-Inf')
) -> np.ndarray:
    """
    Uses rs2_deproject_pixel_to_point to map each pixel to a point.
    Note: the pixel is passed as [j, i] because deprojection expects (column, row) order
    :param matrix: The matrix that represents the depth frame
    :param intrin: The intrinsics of the camera
    :param max_dist: the maximum accepted depth value
    :param min_dist: the minimum accepted depth value
    :return: A pointcloud via the depth frame and the rs2_deproject_pixel_to_point function
    """
    assert len(matrix.shape) == 2

    points: List = []
    for i in range(matrix.shape[0]):
        for j in range(matrix.shape[1]):
            if min_dist <= matrix[i][j] <= max_dist:
                points.append(
                    rs2.rs2_deproject_pixel_to_point(intrin, [j, i],
                                                     matrix[i][j]))

    return np.array(points)
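A sketch of feeding this function from a live frame (assuming the pipeline from the earlier sketch; the raw matrix holds uint16 depth units, so the bounds below are in those units):

depth_frame = pipeline.wait_for_frames().get_depth_frame()
intrin = depth_frame.profile.as_video_stream_profile().intrinsics
matrix = np.asanyarray(depth_frame.get_data())
cloud = depthmatrix_to_pointcloud(matrix, intrin, max_dist=4000, min_dist=1)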
Example #22
 def calc_world_pos(x, y, depth_frame, tolerance_radius=5):
     depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
     depth = 0
     if tolerance_radius > 0:
         depth_candidates = []
         for x_i in range(-tolerance_radius, tolerance_radius):
             for y_i in range(-tolerance_radius, tolerance_radius):
                 x_t = x+x_i
                 y_t = y+y_i
                 if not DepthRoiEvaluator.is_in_bounds(x_t, y_t, depth_frame):
                     continue
                 depth = depth_frame.get_distance(x_t, y_t)
                 if depth < 10.0:
                     depth_candidates.append(depth)
         if len(depth_candidates) == 0:
             return None, None  # a pair, so callers can always unpack (point, depth)
         depth_candidates = np.array(depth_candidates)
         # reject outliers
         m = 2
         depth_candidates = depth_candidates[
             abs(depth_candidates - np.mean(depth_candidates)) < m * np.std(depth_candidates)]
         depth = np.mean(depth_candidates)
     else:
         depth = depth_frame.get_distance(x, y)
     return rs.rs2_deproject_pixel_to_point(depth_intrin, [x, y], depth), depth
Example #23
def average_matrices_to_pointcloud(
    mats: List[np.ndarray],
    intrin: rs2.intrinsics,
    max_dist=float('Inf'),
    min_dist=float('-Inf')
) -> np.ndarray:
    """
    Uses rs2_deproject_pixel_to_point as in the depthmatrix_to_pointcloud function.
    Points that have the same (i, j) pixel coordinates have their z values averaged, where only z values within the range are considered.
    :param mats: A list of matrices that each represents the depth frame
    :param intrin: The intrinsics of the camera
    :param max_dist: the maximum accepted depth value
    :param min_dist: the minimum accepted depth value
    :return: A pointcloud calculated via averaging the matrices and the rs2_deproject_pixel_to_point function.
    """
    ij_to_zs: Dict[Tuple[int, int], List[float]] = defaultdict(list)
    for mat in mats:
        assert len(mat.shape) == 2

        for i in range(mat.shape[0]):
            for j in range(mat.shape[1]):
                if min_dist <= mat[i][j] <= max_dist:
                    ij_to_zs[(i, j)].append(mat[i][j])

    pc = []
    for (i, j), zs in ij_to_zs.items():
        pc.append(
            rs2.rs2_deproject_pixel_to_point(intrin, [j, i], np.average(zs)))

    return np.array(pc)
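Averaging a short burst of frames before deprojecting, under the same assumptions as the previous sketch (note the .copy(): the frame's buffer may be reused once the next frame arrives):

mats = []
for _ in range(5):
    f = pipeline.wait_for_frames().get_depth_frame()
    mats.append(np.asanyarray(f.get_data()).copy())
cloud = average_matrices_to_pointcloud(mats, intrin, max_dist=4000, min_dist=1)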
Example #24
 def calculate_distance(self, x, y):
     color_intrin = self.color_intrin
     x_ratio, y_ratio = self.x_ratio, self.y_ratio
     ix, iy = int(self.ix/x_ratio), int(self.iy/y_ratio)
     x, y = int(x/x_ratio), int(y/y_ratio)
     udist = self.depth_frame.get_distance(ix, iy)
     vdist = self.depth_frame.get_distance(x, y)
     if udist == 0.0 or vdist == 0.0:
         dist = float('nan')  # no valid depth reading at one of the pixels
     else:
         point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
         point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], vdist)
         dist = math.sqrt(
             math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2) + math.pow(
                 point1[2] - point2[2], 2))
     return dist
Example #25
def frame_to_scan(image_frame, intren, depth_scale=0.5):
    scan_row = int(image_frame.shape[0] / 2)
    row_size = image_frame.shape[1]

    scan_mat = np.zeros((row_size, row_size), np.uint8)
    MAX_WIDTH = 2  # the scan covers a maximum of 2 meters of width
    scale_val = (row_size - 10) / MAX_WIDTH  # scale points to fit the image

    point_collection = []
    for j in range(row_size):
        depth_val = image_frame[scan_row][j]
        point = rs.rs2_deproject_pixel_to_point(intren, [j, scan_row],
                                                depth_val)
        point = np.asarray(point) * depth_scale
        point_collection.append(point)

        print("x_coordinate: ", point[0])

        img_point = scale_val * point
        img_x_val = int(img_point[0]) + int(row_size / 2) - 1

        print("img_x, img_y: " + str(img_point[0]) + ", " + str(img_point[2]))
        # Don't draw points that fall outside the image plane
        if 0 <= img_x_val < row_size and 0 <= img_point[2] <= row_size:
            print("writing!")
            scan_mat[row_size - int(img_point[2]) - 1][img_x_val] = 255

    return scan_mat
Example #26
def get_depth_as_distance(box, curr_frame, depth_image, color_image,
                          depth_intrin):
    (x, y, w, h) = box.astype("int")
    if curr_frame >= 30 and curr_frame % 30 == 0:
        roi_depth_image = depth_image[y:y + h, x:x + w]
        roi_color_image = color_image[y:y + h, x:x + w]

        os.makedirs(str(curr_frame), exist_ok=True)
        cv2.imwrite('%d/depth.jpg' % curr_frame, roi_depth_image)
        print("save depth image")
        cv2.imwrite('%d/color.jpg' % curr_frame, roi_color_image)
        print("save color image")
        print("the mid position depth is:",
              depth_frame.get_distance(int(x + w / 2), int(y + h / 2)))

        # write the depth data in a depth.txt
        with open('%d/depth.csv' % curr_frame, 'w') as f:
            cols = list(range(x, x + w))
            rows = list(range(y, y + h))
            for i in rows:
                for j in cols:
                    # (j, i) always lies inside the box by construction, so
                    # no extra bounds check is needed here.
                    depth = depth_frame.get_distance(j, i)
                    depth_point = rs.rs2_deproject_pixel_to_point(
                        depth_intrin, [j, i], depth)
                    text = "%d: %.5lf, %.5lf, %.5lf\n" % (
                        curr_frame, depth_point[0], depth_point[1],
                        depth_point[2])
                    f.write(text)
        print("Finish writing the depth img")
Example #27
def ProccessCircle(frame, cnts, depth, ballNum, graph_iter=0):

    # only proceed if at least one contour was found
    if len(cnts) > 0:

        ((x, y),
         radius) = cv2.minEnclosingCircle(max(cnts, key=cv2.contourArea))

        # only proceed if the radius meets a minimum size
        if radius > 5:

            # draw the circle, centroid and line on the frame
            markBallInFrame(frame, x, y, radius, ballNum)

            # convert from pixel coordinates to real-world coordinates
            depth_val = depth.get_distance(int(x), int(y))
            depth_intrin = depth.profile.as_video_stream_profile().intrinsics
            (x, y,
             z) = rs.rs2_deproject_pixel_to_point(depth_intrin, [x, y],
                                                  depth_val)

            # update x , y , z according to initial state and publish
            update_publish_axes(x, y, z, ballNum)

            # build and output graphs if needed (for debugging)
            if graph_iter > 0:
                if iterNum == graph_iter:
                    iters = [i for i in range(len(axes[ballNum]))]
                    plt_axes = list(zip(*axes[ballNum]))
                    plt.plot(iters, list(plt_axes[0]), 'r--', iters,
                             list(plt_axes[1]), 'bs', iters, list(plt_axes[2]),
                             'g^')
                    plt.show()
Example #28
    def run(self):
        global depth_frame_global
        print("motor thread starting")
        while not self.done:
            time.sleep(0.001)
            with depth_frame_lock:
                depth_frame = depth_frame_global
                if depth_frame is None:
                    continue

                pos = self.motor_controller.position()
                mouse_x = int(
                    max(
                        min((pos[0] / 50.8 + 0.25) * WIDTH / RESCALE * ZOOM /
                            1.25, WIDTH / RESCALE * ZOOM - 1), 0))
                mouse_y = int(
                    max(
                        min((pos[1] / 50.8 + 0.08) * WIDTH / RESCALE * ZOOM /
                            1.25 - 10, HEIGHT / RESCALE * ZOOM - 1), 0))

                z = max(int(pos[2]) * 400, 1)

                mouse_real_world = rs.rs2_deproject_pixel_to_point(
                    depth_frame.profile.as_video_stream_profile().intrinsics,
                    [mouse_x // 2, mouse_y // 2], z)

                vec = compute_feedback_vector(depth_frame, mouse_real_world)

            with frame_info_lock:
                frame_info_global["mouse_x"] = mouse_x
                frame_info_global["mouse_y"] = mouse_y
                frame_info_global["vec"] = vec

            self.motor_controller.create_force(vec, pos)
            self.frame_counter.inc()
Example #29
    def imageDepthCallback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)
            # pick one pixel among all the pixels with the closest range:
            indices = np.array(
                np.where(cv_image == cv_image[cv_image > 0].min()))[:, 0]
            pix = (indices[1], indices[0])
            self.pix = pix
            line = '\rDepth at pixel(%3d, %3d): %7.1f(mm).' % (
                pix[0], pix[1], cv_image[pix[1], pix[0]])

            if self.intrinsics:
                depth = cv_image[pix[1], pix[0]]
                result = rs2.rs2_deproject_pixel_to_point(
                    self.intrinsics, [pix[0], pix[1]], depth)
                line += '  Coordinate: %8.2f %8.2f %8.2f.' % (
                    result[0], result[1], result[2])
            if self.pix_grade is not None:
                line += ' Grade: %2d' % self.pix_grade
            line += '\r'
            sys.stdout.write(line)
            sys.stdout.flush()

        except CvBridgeError as e:
            print(e)
            return
        except ValueError:
            return
Example #30
 def mouseRGB(event, x, y, flags, param):
     if event == cv2.EVENT_LBUTTONDOWN:  # on left mouse button down
         # cv2 mouse callbacks report (x, y) = (column, row), which is the
         # order get_distance and the deprojection expect.
         z = aligned_depth_frame.get_distance(x, y)
         point = rs.rs2_deproject_pixel_to_point(
             depth_intrinsics, [x, y], z)
         print("Projected point:", "x =", point[0], "y =", point[1],
               "z =", point[2])
Example #31
 def get_point(x, y):
     p = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], d)
     batch.add(2, gl.GL_LINES, None, ('v3f', [0, 0, 0] + p))
     return p
Example #32
 def get_point(x, y):
     p = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], d)
     line3d(out, orig, view(p), color)
     return p