Example #1
def checkRange(self, src, lowBound, highBound):
    # Returns a 255/0 mask of the pixels strictly inside (lowBound, highBound).
    size = im.size(src)  # im: the project's image helper module
    mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    gt_low = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.CmpS(src, lowBound, gt_low, cv.CV_CMP_GT)
    lt_high = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.CmpS(src, highBound, lt_high, cv.CV_CMP_LT)
    cv.And(gt_low, lt_high, mask)
    return mask
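For comparison, a minimal sketch of the same exclusive range check with the modern cv2/NumPy API (not part of the original; cv2.inRange uses inclusive bounds, so the strict comparisons above become lowBound + 1 and highBound - 1 on integer images):

import cv2

def check_range(src, low_bound, high_bound):
    # 255 where low_bound < pixel < high_bound, 0 elsewhere (8-bit input assumed)
    return cv2.inRange(src, low_bound + 1, high_bound - 1)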
Example #2
def split(image):
    # mygray, myblue, mylgrn and mydgrn are scalar labels, presumably defined
    # at module level; each CmpS yields a 255/0 mask of the matching pixels.
    ch1 = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    ch2 = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    ch3 = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    ch4 = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    cv.CmpS(image, mygray, ch1, cv.CV_CMP_EQ)
    cv.CmpS(image, myblue, ch2, cv.CV_CMP_EQ)
    cv.CmpS(image, mylgrn, ch3, cv.CV_CMP_EQ)
    cv.CmpS(image, mydgrn, ch4, cv.CV_CMP_EQ)
    return (ch1, ch2, ch3, ch4)
Example #3
def equivalent_region_nearest(self, nearest_building):
    '''Find the equivalence-class region via the nearest building.'''
    equ_class_region = cv.CreateImage(
        (self.map_img.width, self.map_img.height), cv.IPL_DEPTH_8U, 1)
    bid = nearest_building.bid
    cv.CmpS(self.buildingMgr.empty_img, bid, equ_class_region,
            cv.CV_CMP_EQ)
    return equ_class_region
Example #4
def precornerdetect(image):
    # assume that the image is floating-point
    corners = cv.CloneMat(image)
    cv.PreCornerDetect(image, corners, 3)

    dilated_corners = cv.CloneMat(image)
    cv.Dilate(corners, dilated_corners, None, 1)

    corner_mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Sub(corners, dilated_corners, corners)
    cv.CmpS(corners, 0, corner_mask, cv.CV_CMP_GE)
    return (corners, corner_mask)
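The Sub/CmpS pair above keeps the pixels whose corner response is not exceeded by any neighbour, i.e. the local maxima. A minimal cv2 sketch of the same compare-with-dilation trick, assuming a float32 single-channel response image:

import cv2
import numpy as np

def local_maxima_mask(response):
    dilated = cv2.dilate(response, None)  # None kernel = 3x3 maximum filter
    return ((response >= dilated) * 255).astype(np.uint8)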
Example #5
    def handle_stereo(self, qq):

        (lmsg, lcmsg, rmsg, rcmsg) = qq
        limg = CvBridge().imgmsg_to_cv(lmsg, "mono8")
        rimg = CvBridge().imgmsg_to_cv(rmsg, "mono8")

        if 0:
            cv.SaveImage("/tmp/l%06d.png" % self.i, limg)
            cv.SaveImage("/tmp/r%06d.png" % self.i, rimg)
            self.i += 1

        scm = image_geometry.StereoCameraModel()
        scm.fromCameraInfo(lcmsg, rcmsg)

        bm = cv.CreateStereoBMState()
        if "wide" in rospy.resolve_name("stereo"):
            bm.numberOfDisparities = 160
        if 0:
            disparity = cv.CreateMat(limg.rows, limg.cols, cv.CV_16SC1)
            started = time.time()
            cv.FindStereoCorrespondenceBM(limg, rimg, disparity, bm)
            print time.time() - started
            ok = cv.CreateMat(limg.rows, limg.cols, cv.CV_8UC1)
            cv.CmpS(disparity, 0, ok, cv.CV_CMP_GT)
            cv.ShowImage("limg", limg)
            cv.ShowImage("disp", ok)
            cv.WaitKey(6)
        self.track(CvBridge().imgmsg_to_cv(lmsg, "rgb8"))
        if len(self.tracking) == 0:
            print "No markers found"
        for code, corners in self.tracking.items():
            corners3d = []
            for (x, y) in corners:
                limr = cv.GetSubRect(limg, (0, y - bm.SADWindowSize / 2, limg.cols, bm.SADWindowSize + 1))
                rimr = cv.GetSubRect(rimg, (0, y - bm.SADWindowSize / 2, rimg.cols, bm.SADWindowSize + 1))
                tiny_disparity = cv.CreateMat(limr.rows, limg.cols, cv.CV_16SC1)
                cv.FindStereoCorrespondenceBM(limr, rimr, tiny_disparity, bm)
                if tiny_disparity[7, x] < 0:
                    return
                corners3d.append(scm.projectPixelTo3d((x, y), tiny_disparity[7, x] / 16.))
                if 0:
                    cv.ShowImage("d", disparity)
            (a, b, c, d) = [numpy.array(pt) for pt in corners3d]
            def normal(s, t):
                return (t - s) / numpy.linalg.norm(t - s)
            x = PyKDL.Vector(*normal(a, b))
            y = PyKDL.Vector(*normal(a, d))
            f = PyKDL.Frame(PyKDL.Rotation(x, y, x * y), PyKDL.Vector(*a))
            msg = pm.toMsg(f)
            # print "%10f %10f %10f" % (msg.position.x, msg.position.y, msg.position.z)
            print code, msg.position.x, msg.position.y, msg.position.z
            self.broadcast(lmsg.header, code, msg)
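For reference, a sketch of the block-matching step with the modern cv2 API (an assumed equivalent, not the original code). StereoBM returns 16-bit fixed-point disparities with four fractional bits, which is why the code above divides tiny_disparity by 16:

import cv2
import numpy as np

def disparity_and_validity(left, right):
    # left/right: rectified grayscale uint8 frames of equal size
    stereo = cv2.StereoBM_create(numDisparities=160, blockSize=21)
    disparity = stereo.compute(left, right)  # CV_16S, values are disparity * 16
    valid = ((disparity > 0) * 255).astype(np.uint8)
    return disparity, valid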
Example #6
def dct_hash(img):
    img = float_version(img)  # float_version: caller's helper returning a 32F image
    small_img = cv.CreateImage((32, 32), cv.IPL_DEPTH_32F, 1)
    cv.Resize(img[20:190, 20:205], small_img)

    dct = cv.CreateMat(32, 32, cv.CV_32FC1)
    cv.DCT(small_img, dct, cv.CV_DXT_FORWARD)
    # keep an 8x8 low-frequency block, skipping the first row and column
    dct = dct[1:9, 1:9]

    avg = cv.Avg(dct)[0]
    dct_bit = cv.CreateImage((8, 8), cv.IPL_DEPTH_8U, 1)
    cv.CmpS(dct, avg, dct_bit, cv.CV_CMP_GT)

    return [dct_bit[y, x] == 255.0 for y in xrange(8) for x in xrange(8)]
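The 64 booleans form a perceptual (DCT) hash; images are typically compared by the Hamming distance between their hashes. A hypothetical helper, not part of the original:

def hash_distance(hash_a, hash_b):
    # number of differing bits; small distances mean visually similar images
    return sum(a != b for a, b in zip(hash_a, hash_b))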
Example #7
def detectMotion(self, curr):
    assert curr.nChannels == 1
    if len(self.history_frames) < self.nHistory:
        self.history_frames.append(curr)
        return curr
    else:
        oldest_frame = self.history_frames.pop(0)
        self.history_frames.append(curr)
    size = (curr.width, curr.height)
    motion_frame = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.AbsDiff(oldest_frame, curr, motion_frame)
    cv.CmpS(motion_frame, self.threshold, motion_frame, cv.CV_CMP_GT)
    # eliminate scattered pixels caused by camera noise
    img_temp = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.Erode(motion_frame, img_temp)
    cv.Dilate(img_temp, motion_frame)
    return motion_frame
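A minimal sketch of the same pipeline with the modern cv2 API (an assumed equivalent): absolute frame difference, strict threshold, then erosion followed by dilation (a morphological opening) to drop isolated noise pixels:

import cv2
import numpy as np

def motion_mask(oldest, curr, threshold):
    diff = cv2.absdiff(oldest, curr)
    _, mask = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
    kernel = np.ones((3, 3), np.uint8)
    return cv2.dilate(cv2.erode(mask, kernel), kernel)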
Example #8
    def get_near_region_mask(self, blds):
        regions = [bd.near_region_poly for bd in blds]
        c = cg.rects_intersection(regions, self.label_img.width,
                                  self.label_img.height)
        # subtract buildings
        equ_class_region = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        canvas = cv.CloneImage(self.label_img)
        cv.FillPoly(equ_class_region, [c], im.color.blue)
        cv.CmpS(canvas, 0, canvas, cv.CV_CMP_EQ)
        cv.And(equ_class_region, canvas, equ_class_region)

        # subtract near of near's neighbor
        near_of_near = set(self.buildings)
        for bd in blds:
            near_of_near = near_of_near.union(bd.near_set)
        near_of_near.difference_update(set(blds))
        near_regions = [bd.near_region_poly for bd in near_of_near]
        for reg, bd in zip(near_regions, near_of_near):
            if cg.has_intersection(equ_class_region, reg, self.label_img.width,
                                   self.label_img.height):
                cg.sub_intersection(equ_class_region, reg,
                                    self.label_img.width,
                                    self.label_img.height)
        # equ_class_region -= near of near via nearest
        for bd in near_of_near:
            eq_region = self.get_equivalent_region_via_nearest(bd)
            c = im.find_contour(eq_region)
            while c:
                if cg.has_intersection(equ_class_region, list(c),
                                       self.label_img.width,
                                       self.label_img.height):
                    cg.sub_intersection(equ_class_region, list(c),
                                        self.label_img.width,
                                        self.label_img.height)
                c = c.h_next()

        return equ_class_region
Example #9
def init_buildings(buildingIDs, label_img):
    bld_mask = cv.CreateImage((label_img.width, label_img.height),
                              cv.IPL_DEPTH_8U, 1)
    buildings = []
    for bid in buildingIDs:
        cv.CmpS(label_img, bid, bld_mask, cv.CV_CMP_EQ)
        buildings.append(Building(bid, im.find_contour(bld_mask)))

    for rb in buildings:
        for sb in buildings:
            if sb.bid == rb.bid:
                continue
            if rb.is_near_me(sb):
                rb.near_set.add(sb)
            if rb.is_north_me(sb):
                rb.north_set.add(sb)
            if rb.is_south_me(sb):
                rb.south_set.add(sb)
            if rb.is_east_me(sb):
                rb.east_set.add(sb)
            if rb.is_west_me(sb):
                rb.west_set.add(sb)

    return buildings
Example #10
    def init_empty_img(self):
        # calculate Union{ near_regions } for all building
        nearby_region_polys = [bd.near_region_poly for bd in self.buildings]
        all_near_regions = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        cv.FillPoly(all_near_regions, [nearby_region_polys[0]], im.color.blue)
        for poly in nearby_region_polys:
            tmp_canvas = cv.CreateImage(
                (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U,
                1)
            cv.FillPoly(tmp_canvas, [poly], im.color.blue)
            cv.Or(tmp_canvas, all_near_regions, all_near_regions)

        # find the "empty" region
        empty_region = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        cv.CmpS(all_near_regions, 0, empty_region, cv.CV_CMP_EQ)

        for ele in it.nonzero_indices(cv.GetMat(empty_region)):
            y, x = ele
            y, x = int(y), int(x)
            nearest_bd = self.get_nearest_building(x, y)
            cv.Set2D(empty_region, y, x, nearest_bd.bid)
        return empty_region
Example #11
        ctime = time()
        if ctime - stime >= 1:
            print '%ifps' % (framecnt)
            framecnt = 0
            stime = ctime

    # Get original image
    img = cv.QueryFrame(cam)

    # Adaptive diff
    #cv.Smooth(img, col, cv.CV_GAUSSIAN, 3, 0)
    cv.AddWeighted(img, 1 - ADAPT, avg, ADAPT, 0, avg)
    cv.AbsDiff(img, avg, col)
    cv.CvtColor(col, col, cv.CV_RGB2HSV)
    cv.Split(col, None, None, val, None)
    cv.CmpS(val, TH, val, cv.CV_CMP_GE)
    #cv.Dilate(val, val, None, 18)
    #cv.Erode(val, val, None, 10)
    dif = cv.CountNonZero(val)

    # Show image if it's radically new
    if (dif < PIXCOUNT):
        if act < 0:
            act = ACTLOW
        else:
            act -= 1
    else:
        if act > 0:
            act = ACTHIGH
        else:
            act += 1
Example #12
# histogram creation
#temp = cv.CreateMat(height,width,cv.CV_32FC1) #cv.CV_8UC1)
#cv.SetZero(temp)
#cv.ConvertScale(themap,temp,40,0)
#temp2 = cv.CreateMat(height,width,cv.CV_32FC1)
#cv.SetZero(temp2)
#cv.Pow(temp,temp2,0.5)
#cv.SaveImage("histogram.png",temp2)

cv.Smooth(themap, themap, cv.CV_GAUSSIAN, gaussian_blur, gaussian_blur)
cv.SaveImage("map.png", themap)
(minval, maxval, minloc, maxloc) = cv.MinMaxLoc(themap)
print "Min: %r max: %r" % (minval, maxval)
cv.ConvertScale(themap, mask, 255.0 / maxval, 0)
cv.SaveImage("mask.png", mask)
cv.CmpS(themap, mask_threshold, mask, cv.CV_CMP_GT)
cv.SaveImage("thresholded.png", mask)

#contour = cv.FindContours(mask, cv.CreateMemStorage(), cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
chain = cv.FindContours(mask, cv.CreateMemStorage(), cv.CV_RETR_CCOMP, cv.CV_CHAIN_CODE)
contour = cv.ApproxChains(chain, cv.CreateMemStorage(), cv.CV_CHAIN_APPROX_NONE, 0, 100, 1)

img = cv.CreateMat(height, width, cv.CV_8UC3)
cv.SetZero(img)
cv.DrawContours(img, contour, (255, 255, 255), (0, 255, 0), 6, 1)
cv.SaveImage("contours.png", img)

img = cv.CreateMat(height, width, cv.CV_8UC3)
cv.SetZero(img)

# create the voronoi graph
Example #13
def get_equivalent_region_via_nearest(self, bld):
    equ_class_region = cv.CreateImage(
        (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
    bid = bld.bid
    cv.CmpS(self.empty_img, bid, equ_class_region, cv.CV_CMP_EQ)
    return equ_class_region
Example #14
import pyvision as pv
import PIL, cv
ilog = pv.ImageLog()

im = pv.Image("baboon.jpg")

# In PIL
pil = im.asPIL()
gray = pil.convert('L')
thresh = PIL.Image.eval(gray, lambda x: 255 * (x > 127.5))
ilog(pv.Image(thresh), "PILThresh")

#in Scipy
mat = im.asMatrix2D()
thresh = mat > 127.5
ilog(pv.Image(1.0 * thresh), "ScipyThresh")

#in OpenCV
cvim = im.asOpenCVBW()
dest = cv.CreateImage(im.size, cv.IPL_DEPTH_8U, 1)
cv.CmpS(cvim, 127.5, dest, cv.CV_CMP_GT)
ilog(pv.Image(dest), "OpenCVThresh")

ilog.show()
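With modern cv2/NumPy the same strict threshold is a one-liner (a sketch, assuming a hypothetical 8-bit grayscale array arr):

import numpy as np
mask = ((arr > 127.5) * 255).astype(np.uint8)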
Example #15
    def get_point_cloud(self):

        # Scan the scene
        col_associations = self.get_projector_line_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height,
                                               self.projector_info.width,
                                               cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(
            illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height,
                                      self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection,
                                                     encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)

        valid_points_mask = cv.CreateMat(self.camera_info.height,
                                         self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(col_associations, -1, valid_points_mask, cv.CV_CMP_NE)

        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points,
                                                    cv.CV_32FC2)
        scanline_associations = cv.CreateMat(1, number_of_valid_points,
                                             cv.CV_32FC1)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    scanline_associations[0, i] = col_associations[row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1

        cv.UndistortPoints(rectified_camera_coordinates,
                           rectified_camera_coordinates,
                           camera_model.intrinsicMatrix(),
                           camera_model.distortionCoeffs())

        # Convert scanline numbers to plane normal vectors
        planes = self.scanline_numbers_to_planes(scanline_associations)

        camera_rays = project_pixels_to_3d_rays(rectified_camera_coordinates,
                                                camera_model)

        intersection_points = ray_plane_intersections(camera_rays, planes)

        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            if point.z < 0:
                print scanline_associations[0, i]
                print rectified_camera_coordinates[0, i]

            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)

        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg
Example #16
    def get_projector_line_associations(self):
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        for i in range(self.number_of_projection_patterns):
            positives.append(
                self.get_picture_of_projection(
                    self.predistorted_positive_projections[i]))
            negatives.append(
                self.get_picture_of_projection(
                    self.predistorted_negative_projections[i]))

        rospy.loginfo("Thresholding...")
        strike_sum = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        gray_codes = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            difference = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height,
                                               self.camera_info.width,
                                               cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height,
                                       self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask,
                    cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height,
                                   self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)

            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height,
                                    self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2**i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary
        projector_line_associations = cv.CreateMat(self.camera_info.height,
                                                   self.camera_info.width,
                                                   cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
        for i in range(
                cv.CV_MAT_DEPTH(cv.GetElemType(projector_line_associations)),
                -1, -1):
            projector_line_associations_bitshifted_right = cv.CreateMat(
                self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations,
                            projector_line_associations_bitshifted_right,
                            (2**-(2**i)), -0.5)
            cv.Xor(projector_line_associations,
                   projector_line_associations_bitshifted_right,
                   projector_line_associations)

        rospy.loginfo("Post processing...")

        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)

        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height,
                                             self.camera_info.width,
                                             cv.CV_8UC1)
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines,
                    invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)

        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations
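The ConvertScale/Xor loop above is the standard Gray-to-binary decode (XOR the code with every right shift of itself), done with whole-image operations because the old cv API has no bit-shift. The same idea on a single integer, for reference:

def gray_to_binary(gray):
    binary = gray
    shift = gray >> 1
    while shift:
        binary ^= shift
        shift >>= 1
    return binary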
Example #17
    def get_point_cloud(self):

        # Scan the scene
        pixel_associations = self.get_pixel_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height,
                                               self.projector_info.width,
                                               cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(
            illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height,
                                      self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection,
                                                     encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)

        projector_model = PinholeCameraModel()
        projector_model.fromCameraInfo(self.projector_info)

        projector_to_camera_rotation_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector,
                      projector_to_camera_rotation_matrix)

        pixel_associations_x = cv.CreateMat(self.camera_info.height,
                                            self.camera_info.width,
                                            cv.CV_32SC1)
        pixel_associations_y = cv.CreateMat(self.camera_info.height,
                                            self.camera_info.width,
                                            cv.CV_32SC1)
        cv.Split(pixel_associations, pixel_associations_x,
                 pixel_associations_y, None, None)
        valid_points_mask_x = cv.CreateMat(self.camera_info.height,
                                           self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(pixel_associations_x, -1, valid_points_mask_x, cv.CV_CMP_NE)
        valid_points_mask_y = cv.CreateMat(self.camera_info.height,
                                           self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(pixel_associations_y, -1, valid_points_mask_y, cv.CV_CMP_NE)
        valid_points_mask = cv.CreateMat(self.camera_info.height,
                                         self.camera_info.width, cv.CV_8UC1)
        cv.And(valid_points_mask_x, valid_points_mask_y, valid_points_mask)

        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points,
                                                    cv.CV_32FC2)
        rectified_projector_coordinates = cv.CreateMat(1,
                                                       number_of_valid_points,
                                                       cv.CV_32FC2)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    rectified_projector_coordinates[0, i] = pixel_associations[
                        row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1

        cv.UndistortPoints(rectified_camera_coordinates,
                           rectified_camera_coordinates,
                           camera_model.intrinsicMatrix(),
                           camera_model.distortionCoeffs())

        # Convert scanline numbers to pixel numbers
        cv.AddS(rectified_projector_coordinates, 0.5,
                rectified_projector_coordinates)
        cv.ConvertScale(rectified_projector_coordinates,
                        rectified_projector_coordinates,
                        self.pixels_per_scanline)

        # Rectify the projector pixels
        cv.UndistortPoints(rectified_projector_coordinates,
                           rectified_projector_coordinates,
                           projector_model.intrinsicMatrix(),
                           projector_model.distortionCoeffs())

        camera_rays = projectPixelsTo3dRays(rectified_camera_coordinates,
                                            camera_model)

        projector_rays = projectPixelsTo3dRays(rectified_projector_coordinates,
                                               projector_model)

        # Bring the projector rays into camera coordinates
        cv.Transform(projector_rays, projector_rays,
                     projector_to_camera_rotation_matrix)

        camera_centers = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC3)
        cv.SetZero(camera_centers)

        projector_centers = cv.CreateMat(1, number_of_valid_points,
                                         cv.CV_32FC3)
        cv.Set(projector_centers, self.projector_to_camera_translation_vector)

        intersection_points = line_line_intersections(camera_centers,
                                                      camera_rays,
                                                      projector_centers,
                                                      projector_rays)

        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)

        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg
Example #18
    def detect(self):
        self.detected = 0
        cv.Smooth(self.grey, self.dst2, cv.CV_GAUSSIAN, 3)
        cv.Laplace(self.dst2, self.d)
        cv.CmpS(self.d, 8, self.d2, cv.CV_CMP_GT)

        if self.onlyBlackCubes:
            # can also detect on black lines for improved robustness
            cv.CmpS(self.grey, 100, self.b, cv.CV_CMP_LT)
            cv.And(self.b, self.d2, self.d2)

        # these weights should be adaptive. We should always detect 100 lines
        if self.lastdetected > self.dects:
            self.THR = self.THR + 1

        if self.lastdetected < self.dects:
            self.THR = max(2, self.THR - 1)

        self.li = cv.HoughLines2(self.d2, cv.CreateMemStorage(),
                                 cv.CV_HOUGH_PROBABILISTIC, 1, 3.1415926 / 45,
                                 self.THR, 10, 5)

        # store angles for later
        angs = []
        for (p1, p2) in self.li:
            # cv.Line(sg,p1,p2,(0,255,0))
            a = atan2(p2[1] - p1[1], p2[0] - p1[0])
            if a < 0:
                a += pi
            angs.append(a)

        # log.info("THR %d, lastdetected %d, dects %d, houghlines %d, angles: %s" % (self.THR, self.lastdetected, self.dects, len(self.li), pformat(angs)))

        # lets look for lines that share a common end point
        t = 10
        totry = []

        for i in range(len(self.li)):
            p1, p2 = self.li[i]

            for j in range(i + 1, len(self.li)):
                q1, q2 = self.li[j]

                # test lengths are approximately consistent
                dd1 = sqrt((p2[0] - p1[0]) * (p2[0] - p1[0]) +
                           (p2[1] - p1[1]) * (p2[1] - p1[1]))
                dd2 = sqrt((q2[0] - q1[0]) * (q2[0] - q1[0]) +
                           (q2[1] - q1[1]) * (q2[1] - q1[1]))

                if max(dd1, dd2) / min(dd1, dd2) > 1.3:
                    continue

                matched = 0
                if areclose(p1, q2, t):
                    IT = (avg(p1, q2), p2, q1, dd1)
                    matched = matched + 1

                if areclose(p2, q2, t):
                    IT = (avg(p2, q2), p1, q1, dd1)
                    matched = matched + 1

                if areclose(p1, q1, t):
                    IT = (avg(p1, q1), p2, q2, dd1)
                    matched = matched + 1

                if areclose(p2, q1, t):
                    IT = (avg(p2, q1), q2, p1, dd1)
                    matched = matched + 1

                if matched == 0:
                    # not touching at corner... try also inner grid segments hypothesis?
                    self.p1 = (float(p1[0]), float(p1[1]))
                    self.p2 = (float(p2[0]), float(p2[1]))
                    self.q1 = (float(q1[0]), float(q1[1]))
                    self.q2 = (float(q2[0]), float(q2[1]))
                    success, (ua, ub), (x, y) = intersect_seg(
                        self.p1[0], self.p2[0], self.q1[0], self.q2[0],
                        self.p1[1], self.p2[1], self.q1[1], self.q2[1])

                    if success and ua > 0 and ua < 1 and ub > 0 and ub < 1:
                        # if they intersect
                        # cv.Line(sg, p1, p2, (255,255,255))
                        ok1 = 0
                        ok2 = 0

                        if abs(ua - 1.0 / 3) < 0.05:
                            ok1 = 1

                        if abs(ua - 2.0 / 3) < 0.05:
                            ok1 = 2

                        if abs(ub - 1.0 / 3) < 0.05:
                            ok2 = 1

                        if abs(ub - 2.0 / 3) < 0.05:
                            ok2 = 2

                        if ok1 > 0 and ok2 > 0:
                            # ok these are inner lines of grid
                            # flip if necessary
                            if ok1 == 2:
                                self.p1, self.p2 = self.p2, self.p1

                            if ok2 == 2:
                                self.q1, self.q2 = self.q2, self.q1

                            # both lines now go from p1->p2, q1->q2 and
                            # intersect at 1/3
                            # calculate IT
                            z1 = (self.q1[0] + 2.0 / 3 *
                                  (self.p2[0] - self.p1[0]), self.q1[1] +
                                  2.0 / 3 * (self.p2[1] - self.p1[1]))
                            z2 = (self.p1[0] + 2.0 / 3 *
                                  (self.q2[0] - self.q1[0]), self.p1[1] +
                                  2.0 / 3 * (self.q2[1] - self.q1[1]))
                            z = (self.p1[0] - 1.0 / 3 *
                                 (self.q2[0] - self.q1[0]), self.p1[1] -
                                 1.0 / 3 * (self.q2[1] - self.q1[1]))
                            IT = (z, z1, z2, dd1)
                            matched = 1

                # only single one matched!! Could be corner
                if matched == 1:

                    # also test angle
                    a1 = atan2(p2[1] - p1[1], p2[0] - p1[0])
                    a2 = atan2(q2[1] - q1[1], q2[0] - q1[0])

                    if a1 < 0:
                        a1 += pi

                    if a2 < 0:
                        a2 += pi

                    ang = abs(abs(a2 - a1) - pi / 2)

                    if ang < 0.5:
                        totry.append(IT)
                        # cv.Circle(sg, IT[0], 5, (255,255,255))

        # now check if any points in totry are consistent!
        # t=4
        res = []
        for i in range(len(totry)):

            p, p1, p2, dd = totry[i]
            a1 = atan2(p1[1] - p[1], p1[0] - p[0])
            a2 = atan2(p2[1] - p[1], p2[0] - p[0])

            if a1 < 0:
                a1 += pi

            if a2 < 0:
                a2 += pi

            dd = 1.7 * dd
            evidence = 0

            # cv.Line(sg,p,p2,(0,255,0))
            # cv.Line(sg,p,p1,(0,255,0))

            # affine transform to local coords
            A = matrix([[p2[0] - p[0], p1[0] - p[0], p[0]],
                        [p2[1] - p[1], p1[1] - p[1], p[1]], [0, 0, 1]])
            Ainv = A.I

            v = matrix([[p1[0]], [p1[1]], [1]])

            # check likelihood of this coordinate system. iterate all lines
            # and see how many align with grid
            for j in range(len(self.li)):

                # test angle consistency with either one of the two angles
                a = angs[j]
                ang1 = abs(abs(a - a1) - pi / 2)
                ang2 = abs(abs(a - a2) - pi / 2)

                if ang1 > 0.1 and ang2 > 0.1:
                    continue

                # test position consistency.
                q1, q2 = self.li[j]
                qwe = 0.06

                # test one endpoint
                v = matrix([[q1[0]], [q1[1]], [1]])
                vp = Ainv * v

                # project it
                if vp[0, 0] > 1.1 or vp[0, 0] < -0.1:
                    continue

                if vp[1, 0] > 1.1 or vp[1, 0] < -0.1:
                    continue

                if abs(vp[0, 0] - 1 / 3.0) > qwe and abs(vp[0, 0] - 2 / 3.0) > qwe and \
                        abs(vp[1, 0] - 1 / 3.0) > qwe and abs(vp[1, 0] - 2 / 3.0) > qwe:
                    continue

                # the other end point
                v = matrix([[q2[0]], [q2[1]], [1]])
                vp = Ainv * v

                if vp[0, 0] > 1.1 or vp[0, 0] < -0.1:
                    continue

                if vp[1, 0] > 1.1 or vp[1, 0] < -0.1:
                    continue

                if abs(vp[0, 0] - 1 / 3.0) > qwe and abs(vp[0, 0] - 2 / 3.0) > qwe and \
                        abs(vp[1, 0] - 1 / 3.0) > qwe and abs(vp[1, 0] - 2 / 3.0) > qwe:
                    continue

                # cv.Circle(sg, q1, 3, (255,255,0))
                # cv.Circle(sg, q2, 3, (255,255,0))
                # cv.Line(sg,q1,q2,(0,255,255))
                evidence += 1

            res.append((evidence, (p, p1, p2)))

        minch = 10000
        res.sort(reverse=True)
        # log.info("dects %s, res:\n%s" % (self.dects, pformat(res)))

        if len(res) > 0:
            minps = []
            pt = []

            # among good observations find best one that fits with last one
            for i in range(len(res)):

                if res[i][0] > 0.05 * self.dects:
                    # OK WE HAVE GRID
                    p, p1, p2 = res[i][1]
                    p3 = (p2[0] + p1[0] - p[0], p2[1] + p1[1] - p[1])

                    # cv.Line(sg,p,p1,(0,255,0),2)
                    # cv.Line(sg,p,p2,(0,255,0),2)
                    # cv.Line(sg,p2,p3,(0,255,0),2)
                    # cv.Line(sg,p3,p1,(0,255,0),2)
                    # cen=(0.5*p2[0]+0.5*p1[0],0.5*p2[1]+0.5*p1[1])
                    # cv.Circle(sg, cen, 20, (0,0,255),5)
                    # cv.Line(sg, (0,cen[1]), (320,cen[1]),(0,255,0),2)
                    # cv.Line(sg, (cen[0],0), (cen[0],240), (0,255,0),2)

                    w = [p, p1, p2, p3]
                    p3 = (self.prevface[2][0] + self.prevface[1][0] -
                          self.prevface[0][0], self.prevface[2][1] +
                          self.prevface[1][1] - self.prevface[0][1])
                    tc = (self.prevface[0], self.prevface[1], self.prevface[2],
                          p3)
                    ch = compfaces(w, tc)

                    # log.info("ch %s, minch %s" % (ch, minch))
                    if ch < minch:
                        minch = ch
                        minps = (p, p1, p2)

            # log.info("minch %d, minps:\n%s" % (minch, pformat(minps)))

            if len(minps) > 0:
                self.prevface = minps

                if minch < 10:
                    # good enough!
                    self.succ += 1
                    self.pt = self.prevface
                    self.detected = 1
                    # log.info("detected %d, succ %d" % (self.detected, self.succ))

            else:
                self.succ = 0

            # log.info("succ %d\n\n" % self.succ)

            # we matched a few times same grid
            # coincidence? I think NOT!!! Init LK tracker
            if self.succ > 2:

                # initialize features for LK
                pt = []
                for i in [1.0 / 3, 2.0 / 3]:
                    for j in [1.0 / 3, 2.0 / 3]:
                        pt.append(
                            (self.p0[0] + i * self.v1[0] + j * self.v2[0],
                             self.p0[1] + i * self.v1[1] + j * self.v2[1]))

                self.features = pt
                self.tracking = True
                self.succ = 0
                log.info("non-tracking -> tracking: succ %d" % self.succ)
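In modern terms the detection front-end above is: Gaussian blur, Laplacian, binary edge mask, probabilistic Hough transform. A minimal cv2 sketch under those assumptions (find_grid_lines is a hypothetical name, thr stands in for self.THR):

import cv2
import numpy as np

def find_grid_lines(gray, thr):
    # gray: uint8 grayscale frame; thr: adaptive Hough vote threshold
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    lap = cv2.Laplacian(blurred, cv2.CV_16S)
    edges = ((lap > 8) * 255).astype(np.uint8)
    return cv2.HoughLinesP(edges, 1, np.pi / 45, thr,
                           minLineLength=10, maxLineGap=5)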
Example #19
def findColor(image, color):
    # CmpS requires an 8-bit destination, so request IPL_DEPTH_8U explicitly
    result = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.CmpS(image, color, result, cv.CV_CMP_EQ)
    return result
Example #20
def extractCol(image, col):
    result = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.CmpS(image, col, result, cv.CV_CMP_EQ)
    return result
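Both of the last two examples reduce to a per-pixel equality mask; with NumPy the equivalent is a one-liner (a sketch, assuming a single-channel uint8 array):

import numpy as np

def extract_col_np(image, col):
    return ((image == col) * 255).astype(np.uint8)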