# Example #1
 def detectVisibleSide(self, image):
     """Report which side of the object is facing the camera.

     Segments the green regions of *image*, discards contours in the
     lower half of the frame, and returns WEST_SIDE when any green
     contour survives, EAST_SIDE otherwise.
     """
     frame = cv.CloneImage(image)
     segmenter = ColorSegmenter()
     greenMask = segmenter.segmentImageByColor(frame, segmenter.green, 3, 4)
     candidates = self.__findContoursInPicture__(greenMask)
     halfway = self.__getMiddleHeightOfImage__(frame)
     candidates = self.__removeContoursOnBottom__(candidates, halfway)
     # any remaining green contour in the top half means the west side is visible
     return self.WEST_SIDE if candidates else self.EAST_SIDE
    def search(self, queryKps, queryDescs):
        """Match the query descriptors against every sample image.

        Each sample is loaded, reduced to its magenta blob, described,
        and scored against the query. Returns the matches best-first as
        a list of (score, path) tuples with zero/negative scores
        dropped, or the empty mapping when there are no samples.
        """
        scores = {}

        for path in self.samplePaths:
            sample = cv2.imread(path)

            # isolate the magenta-coloured region before describing it
            blob = ColorSegmenter.getMagentaBlob(sample)
            (kps, descs) = self.descriptor.describe(blob)

            scores[path] = self.match(queryKps, queryDescs, kps, descs)

        if not scores:
            return scores

        # best match first; entries that did not score are filtered out
        return sorted(((score, path) for (path, score) in scores.items() if score > 0), reverse = True)
# Build a float32 buffer from the current frame and derive the static
# background model.
# NOTE(review): `c` and `f` are defined earlier in the file — presumably the
# capture handle and the last frame read; confirm against the preceding code.
avg2 = np.float32(f)
background = BackgroundRemoval.preprocessbackground(c, f, avg2)
_, f = c.read()
gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
while True:
    # Grab the next frame and subtract the learned background.
    _, f = c.read()
    gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
    image_nobackground = BackgroundRemoval.removebackground(gray, background)
    # Mask each colour channel against the foreground (per-pixel minimum).
    b, g, r = cv2.split(f)
    nb = np.minimum(image_nobackground, b)
    ng = np.minimum(image_nobackground, g)
    nr = np.minimum(image_nobackground, r)
    # NOTE(review): bn/gn/rn are never used below, and `ng` is passed twice
    # while `nb` is not passed at all — looks like it was meant to be
    # normalized(nb, ng, nr); confirm intent before changing.
    bn, gn, rn = normalized(ng, nr, ng)
    backgroundRemovedImage = cv2.merge((nb, ng, nr))

    # Segment the magenta blob, edge-detect it, and close small gaps so the
    # contour finder sees solid outlines.
    res = ColorSegmenter.getMagentaBlob(backgroundRemovedImage)
    objectdetection = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(objectdetection, 100, 250)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)

    # Raw ints: mode=2 (cv2.RETR_CCOMP), method=1 (cv2.CHAIN_APPROX_NONE).
    contours, hierarchy = cv2.findContours(closed, 2, 1)
    areaContours = []
    res2 = copy.deepcopy(res)
    for cnt in contours:
        # Only if there are 2 contours or something
        area = cv2.contourArea(cnt)
        hull = cv2.convexHull(cnt, returnPoints=False)
        # convexityDefects needs a hull/contour with enough points to be valid.
        if len(hull) > 3 and len(cnt) > 3:
            defects = cv2.convexityDefects(cnt, hull)
            # NOTE(review): `!= None` on a NumPy array is ambiguous/deprecated;
            # should be `is not None`. The body of this branch continues past
            # this chunk, so the code is left untouched here.
            if defects != None:
# SIFT produces many more keypoints, so demand more matches before accepting.
if useSIFT:
    minMatches = 50

descriptor = DinoDescriptor(useSIFT=useSIFT)
dinoMatcher = DinoMatcher(
    descriptor,
    glob.glob(args["samples"] + "/*.png"),
    ratio=ratio,
    minMatches=minMatches,
    useHamming=useHamming,
)
dinoResultsHandler = DinoResultsHandler(db)

# grab frames from the default web cam (device 0)
cap = cv2.VideoCapture(0)
# Main capture loop: segment and describe each frame, then display either a
# prompt (no keypoints) or the keypoint/green-box overlay.
# NOTE(review): no cv2.waitKey() is visible in this chunk — imshow windows
# only refresh when the GUI event loop is pumped, so presumably a waitKey
# appears later in the file. The loop body may also continue past this chunk,
# hence code is left byte-identical here. `if ret == True:` would be more
# idiomatic as `if ret:`.
while True:
    ret, queryImage = cap.read()
    if ret == True:
        queryImage = utils2.resize(queryImage, width = 1000)
        # Segment the pink area out
        segImage = ColorSegmenter.getMagentaBlob(queryImage)
        # Describe the query image
        (queryKps, queryDescs, queryKpdRaw) = descriptor.describeQuery(segImage)
        # It is really important to handle the camera idling time.
        if len(queryKps) == 0:
            # Nothing to match against: show the raw frame and prompt the user.
            print("Place The Object In The Camera!")
            cv2.imshow("Query", queryImage)
        # else let's start matching our samples to the query
        else:
            # To show the key points on query image
            kpImage = cv2.drawKeypoints(queryImage, queryKpdRaw, None)
            # let's also add the cool green box
            greenBoxImg = dinoResultsHandler.drawGreenBox(queryImage, segImage, kpImage)
            # showing the box must have a timer sleep, otherwise it will be flushed
            cv2.imshow("Query", greenBoxImg)
            time.sleep(0.025)