Example #1
def mser_ecoli():
    if is_old_cv:
        mser = cv.MSER()
    else:
        mser = cv.MSER_create(  # cv.MSER_create()
            _delta=5,
            _min_area=720,
            _max_area=9000,
            _max_variation=15.0,
            _min_diversity=10.0,
            _max_evolution=10,
            _area_threshold=12.0,
            _min_margin=2.9,
            _edge_blur_size=10)

    # img_path = 'C:\\dev\\courses\\2.131 - Advanced Instrumentation\\E.coli.tif'
    img_path = 'C:\\dev\\Holographic-Images\\Combined-Half-and Half-capture-2018-04-30-20h-52m-09s.png'
    pil_img = Image.open(img_path)

    # for i in range(149):
    #     pil_img.seek(i)
    img = np.array(pil_img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    vis = img.copy()
    if is_old_cv:
        regions = mser.detect(gray, None)
    else:
        regions, q = mser.detectRegions(gray)

    #polylines
    hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    # cv.polylines(vis, hulls, 1, (0, 255, 0))

    #boundingboxes
    mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
    mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))

    for contour in hulls:
        cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)

    bboxes = []
    for i, contour in enumerate(hulls):
        x, y, w, h = cv2.boundingRect(contour)
        bboxes.append(cv2.boundingRect(contour))
        #cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)

    for i in range(len(bboxes)):
        j = i + 1
        while j < len(bboxes):
            if intersection(bboxes[i], bboxes[j]) > 0:
                break
            else:
                j = j + 1
        if j == len(bboxes):
            x, y, w, h = bboxes[i]
            cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 0, 255), 2)

    return img, vis, bboxes
    cv2.imshow('img', vis)
    cv2.waitKey()
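Note: Example #1 (and Example #16 below) rely on an is_old_cv flag and an intersection() helper defined elsewhere in their source module. A minimal sketch of such helpers, assuming the flag is derived from the major version in cv2.__version__ and that intersection() returns the overlap area of two (x, y, w, h) boxes:

# Hypothetical helpers assumed by Example #1 and Example #16 (not part of the original code).
import cv2
import cv2 as cv

# OpenCV 2.x exposes cv2.MSER(); 3.x and later use cv2.MSER_create().
is_old_cv = int(cv2.__version__.split('.')[0]) < 3

def intersection(a, b):
    """Overlap area of two (x, y, w, h) boxes; 0 if they do not intersect."""
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    w = min(ax + aw, bx + bw) - max(ax, bx)
    h = min(ay + ah, by + bh) - max(ay, by)
    return w * h if w > 0 and h > 0 else 0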
Example #2
 def detect(self, img):
     gray = self._to_gray(img)
     mser = cv2.MSER(_delta=1)
     regions = mser.detect(gray, None)
     bounding_boxes = self._get_boxes(regions)
     regions = Regions(img, bounding_boxes)
     return regions
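Example #2 also depends on project-specific pieces (self._to_gray, self._get_boxes, and a Regions class). A hedged sketch of what _get_boxes might do, assuming the OpenCV 2.x point-list regions returned by mser.detect:

# Hypothetical helper in the spirit of Example #2's _get_boxes (assumed, not from the original project).
import cv2

def _get_boxes(self, regions):
    # Each region is an (N, 2) array of pixel coordinates; wrap each one in its bounding rectangle.
    return [cv2.boundingRect(r.reshape(-1, 1, 2)) for r in regions]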
Example #3
File: algo.py Project: sorki/pllm
def mser(img,
         delta=3,
         min_area=500,
         max_area=35000,
         max_variation=0.1,
         min_diversity=0.5):
    """
    Find image segments using MSER (Maximally stable extremal regions)
    algorithm.

    Returns list of (x, y, w, h) tuples of detected segments.
    """

    mser = cv2.MSER(delta, min_area, max_area, max_variation, min_diversity)

    regions = mser.detect(img)

    hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

    segs = []
    for s in hulls:
        x, y, w, h = cv2.boundingRect(s)
        segs.append((x, y, w, h))

    return segs
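A short usage sketch for the mser() helper above, assuming OpenCV 2.x (which the cv2.MSER(...) constructor requires) and a placeholder image path:

# Hypothetical usage of Example #3's mser() helper; 'page.png' is a placeholder path.
import cv2

gray = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)
vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
for x, y, w, h in mser(gray):
    cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.imwrite('segments.png', vis)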
Example #4
    def detectRegion(self):
        if '3.0' in cv2.__version__:
            msers = cv2.MSER_create().detectRegions(self.image, None)
        elif '2.4' in cv2.__version__:
            msers = cv2.MSER().detect(self.image, None)
        """ MSER + α の候補を格納"""
        BBs = []
        for bb in msers:
            pt1 = tuple(np.min(bb, 0))
            pt2 = tuple(np.max(bb, 0))
            bb_width = np.abs(pt2[0] - pt1[0])
            bb_height = np.abs(pt2[1] - pt1[1])
            #BBs.append(BB(pt1[0], pt1[1], bb_width, bb_height))
            for pad in (10, 20, 30, 35):
                BBs.append(
                    BB(pt1[0] - pad, pt1[1] - pad, bb_width + pad * 2,
                       bb_height + pad * 2))
                BBs.append(
                    BB(pt1[0] - pad, pt2[1] - pad, bb_width + pad * 2,
                       bb_width + pad * 2))
                BBs.append(
                    BB(pt1[0] - pad, pt1[1] - pad - bb_width,
                       bb_width + pad * 2, bb_width + pad * 2))
                BBs.append(
                    BB(pt1[0] - pad, pt1[1] - pad, bb_height + pad * 2,
                       bb_height + pad * 2))
                BBs.append(
                    BB(pt1[0] - bb_height - pad, pt1[1] - pad,
                       bb_height + pad * 2, bb_height + pad * 2))

        return BBs
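Example #4 builds BB objects that are defined elsewhere in its project. A minimal stand-in, assumed here to be a simple (x, y, width, height) record:

# Hypothetical stand-in for the BB class used in Example #4 (not part of the original project).
import collections

BB = collections.namedtuple('BB', ['x', 'y', 'width', 'height'])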
Example #5
    def detect_features(self, needle, haystack):
        """
        In-house feature detection algorithm.

        :param needle: image to look for
        :type needle: :py:class:`target.Image`
        :param haystack: image to look in
        :type haystack: :py:class:`target.Image`

        .. warning:: This method is currently not fully implemented. The current
                     MSER might not be used in the actual implementation.
        """
        import cv2
        import numpy
        opencv_haystack = numpy.array(haystack.pil_image)
        opencv_needle = numpy.array(needle.pil_image)
        hgray = cv2.cvtColor(numpy.array(haystack.pil_image),
                             cv2.COLOR_RGB2GRAY)
        ngray = cv2.cvtColor(numpy.array(needle.pil_image), cv2.COLOR_RGB2GRAY)

        # TODO: this MSER blob feature detector is also available in
        # version 2.2.3 - implement if necessary
        detector = cv2.MSER()
        hregions = detector.detect(hgray, None)
        nregions = detector.detect(ngray, None)
        hhulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in hregions]
        nhulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in nregions]
        # show on final result
        cv2.polylines(opencv_haystack, hhulls, 1, (0, 255, 0))
        cv2.polylines(opencv_needle, nhulls, 1, (0, 255, 0))
Example #6
def findApples1(size, scans):
    "try to find an apple(s) of given size"
    orig = np.array(scans).T
    img = scans2img(scans)
    print img.shape, img.dtype
    #    cv2.threshold( img, 128, 255, cv2.THRESH_BINARY )
    g_mser = cv2.MSER(_delta=8, _min_area=100, _max_area=30 * 20)
    gray = img.T
    frame = cv2.cvtColor(img.T, cv2.COLOR_GRAY2BGR)
    contours = g_mser.detect(gray, None)

    ret = []
    for cnt in contours:
        (x1, y1), (x2, y2) = np.amin(cnt, axis=0), np.amax(cnt, axis=0)
        if abs((x2 - x1) * MOTION_STEP_X - size) < 0.01:
            print(x2 - x1) * MOTION_STEP_X, (x1, y1), (x2, y2)
            box = np.int0([(x1, y1), (x2, y1), (x2, y2), (x1, y2)])
            cv2.drawContours(frame, [box], 0, (255, 0, 0), 2)
            if isItApple(orig[y1:y2, x1:x2]):
                ret.append(((x1, y1), (x2, y2)))
                cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)

    cv2.imshow(
        'image',
        frame)  # transposed matrix corresponds to "what we are used to" view
    cv2.imwrite("tmp.png", frame)
    cv2.waitKey(0)
    return ret
Example #7
def msSift(img_name, show = False):
    # Extract local interest points with the MSER detector
    # and describe the extracted points by SIFT descriptor
    # img_name: string that stores a relative or absolute path to the input image
    # img_name eg '.\\Groundhog day\\I_00060.jpg'

    img = cv2.imread(img_name)
    if show:
        cv2.namedWindow('img')
        cv2.imshow('img', img)
        cv2.waitKey(200)
        cv2.destroyWindow('img')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # mser_region is a list
    # its elements are ndarrays containing the coordinates of its pixels
    mser = cv2.MSER()
    fd = cv2.FeatureDetector_create('MSER')
    # kpts is a list
    kp = fd.detect(gray)

    sift = cv2.SIFT()
    # des is a numpy array of shape (num_of_kp, 128)
    # compute SIFT features at places determined by kp
    kp, des = sift.compute(gray, kp)

    return gray, kp, des
Example #8
def findBlobsMSER(img):
    mser = cv2.MSER()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    regions = mser.detect(gray, None)
    hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    return hulls
Example #9
def regression_test_frame(idx, image_array, image_feature, memory_opt,
                          mser_opts, classifier, features_mask,
                          min_dot_distance):
    image_feature.update_features(image_array, idx, memory_opt)

    # get candidate points in the image from MSER
    red_channel = image_array[:, :, 0]
    red_channel = cv2.equalizeHist(red_channel)  # equalizes the histogram

    mser = cv2.MSER(mser_opts[0],
                    _min_area=mser_opts[1],
                    _max_area=mser_opts[2])
    regions = mser.detect(red_channel)

    candidate_points = set()
    for r in regions:
        for point in r:
            candidate_points.add(tuple(point))

    candidate_points = list(candidate_points)

    X = np.zeros((len(candidate_points), sum(features_mask)))

    # for each candidate point, extract the features and build test matrix
    for i in xrange(len(candidate_points)):
        cp = candidate_points[i]
        feats = image_feature.extractFeatsFromPoint((cp[1], cp[0]),
                                                    features_mask)
        X[i, :] = feats

    # test the vector
    predictions = classifier.predict(X)
    # print "Predictions:: ", predictions

    probabilities = np.zeros((512, 512))

    index = 0
    for i in candidate_points:
        threshold_value = 100
        if (predictions[index] < threshold_value):

            #convert prediction value into probabilty score map
            alpha = 1
            proximity_score = math.exp(
                alpha * (1 - (predictions[index] / threshold_value))) - 1
            print proximity_score
            probabilities[i[1], i[0]] = proximity_score
        index += 1

    # dot detector
    dd = Dd.DotDetect(probabilities)
    dots = dd.detect_dots(min_dot_distance)
    del (dd, mser)

    print("Frame " + str(idx) + " has been tested.")

    # return the dots and probabilities image
    # it also returns the updated image feature dictionary
    return idx, dots, probabilities, image_feature.feats_dictionary
Example #10
def MSER(image):
    im = image.copy()
    h, w = im.shape
    mser = cv2.MSER()
    regions = mser.detect(im, None)
    MSER_mask = np.zeros_like(im)
    for r in regions:
        MSER_mask[r[:, 1], r[:, 0]] = 255
    return MSER_mask
Example #11
 def detect(self, frame, u_roi, visualize=False):
     #get the user_roi
     img = frame.img
     # r_img = img[u_roi.lY:u_roi.uY,u_roi.lX:u_roi.uX]
     debug = True
     PARAMS = {
         '_delta': 10,
         '_min_area': 2000,
         '_max_area': 10000,
         '_max_variation': .25,
         '_min_diversity': .2,
         '_max_evolution': 200,
         '_area_threshold': 1.01,
         '_min_margin': .003,
         '_edge_blur_size': 7
     }
     pupil_intensity = 150
     pupil_ratio = 2
     mser = cv2.MSER(**PARAMS)
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     regions = mser.detect(gray, None)
     hulls = []
     # Select most circular hull
     for region in regions:
         h = cv2.convexHull(region.reshape(-1, 1, 2)).reshape((-1, 2))
         cv2.drawContours(frame.img, [h], -1, (255, 0, 0))
         hc = h - np.mean(h, 0)
         _, s, _ = np.linalg.svd(hc)
         r = s[0] / s[1]
         if r > pupil_ratio:
             logger.debug('Skipping ratio %f > %f' % (r, pupil_ratio))
             continue
         mval = np.median(gray.flat[np.dot(region,
                                           np.array([1, img.shape[1]]))])
         if mval > pupil_intensity:
             logger.debug('Skipping intensity %f > %f' %
                          (mval, pupil_intensity))
             continue
         logger.debug('Kept: Area[%f] Intensity[%f] Ratio[%f]' %
                      (region.shape[0], mval, r))
         hulls.append((r, region, h))
     if hulls:
         hulls.sort()
         gaze = np.round(np.mean(hulls[0][2].reshape((-1, 2)),
                                 0)).astype(np.int).tolist()
         logger.debug('Gaze[%d,%d]' % (gaze[0], gaze[1]))
         norm_pupil = normalize((gaze[0], gaze[1]),
                                (img.shape[1], img.shape[0]),
                                flip_y=True)
         return {
             'norm_pupil': norm_pupil,
             'timestamp': frame.timestamp,
             'center': (gaze[0], gaze[1])
         }
     else:
         return {'norm_pupil': None, 'timestamp': frame.timestamp}
Example #12
    def __init__(self, image):
        """Initialize parameters, and store the first image. """

        self.image = image
        self.features = []
        self.tracks = []
        self.track_len = 10
        self.current_frame = 0
        self.interval = 5
        self.mser = cv2.MSER()
Example #13
def mser_detect(img, x_len, y_len):
    utils.update_progress('Detecting Regions')

    min_t = int(math.floor((y_len * x_len) * 0.0009))
    max_t = int(math.floor((y_len * x_len) * 0.05))

    #MSER(5, 60, 14400, 0.25, 0.2, 200, 1.01, 0.003, 5)  <- Default Values
    c_mser = cv2.MSER(5, min_t, max_t, 0.166, 0.153, 90, 1.001, 0.003, 5)
    c_regions = c_mser.detect(img, None)
    return [cv2.convexHull(p.reshape(-1, 1, 2)) for p in c_regions]
Example #14
def mser_feature(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    (major, minor, subminor) = (cv2.__version__).split('.')
    if int(major) < 3:
        mser = cv2.MSER()
        regions = mser.detect(img, None)
    else:
        mser = cv2.MSER_create()
        regions, _ = mser.detectRegions(img)  # OpenCV 3.2+ returns (regions, bboxes)
    return len(regions)
Example #15
    def _create_detector(self):
        detector = cv2.MSER(_delta=self._delta,
                            _min_area=self._min_area,
                            _max_area=self._max_area,
                            _max_variation=self._max_variation,
                            _min_diversity=self._min_diversity,
                            _max_evolution=self._max_evolution,
                            _area_threshold=self._area_threshold,
                            _min_margin=self._min_margin,
                            _edge_blur_size=self._edge_blur_size)

        return detector
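The underscore-prefixed keyword arguments in Example #15 are accepted by the OpenCV 2.x cv2.MSER(...) constructor. A hedged sketch of an equivalent factory for OpenCV 3.x and later, passing the same nine parameters positionally to cv2.MSER_create (their order follows the C++ constructor):

# Sketch of an OpenCV 3.x+ counterpart to Example #15's _create_detector
# (shown out of class context; the self._* attributes are assumed to match the ones above).
import cv2

def _create_detector(self):
    # Positional order: delta, min_area, max_area, max_variation, min_diversity,
    # max_evolution, area_threshold, min_margin, edge_blur_size.
    return cv2.MSER_create(self._delta,
                           self._min_area,
                           self._max_area,
                           self._max_variation,
                           self._min_diversity,
                           self._max_evolution,
                           self._area_threshold,
                           self._min_margin,
                           self._edge_blur_size)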
Example #16
def mser_ecoli2(img, vis, bboxes1):
    if is_old_cv:
        mser = cv.MSER()
    else:
        mser = cv.MSER_create(  # cv.MSER_create()
            _delta=4,
            _min_area=500,
            _max_area=2000,
            _max_variation=15.0,
            _min_diversity=10.0,
            _max_evolution=10,
            _area_threshold=12.0,
            _min_margin=2.9,
            _edge_blur_size=10)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if is_old_cv:
        regions = mser.detect(gray, None)
    else:
        regions, q = mser.detectRegions(gray)

    #polylines
    hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    # cv.polylines(vis, hulls, 1, (0, 255, 0))

    #boundingboxes
    mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
    mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))

    for contour in hulls:
        cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)

    bboxes = []
    for i, contour in enumerate(hulls):
        x, y, w, h = cv2.boundingRect(contour)
        bboxes.append(cv2.boundingRect(contour))
        #cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)

    bboxesAll = bboxes + bboxes1
    for i in range(len(bboxes)):
        j = i + 1
        while j < len(bboxesAll):
            if intersection(bboxes[i], bboxesAll[j]) > 0:
                break
            else:
                j = j + 1
        if j == len(bboxesAll):
            x, y, w, h = bboxes[i]
            cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('img', vis)
    cv2.imwrite('classified.jpg', vis)
    cv2.waitKey()
Example #17
def get_txt(path):
    img = cv2.imread(path)
    mser = cv2.MSER(9, int(0.000 * img.size / 3), int(0.05 * img.size / 3),
                    0.1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Converting to GrayScale
    regions = mser.detect(gray, None)
    x = img.shape
    im = np.zeros((int(x[0]), int(x[1])), np.uint8)
    for region in regions:
        for i in range(0, len(region)):
            im[region[i][1]][region[i][0]] = 255
    return im
Example #18
    def __init__(self, image):
        """Initialize parameters, and store the first image. """

        self.image = image
        self.features = []
        self.tracks = []
        self.track_len = 10
        self.current_frame = 0
        self.interval = 5
        self.mser = cv2.MSER()
        self.cvh = cv_gpu.GPU() if use_gpu else cv2
        self.pool = Pool()
Example #19
def image2mser(greyim):
        ''' image2mser(greyim)

        convert greyscale image into mser regions
        input:
                greyim: numpy array, greyscale image
        Return:
                numpy array containing the label image'''
        mserimg = numpy.zeros(greyim.shape)
        mser = cv2.MSER()
        mser_areas = mser.detect(greyim, None)
        for i, m in enumerate(mser_areas):
                mserimg[m[:, 1], m[:, 0]] = i
        return mserimg
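A brief usage sketch for image2mser(), assuming a grayscale uint8 input and a placeholder file name, showing how one labelled region can be read back out of the returned label image:

# Hypothetical usage of Example #19's image2mser(); 'scan.png' is a placeholder path.
import cv2

greyim = cv2.imread('scan.png', cv2.IMREAD_GRAYSCALE)
labels = image2mser(greyim)
region_mask = (labels == 1)                     # boolean mask of the pixels labelled as region 1
print(region_mask.sum(), 'pixels in region 1')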
Example #20
def processFrame( frame, debug=False ):
  result = []
  global g_mser
  global THRESHOLD_FRACTION
  if g_mser == None:
    g_mser = cv2.MSER( _delta = 10, _min_area=100, _max_area=300*50*2 )
  gray = cv2.cvtColor( frame, cv2.COLOR_BGR2GRAY )
  if g_mser:
    contours = g_mser.detect(gray, None)
    if cv2.__version__ == "2.4.2":
      contours = arrayTo3d( contours ) # Jakub's workaround for 2.4.2 on linux
  else:
    histogram = cv2.calcHist([gray],[0],None,[256],[0,256])
    s = 0
    for i, h in enumerate(histogram):
      s += h
      if s > THRESHOLD_FRACTION * 640 * 360:
        break
    ret, binary = cv2.threshold( gray, i, 255, cv2.THRESH_BINARY )
#    ret, binary = cv2.threshold( gray, 0, 255, cv2.THRESH_OTSU )
    contours, hierarchy = cv2.findContours( binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
  for cnt in contours:
    if g_mser == None:
      area = cv2.contourArea(cnt, oriented=True)
    if g_mser != None or (area > 100 and area < 100000):
      rect = cv2.minAreaRect(cnt)
      if g_mser == None or len(cnt)/float(rect[1][0]*rect[1][1]) > 0.70:
        result.append( rect )
  if g_mser != None:
    result = removeDuplicities( result )
  if debug:
    if g_mser == None:
      cv2.drawContours(frame, contours, -1, (0,255,0), 3)
    for rect in result:
      box = cv2.cv.BoxPoints(rect)
      box = np.int0(box)
      cv2.drawContours( frame,[box],0,(255,0,0),2)
  result = filterRectangles([((int(x),int(y)),(int(w),int(h)),int(a)) for ((x,y),(w,h),a) in result], minWidth=150/2)
  if debug:
    for rect in result:
      box = cv2.cv.BoxPoints(rect)
      box = np.int0(box)
      cv2.drawContours( frame,[box],0,(0,0,255),2)
    cv2.imshow('image', frame)
    # save image for simpler results review, angle is used as hash for search sub-sequence
    if g_filename:
      cv2.imwrite( g_filename, frame )
  return result
Example #21
            def mserCompHist(img1, img2):
                mser = cv2.MSER()

                gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
                vis1 = img1.copy()
                vis2 = img2.copy()
                regions1 = mser.detect(gray1)
                hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions1]
                cv2.polylines(vis1, hulls, 1, (0, 255, 0))
                mask = np.zeros(gray1.shape, np.uint8)
                keypoints1 = []
                for hull in hulls:
                    (x, y), radius = cv2.minEnclosingCircle(hull)
                    center = (int(x), int(y))
                    radius = int(radius)
                    # cv2.circle(vis1, center, radius, (255, 0, 0), 2)
                    kp = cv2.KeyPoint()
                    kp.pt = center
                    kp.size = 2 * radius
                    keypoints1.append(kp)
                    cv2.drawContours(mask, [hull], 0, 255, -1)
                mask1 = cv2.bitwise_not(mask)
                masked_img1 = cv2.bitwise_and(gray1, gray1, mask=mask1)
                hist1 = cv2.calcHist([masked_img1], [0], None, [256], [0, 256])
                regions2 = mser.detect(gray2)
                hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions2]
                cv2.polylines(vis2, hulls, 1, (0, 255, 0))
                mask = np.zeros(gray2.shape, np.uint8)
                keypoints2 = []
                for hull in hulls:
                    (x, y), radius = cv2.minEnclosingCircle(hull)
                    center = (int(x), int(y))
                    radius = int(radius)
                    # cv2.circle(vis2, center, radius, (255, 0, 0), 2)
                    kp = cv2.KeyPoint()
                    kp.pt = center
                    kp.size = 2 * radius
                    keypoints2.append(kp)
                    cv2.drawContours(mask, [hull], 0, 255, -1)
                mask2 = cv2.bitwise_not(mask)
                masked_img2 = cv2.bitwise_and(gray2, gray2, mask=mask2)
                hist2 = cv2.calcHist([masked_img2], [0], None, [256], [0, 256])
                histSim = cv2.compareHist(hist1, hist2,
                                          cv2.cv.CV_COMP_INTERSECT)
                norm_histSim = histSim / (gray1.shape[0] * gray1.shape[1])
                return norm_histSim
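cv2.cv.CV_COMP_INTERSECT in Example #21 exists only in OpenCV 2.x. A small version-tolerant lookup, assuming only that the OpenCV 3.x+ constant is named cv2.HISTCMP_INTERSECT:

# Pick the intersection comparison constant for whichever OpenCV version is installed.
import cv2

try:
    HIST_INTERSECT = cv2.HISTCMP_INTERSECT       # OpenCV 3.x and later
except AttributeError:
    HIST_INTERSECT = cv2.cv.CV_COMP_INTERSECT    # OpenCV 2.x

# histSim = cv2.compareHist(hist1, hist2, HIST_INTERSECT)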
Example #22
def detectRoundel(frame, debug=False):
    global g_mser
    global THRESHOLD_FRACTION
    if g_mser == None:
        g_mser = cv2.MSER(_delta=10, _min_area=100, _max_area=300 * 50 * 2)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    contours = g_mser.detect(gray, None)
    rectangles = []
    circles = []
    for cnt in contours:
        rect = cv2.minAreaRect(cnt)
        area = len(
            cnt)  # MSER returns all points within area, not boundary points
        rectangleArea = float(rect[1][0] * rect[1][1])
        rectangleAspect = max(rect[1][0], rect[1][1]) / float(
            min(rect[1][0], rect[1][1]))
        if area / rectangleArea > 0.70 and rectangleAspect > 3.0:
            (x, y), (w, h), angle = rect
            rectangles.append(
                ((int(x + 0.5), int(y + 0.5)), (int(w + 0.5), int(h + 0.5)),
                 int(angle)))
        cir = cv2.minEnclosingCircle(cnt)
        (x, y), radius = cir
        circleArea = math.pi * radius * radius
        if area / circleArea > 0.64:
            circles.append(((int(x + 0.5), int(y + 0.5)), int(radius + 0.5)))
    rectangles = removeDuplicities(rectangles)
    result = matchCircRect(circles=circles, rectangles=rectangles)
    if debug:
        for rect in rectangles:
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(frame, [box], 0, (255, 0, 0), 2)
        for cir in circles:
            (x, y), radius = cir
            center = (int(x), int(y))
            radius = int(radius)
            cv2.circle(frame, center, radius, (0, 255, 0), 2)
        if result:
            (x1, y1), (x2, y2) = result
            cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                     (0, 0, 255), 3)
    return result
Example #23
def mesr_text(img,
              del_text_path,
              image_cnt,
              words_name,
              y_value=10,
              x_value=30):
    """
    Watershed algorithm
    :param deep:
    :param x_value:
    :param img:
    :param del_text_path:
    :param image_cnt:
    :param words_name:
    :param a_value:
    :return:
    """
    # B, G, R = cv2.split(img)
    mser = cv2.MSER()
    regions = mser.detect(img)
    hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    """寻找最大区域"""
    for index, hull in enumerate(hulls):
        x_min = 256
        x_max = -1
        y_min = 256
        y_max = -1
        for point in hull:
            x_min = int_min(x_min, point[0][0])
            x_max = int_max(x_max, point[0][0])
            y_min = int_min(y_min, point[0][1])
            y_max = int_max(y_max, point[0][1])
        tmp = img[y_min:y_max, x_min:x_max]
        sub_path = os.path.join(del_text_path, 'words_' + str(image_cnt))
        if make_dir(sub_path):
            final_path = os.path.join(sub_path, words_name.split('.')[0])
            if make_dir(final_path):
                cv2.imwrite(
                    os.path.join(
                        final_path,
                        words_name.split('.')[0] + '_' + str(index) + '.' +
                        words_name.split('.')[1]), tmp)
Example #24
def computeMSER(grayImg):
    mser = cv2.MSER(1, 20, 200000, 1, 0.01, 200, 1.01, 0.003, 5)
    regions = mser.detect(grayImg)
    regionNum = len(regions)
    boxes = np.zeros((regionNum, 4))
    minAreaBoxes = np.zeros((regionNum, 8))

    for i in range(0, regionNum):
        region = np.array(regions[i])
        minXY = np.min(region, 0)
        maxXY = np.max(region, 0)
        boxes[i, :] = [
            minXY[0] + 1, minXY[1] + 1, maxXY[0] - minXY[0] + 1,
            maxXY[1] - minXY[1] + 1
        ]
        minAreaBox = np.int0(cv2.cv.BoxPoints(cv2.minAreaRect(regions[i])))
        minAreaBox = np.reshape(minAreaBox, (8))
        minAreaBoxes[i, :] = minAreaBox

    return boxes, minAreaBoxes
Example #25
def find_contours_MSER(img, minsize, maxsize, find_characters, margins):
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#
#   DEFAULTS
#     CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400,
#           double _max_variation=0.25, double _min_diversity=.2,
#           int _max_evolution=200, double _area_threshold=1.01,
#           double _min_margin=0.003, int _edge_blur_size=5 );
	delta = 5
	minArea = minsize
	maxArea = maxsize
	maxVariation = 0.1
	minDiversity = 0.1
	maxEvolution = 200
	areaThreshold = 1.01
	minMargin = 0.003
	edgeBlurSize = 5
	mser = cv2.MSER(delta, minArea, maxArea, maxVariation, minDiversity, maxEvolution, areaThreshold, minMargin, edgeBlurSize)

	contours = mser.detect(gray, None)
	buttons, stats = process_contours(contours, minsize, maxsize, img, "gray -> MSER", find_characters, margins)
	return buttons
Example #26
    def try_mser(self):
        delta = self.delta_scale.get()
        min_area = self.min_area_scale.get()
        max_area = self.max_area_scale.get()

        image = self.mser_area
        red_c = image[:,:,0]
        red_c = cv2.equalizeHist(red_c)

        det_img = image.copy()

        mser = cv2.MSER(delta, _min_area=min_area, _max_area=max_area)
        regions = mser.detect(red_c)
        cp = list()
        new_c = np.zeros(self.mser_area.shape, dtype=np.uint8)
        for r in regions:
            for point in r:
                cp.append(point)
                det_img[point[1], point[0], 0] = 0
                det_img[point[1], point[0], 1] = 0
                det_img[point[1], point[0], 2] = 204
                #new_c[point[1], point[0]] = 255

        self.update_mser_image(det_img)
Example #27
    def test_mser(self):

        img = self.get_sample('cv/mser/puzzle.png', 0)
        smallImg = [[
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
        ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255,
                        255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 255, 255, 255,
                        255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 255, 255, 255,
                        255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255,
                        255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 0, 0, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 0, 0, 255, 255, 255,
                        255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ],
                    [
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
                        255, 255, 255, 255
                    ]]
        thresharr = [0, 70, 120, 180, 255]
        kDelta = 5
        np.random.seed(10)

        for i in range(100):

            use_big_image = int(np.random.rand(1, 1) * 7) != 0
            invert = int(np.random.rand(1, 1) * 2) != 0
            binarize = int(np.random.rand(1, 1) *
                           5) != 0 if use_big_image else False
            blur = True  #int(np.random.rand(1,1)*2) != 0 #binarized images are processed incorrectly
            thresh = thresharr[int(np.random.rand(1, 1) * 5)]
            src0 = img if use_big_image else np.array(smallImg).astype('uint8')
            src = src0.copy()

            kMinArea = 256 if use_big_image else 10
            kMaxArea = int(src.shape[0] * src.shape[1] / 4)

            mserExtractor = cv2.MSER(kDelta, kMinArea, kMaxArea)
            if invert:
                cv2.bitwise_not(src, src)
            if binarize:
                _, src = cv2.threshold(src, thresh, 255, cv2.THRESH_BINARY)
            if blur:
                src = cv2.GaussianBlur(src, (5, 5), 1.5, 1.5)
            minRegs = 7 if use_big_image else 2
            maxRegs = 1000 if use_big_image else 15
            if binarize and (thresh == 0 or thresh == 255):
                minRegs = maxRegs = 0
            msers = mserExtractor.detect(src)
            nmsers = len(msers)
            self.assertLessEqual(minRegs, nmsers)
            self.assertGreaterEqual(maxRegs, nmsers)
Example #28
def processImage(img, altitude, lon, lat, groll, gpitch, gyaw, resize_factor = 4, isActive = False, toBlur = 0, mser_params = {'_delta' : 4, '_min_area' : 70, '_max_area' : 87616, '_max_variation' : 0.1, '_min_diversity' : 0.2, '_max_evolution' : 200, '_area_threshold' : 1.0, '_min_margin' : 0.003, '_edge_blur_size' : 5}, downsample = 2):
    # Runs mser on input image, stores results to DB
    debug = False
    starttime = time()
    #Load image 
    try:
        # Change exif orientation flag to '1' (Horizontal)
        fixExif(img)
        frameOrig = cv2.imread(img, cv2.CV_LOAD_IMAGE_COLOR)
        
        mser_params['_delta'] = 10
        mser_params['_min_area'] = 578/(downsample**2)
        mser_params['_max_area'] = 87616/(downsample**2)
         
        mser_params['_max_variation'] =  0.1
        mser_params['_min_diversity'] =  0.4 
        mser_params['_area_threshold'] =  1.0
        mser_params['_min_margin'] =  0.003
        
        newshape = (frameOrig.shape[1]/downsample, frameOrig.shape[0]/downsample)
        frame = cv2.resize(frameOrig,newshape)

    except Exception as e:
        print "unable to open file: ",e
        print traceback.format_exc()
        return 0

    
    start = datetime.datetime.now()
    db = MySQLdb.connect(host = auvsiDB.host, user = auvsiDB.user, passwd = auvsiDB.passwd, db = auvsiDB.db)
    cur = db.cursor()
    
    # Get image ID from the DB
    sql = "INSERT INTO `{}`.`images` (`id`, `time`, `milisec`, `alt`, `lon`, `lat`, `groll`, `gpitch`, `gyaw`, `src`, `sent`)".format(auvsiDB.db)
    sql = sql + "VALUES (NULL, '{}',  MICROSECOND('{}'), '{}', '{}', '{}', '{}', '{}', '{}', '{}', 'false');".format(str(start), str(start), altitude, lon, lat, groll, gpitch, gyaw,str(img))
    #print sql
    cur.execute(sql)
    iidx = cur.lastrowid
    firstDB = time()
    
    # Creates a new frame to be resized and sent to the ground
    shape = (frameOrig.shape[1]/resize_factor, frameOrig.shape[0]/resize_factor)
    resizeImage(frameOrig, shape, img_dir, img_prefix, iidx, ext)
    
    # Runs MSER if isActive is True; otherwise the picture is only resized and sent
    areas = list()
    newRects = list()
    
    # Runs MSER
    if isActive == True:
        
        if toBlur == 0:
            frame2 = frame
        else:
            frame2 = cv2.medianBlur(frame, 7)
        mser_channel = genMserChannel(frame2, alpha = 0.2, beta = 20)
        if debug == True: cv2.imwrite("{}{}{}_mser.{}".format(img_dir, img_prefix, iidx, ext), mser_channel)
        
        mser = cv2.MSER(**mser_params)
        areas = mser.detect(mser_channel)
        newRects = mserToRects(areas, maxX = frame.shape[1], maxY = frame.shape[0])
        
    # Creates a copy of the image to draw areas created by MSER
    if debug == True:
        tmpframe = frame.copy()        
        hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in areas]
        cv2.polylines(tmpframe, hulls, 1, (0, 255, 0), thickness=1)

    cropnum = 0
    w = 181 # w = int(128*math.sqrt(2)) = 181
    # Insert targets to DB
    for targ in newRects:
        y_min = targ[1]*downsample
        y_max = (targ[1] + targ[3])*downsample
        x_min = targ[0]*downsample
        x_max = (targ[0] + targ[2])*downsample
        
        if debug == True: cv2.rectangle(tmpframe, (targ[0], targ[1]), (targ[0]+targ[2], targ[1]+targ[3]), (0, 0, 255), thickness=1)
        cropped  = frameOrig[(y_min):(y_max), (x_min):(x_max)]
        if np.std(cropped) > 4:
            resized = cv2.resize(cropped, (w,w))
            cv2.imwrite("{}{}{}_{}.{}".format(crop_dir, crop_prefix, iidx, cropnum, crop_ext), resized)
    
            sql = "INSERT INTO `{}`.`crops` (`fatherid`, `id`, `c1x`, `c1y`, `c2x`, `c2y`, `c3x`, `c3y`, `c4x`, `c4y`) VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}');".format(auvsiDB.db, iidx, cropnum, x_min, y_min, x_max, y_min, x_max, y_max, x_min, y_max)
            cur.execute(sql)
            cropnum+=1
    
    db.commit()
    cur.close()
    db.close()
            
    if debug == True: cv2.imwrite("{}{}{}_debug.{}".format(img_dir, img_prefix, iidx, ext), tmpframe)
    print "Ended: ",img, "\ttook: ", time()-starttime,"\ttargets: ",cropnum
Example #29
        kernel_size = cv2.getTrackbarPos(tb_kernel_size, window) * 2 + 1
        block_size = cv2.getTrackbarPos(tb_block_size, window) * 2 + 1
        threshold = cv2.getTrackbarPos(tb_threshold, window) / 100.0

        if image_switch != last_image_switch:
            if image_switch in star_detectors:
                candidate_finder.setDetector(star_detectors[image_switch])
                candidate_finder.setImage(image)
            last_image_switch = image_switch

        if image_switch == 0:
            result = image
        elif image_switch == 3:
            mser = None
            if int(cv2.__version__.split('.')[0]) == 2:
                mser = cv2.MSER(1, 1, 30)
            else:
                mser = cv2.MSER_create(1, 1, 30)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            msers = mser.detect(gray)
            result = image.copy()
            cv2.polylines(result, msers, True, (0, 0, 255))
        elif image_switch == 4:
            result = image.copy()

            candidate_finder.drawCandidates(result)
        elif image_switch == 5:
            Configuration.surf_threshold = threshold * 100

            candidate_finder.setImage(image)
Example #30
import cv2
img = cv2.imread('docking_side.jpg', cv2.CV_LOAD_IMAGE_GRAYSCALE)
mser = cv2.MSER()
mser_areas = mser.detect(img)
for mser_region in mser_areas:
    ellipse = cv2.fitEllipse(mser_region)
    cv2.ellipse(img, ellipse, (0, 255, 0), 2)

cv2.imshow("img", img)
cv2.waitKey(0)
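Example #30 relies on OpenCV 2.x names (cv2.CV_LOAD_IMAGE_GRAYSCALE, cv2.MSER()) and draws a colour ellipse onto a single-channel image. A hedged sketch of the same idea for OpenCV 3.x and later, converting to BGR before drawing; 'docking_side.jpg' is kept from the original:

# OpenCV 3.x+ sketch of Example #30 (same idea, modern API names).
import cv2

gray = cv2.imread('docking_side.jpg', cv2.IMREAD_GRAYSCALE)
vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

mser = cv2.MSER_create()
regions, _ = mser.detectRegions(gray)
for region in regions:
    if len(region) >= 5:                 # fitEllipse needs at least 5 points
        cv2.ellipse(vis, cv2.fitEllipse(region), (0, 255, 0), 2)

cv2.imshow('img', vis)
cv2.waitKey(0)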