Ejemplo n.º 1
0
def completeClassification(image, svg_file_path):
    """Classify the sketch strokes according to the allocated stamps.

    Detects the stamp symbols, extracts stroke contours from the
    cleaned sketch, assigns every contour to its nearest symbol,
    draws the result and writes it out as an SVG file.

    Returns the list of classified strokes.
    """
    draw.showImage(image)

    # Locate the stamp symbols in the sketch
    detections = sd.detectSymbols(image,
                                  symbolType='stamp',
                                  method='t',
                                  filterColor='red')
    beaconCoords = sd.beaconCoordinates(detections)
    svgBeacons = getBeacons(beaconCoords, image)

    # Remove the symbols, clean and segment the sketch, then extract
    # the contours from the resulting clean image
    sketchContours = ss.segmentateSketch(image, 'skeleton')

    # Classify each contour by its distance to the detected symbols
    classifiedStrokes = strokeClassification(image, sketchContours,
                                             detections, knn=1)
    draw.drawHandStrokes(image, classifiedStrokes, oneByOne=False)

    strokesToSVG(classifiedStrokes, svgBeacons, image, svg_file_path)

    return classifiedStrokes
Ejemplo n.º 2
0
def segmentateSketch(image, method='skeleton'):
    """Return the main contours found inside *image*.

    With the 'skeleton' method every sketch stroke is first thinned
    down to a one-pixel-wide line before contours are extracted.
    """
    workingCopy = image.copy()
    draw.showImage(workingCopy)
    # Strip the colored stamp symbols before extracting contours
    stampFree = cleanSymbols(workingCopy,
                             colorSpace='hsv',
                             color='red')
    return getContours(stampFree, method)
Ejemplo n.º 3
0
def getSkeleton(image):
    """Thin every stroke in the sketch down to a one-pixel-wide line.

    The image is binarized, inverted so the (dark) strokes become the
    foreground, and then skeletonized.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    draw.showImage(binary)
    # Invert so strokes are foreground, then map 255 -> 1 because
    # skeletonize expects a 0/1 (boolean-like) image
    binary = cv2.bitwise_not(binary)
    binary[binary == 255] = 1
    return img_as_ubyte(skeletonize(binary))
Ejemplo n.º 4
0
import cv2
from sketchProcessor.helperLibraries.utils import draw
from sketchProcessor.helperLibraries.utils import simple_obj_det as sd
from sketchProcessor.helperLibraries.utils import sketch_segmentation as ss
from sketchProcessor.helperLibraries.utils import contour_classification as cc

# Demo: run the complete stroke-classification pipeline on two
# sample sketch maps that carry stamp symbols.
sketchMap = cv2.imread('./testData/sketchMaps/stamps/GoodOne.bmp')
draw.showImage(sketchMap)
svg = cc.completeClassification(sketchMap, fileName='preprocessedSketchMap50')

# Second sample: classified twice under different output names
sketchMap = cv2.imread('./testData/sketchMaps/stamps/s0.bmp')
cc.completeClassification(sketchMap, fileName='hans')
svg = cc.completeClassification(sketchMap, fileName='preprocessedSketchMap30')
Ejemplo n.º 5
0
def regionDetectionColor(image, filter='hsv', color='red', display=False):
    """Return padded bounding boxes around the red or dark-blue objects
    in *image* (usually the stamps).

    Parameters
    ----------
    image : BGR image (numpy array) to scan.
    filter : 'hsv' thresholds in HSV space; any other value uses the RGB ranges.
    color : 'blue' selects the blue color ranges, anything else the red ones.
    display : when True, intermediate masks and box stages are shown.
    """
    # Dilation parameters; very large images get a bigger kernel and
    # more iterations so the mask regions still connect
    iterations = 1
    kernel = np.ones((3, 3), np.uint8)
    h, w, c = image.shape
    if w > 6000 or h > 6000:
        iterations = 4
        kernel = np.ones((5, 5), np.uint8)

    # Ranges determined manually with the range-detector script in imutils.
    # NOTE(review): the red HSV range only covers hues 170-255 — in OpenCV
    # red also wraps around hue 0; confirm low-hue reds are not needed.
    if color == 'blue':
        hsvMin = (103, 28, 144)
        hsvMax = (166, 255, 255)
        rgbMin = (174, 79, 0)
        rgbMax = (255, 166, 219)
    else:
        hsvMin = (170, 15, 0)
        hsvMax = (255, 255, 255)
        rgbMin = (0, 0, 189)
        rgbMax = (232, 199, 255)

    # Build a mask keeping only the pixels inside the selected color range
    if filter == 'hsv':
        converted = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(converted, hsvMin, hsvMax)
    else:
        mask = cv2.inRange(image.copy(), rgbMin, rgbMax)
    mask = cv2.dilate(mask, kernel, iterations=iterations)
    if display:
        draw.showImage(mask)

    # Extract external contours from the mask. grab_contours normalizes
    # the cv2.findContours return value across OpenCV 2/3/4 — the old
    # "cnts[1] unless cv2" unpacking returns the hierarchy on OpenCV 4.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = ss.remove_small_contours(cnts)

    # Bounding boxes from the extracted contours...
    rawBoxes = getContourBoundlingBox(cnts, image=image)
    # ...merge boxes whose overlapping area is at least 15%...
    mergedBoxes = generateRegions(np.array(rawBoxes), 0.15)
    # ...merging leaves some redundant boxes: sort by area and drop those
    # whose overlapping area is greater than 10%...
    keptBoxes = nms.non_max_suppression_fast_area(mergedBoxes, 0.1)
    # ...and pad the survivors to increase the model accuracy downstream.
    paddedBoxes = padBoxes(image, keptBoxes, increment=10)

    if display:
        # Show every stage of the bounding-box pipeline
        for stage in (rawBoxes, mergedBoxes, keptBoxes, paddedBoxes):
            displayBoundingBoxes(image, stage, path="./", name="test")
    return paddedBoxes