Example #1
    def get_board_boundaries(self, image):
        """
        Function gets rid of low intensity pixel values in
        color spectrum and sets them to 0. Then it calculates
        histogram of green color in x and y axis from image.
        Then we calculate 1-th order difference on this
        histogram

        Args:
            image(numpy.ndarray): Loaded image

        Retunrs:
            Returns extremes from calculated histogram
        """
        (width, height, _) = image.shape
        vlines = np.zeros(width)
        hlines = np.zeros(height)
        image[:, :, 1] = cv.inRange(image[:, :, 1], self.lo_thresh,
                                    self.hi_thresh)
        vlines = cv.reduce(image[:, :, 1],
                           1,
                           cv.cv.CV_REDUCE_SUM,
                           dtype=cv.CV_32S)
        hlines = cv.reduce(image[:, :, 1],
                           0,
                           cv.cv.CV_REDUCE_SUM,
                           dtype=cv.CV_32S)
        vlines = map(lambda *row: list(row), *vlines)
        y = np.diff(vlines[0])
        x = np.diff(hlines[0])
        return (self.__get_extremes(x), self.__get_extremes(y))
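The cv.cv.CV_REDUCE_SUM constant used above only exists in the old OpenCV 2.x bindings. Below is a minimal standalone sketch of the same idea (threshold the green channel, project it with cv2.reduce, take the first-order difference) using the current constants; the threshold defaults and the argmax/argmin edge picking are assumptions that stand in for the original __get_extremes helper, which is not shown.

import cv2
import numpy as np

def board_boundaries_sketch(image, lo_thresh=100, hi_thresh=255):
    # Keep only sufficiently green pixels
    green = cv2.inRange(image[:, :, 1], lo_thresh, hi_thresh)
    # Project the mask onto the x and y axes
    col_sum = cv2.reduce(green, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S).ravel()
    row_sum = cv2.reduce(green, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S).ravel()
    # Strong steps in the first-order difference mark the board edges
    dx = np.diff(col_sum.astype(np.int64))
    dy = np.diff(row_sum.astype(np.int64))
    return ((int(np.argmax(dx)), int(np.argmin(dx))),
            (int(np.argmax(dy)), int(np.argmin(dy))))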
Example #2
 def _find_pipe(self):
   havg = cv2.reduce(self.field_image, 0, cv2.cv.CV_REDUCE_AVG)[0]
   for x in xrange(len(havg)):
     if havg[x] > kPipePixelThreshold:
       continue
     # Find width
     width = 0
     while (x + width < len(havg)) and (havg[x + width] <= kPipePixelThreshold):
       width += 1
     if not (kPipeMinWidth <= width <= kPipeMaxWidth):
       x += width
       continue
     # Find opening
     pipe_slice = self.field_image[:, x:x + width]
     vavg = cv2.reduce(pipe_slice, 1, cv2.cv.CV_REDUCE_AVG)
     def find_first_gap_row(vavg):
       for y in xrange(len(vavg)):
         if vavg[y][0] > kPipePixelThreshold:
           return y
       return None
     top_h = find_first_gap_row(vavg)
     bottom_h = find_first_gap_row(vavg[::-1])
     if top_h is None or bottom_h is None:
       continue
     if len(vavg) - top_h - bottom_h < kPipeMinOpening:
       continue
     return x - 2, width + 4, top_h, bottom_h
   return None, None, None, None
def trim_image(image, padding=10):
    nr_rows = image.shape[0]
    nr_cols = image.shape[1]

    rows_reduced = cv2.reduce(image, 1, cv2.REDUCE_MIN).reshape(-1)
    cols_reduced = cv2.reduce(image, 0, cv2.REDUCE_MIN).reshape(-1)

    # First, determine the number of blanc lines from the top
    threshold = 250
    blancs_top = count_blancs_until_component(rows_reduced,
                                              min_threshold=threshold)
    new_top = max(blancs_top - padding, 0)

    blancs_bottom = count_blancs_until_component(rows_reduced[::-1],
                                                 min_threshold=threshold)
    new_bottom = min(nr_rows - blancs_bottom + padding, nr_rows)

    blancs_left = count_blancs_until_component(cols_reduced,
                                               min_threshold=threshold)
    new_left = max(blancs_left - padding, 0)

    blancs_right = count_blancs_until_component(cols_reduced[::-1],
                                                min_threshold=threshold)
    new_right = min(nr_cols - blancs_right + padding, nr_cols)

    # Crop image
    trimmed_image = image[new_top:new_bottom, new_left:new_right]

    return trimmed_image
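count_blancs_until_component is called above but not defined in this snippet; a plausible reconstruction (an assumption based only on how it is used, not the original helper) counts leading blank rows or columns, i.e. entries whose REDUCE_MIN value stays above the threshold, until ink appears.

def count_blancs_until_component(reduced, min_threshold=250):
    # Count leading entries that are still blank (near-white minimum intensity)
    count = 0
    for value in reduced:
        if value >= min_threshold:
            count += 1
        else:
            break
    return count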
Example #4
    def get_board_boundaries(self, image):
        """
        Function gets rid of low intensity pixel values in
        color spectrum and sets them to 0. Then it calculates
        histogram of green color in x and y axis from image.
        Then we calculate 1-th order difference on this
        histogram

        Args:
            image(numpy.ndarray): Loaded image

        Retunrs:
            Returns extremes from calculated histogram
        """
        (width, height, _) = image.shape
        vlines = np.zeros(width)
        hlines = np.zeros(height)
        image[:, :, 1] = cv.inRange(image[:, :, 1],
                                    self.lo_thresh,
                                    self.hi_thresh)
        vlines = cv.reduce(image[:, :, 1], 1,
                           cv.cv.CV_REDUCE_SUM,
                           dtype=cv.CV_32S)
        hlines = cv.reduce(image[:, :, 1], 0,
                           cv.cv.CV_REDUCE_SUM,
                           dtype=cv.CV_32S)
        vlines = map(lambda *row: list(row), *vlines)
        y = np.diff(vlines[0])
        x = np.diff(hlines[0])
        return (self.__get_extremes(x), self.__get_extremes(y))
Example #5
def words_seperator(lower, upper, cnt_lines):
    hist2 = cv2.reduce(rotated[upper:lower, :], 0, cv2.REDUCE_AVG).reshape(-1)
    left = 0
    right = len(hist2) - 1
    while hist2[left] <= min(hist2):
        left += 1
    while hist2[right] <= min(hist2):
        right -= 1
    x = left
    prev_x = left
    cnt_words = 1
    while x < right:
        while x < len(hist2) and hist2[x] <= min(hist2):
            x += 1
        prev_x = x
        while x < len(hist2) and hist2[x] > min(hist2):
            x += 1
        if x - prev_x > 5:
            cv2.line(output, (prev_x, lower), (prev_x, upper), (255, 0, 0))
            cv2.line(output, (x, lower), (x, upper), (255, 0, 0))
            hist3 = cv2.reduce(
                rotated[upper - BUFFER:lower + BUFFER,
                        prev_x - BUFFER:x + BUFFER], 1,
                cv2.REDUCE_AVG).reshape(-1)
            letter_upper = 0
            letter_lower = len(hist3) - 1
            while hist3[letter_upper] <= min(hist3):
                letter_upper += 1
            while hist3[letter_lower] <= min(hist3):
                letter_lower -= 1
            cv2.imwrite(
                "word/words_" + str(cnt_lines) + str(cnt_words) + ".png",
                output2[upper + letter_upper - BUFFER:upper + letter_lower +
                        BUFFER, prev_x - BUFFER:x + BUFFER])
            cnt_words += 1
Example #6
def findpeaks(image, dist, horizontal_profile=True):
    if (horizontal_profile):
        intensity_hist = cv2.reduce(image, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    else:
        intensity_hist = cv2.reduce(image, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S).T
    
    ## smoothen the intensity histogram using savgol to avoid noise
    x = np.reshape(intensity_hist, len(intensity_hist))
    window_size = 5
    if (horizontal_profile):
        window_size = int(image.shape[0]/22)  ## magic num!
    else:
        window_size = int(image.shape[1]/22)  ## magic num!
    if (window_size%2 == 0):
        window_size = window_size+1
    intensity_hist_s = ss.savgol_filter(x, window_size, 5) 
    intensity_hist_s = intensity_hist_s.astype(int)

    ## find peaks in smooth plot. Prominence is via observation - if a line has just
    ## a few letters it will create a small peak. We enforce roughly 1/10 of the line
    ## should be occupied with letters
    ## distance is to avoid quirks. We use 5 degree poly to smoothen so it leads to dual
    ## peaks at times close together. Distance specification is a heuristic to avoid
    ## getting too close peaks. Ideally we should skip that, but use this routine recursively
    ## by plugging in the median of linewidths back into the find_peaks as distance till
    ## convergence
    maxval = np.max(intensity_hist_s)
    peaks, _ = ss.find_peaks(intensity_hist_s, prominence=maxval/10, distance=dist)
    return intensity_hist, peaks, intensity_hist_s
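A short usage sketch for findpeaks; the input file, the Otsu binarization and the midpoint-based line cutting are illustrative assumptions, and ss refers to the scipy.signal import the example already relies on.

import cv2
import numpy as np

page = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
binary = cv2.threshold(page, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
hist, peaks, smoothed = findpeaks(binary, dist=20, horizontal_profile=True)
# Cut midway between neighbouring peaks to obtain one strip per text line
cuts = [0] + [(a + b) // 2 for a, b in zip(peaks[:-1], peaks[1:])] + [binary.shape[0]]
lines = [binary[cuts[i]:cuts[i + 1], :] for i in range(len(cuts) - 1)]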
    def getEqnLayout(self, thresh, img):
        H,W = thresh.shape[:2]
        hist = cv2.reduce(thresh, 0, cv2.REDUCE_AVG).reshape(-1)
        th = 0
        lefts = [y for y in range(W-1) if hist[y]<=th and hist[y+1]>th]
        rights = [y for y in range(W-1) if hist[y]>th and hist[y+1]<=th]

        uppers = []
        lowers = []
        for i in range(0,len(lefts)):
            temp = np.copy(thresh[:, lefts[i]:rights[i] + 1])
            H,W = temp.shape[:2]
            hist = cv2.reduce(temp, 1, cv2.REDUCE_AVG).reshape(-1)
            th = 0
            uppers.append([y for y in range(H-1) if hist[y]<=th and hist[y+1]>th])
            lowers.append([y for y in range(H-1) if hist[y]>th and hist[y+1]<=th])

        chars = []
        numOfClumps = {}
        for i in range(0, len(lefts)):
            for j in range(0, len(uppers[i])):
                numOfClumps[(i,j)] = 0
                temp = np.copy(thresh[uppers[i][j]:lowers[i][j]+1, lefts[i]:rights[i]+1])
                contours= cv2.findContours(temp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
                rects = [cv2.boundingRect(c) for c in contours]
                rects = sorted(rects,key=lambda l:l[0])
                
                for k in range(0, len(rects)):
                    rects[k] = rects[k][0], rects[k][1], rects[k][0]+rects[k][2],\
                                rects[k][1]+rects[k][3]
                
                k = 0
                while k < len(rects)-1:
                    xmin1, xmax1 = rects[k][1], rects[k][3]
                    xmin2, xmax2 = rects[k+1][1], rects[k+1][3]
                    ymin1, ymax1 = rects[k][0], rects[k][2]
                    ymin2, ymax2 = rects[k+1][0], rects[k+1][2]
                    #print(xmin1, xmax1, xmin2, xmax2, ymin1, ymax1, ymin2, ymax2)
                    if (ymin1 <= ymin2 and ymax2 <= ymax1 or ymin2 <= ymin1 and ymax1 <= ymax2) and \
                         (abs(max(xmin1,xmin2) - min(xmax1,xmax2)) / \
                         abs(max(xmax1, xmax2) - min(xmin1,xmin2)) < 0.50) and \
                          ((xmax1-xmin1 <= 25 and ymax1-ymin1 <= 25) or \
                           (xmax2-xmin2 <= 25 and ymax2-ymin2 <= 25)):####:  
                             
                            rects[k] = list((min(ymin1, ymin2), min(xmin1, xmin2),
                                              max(ymax1,ymax2), max(xmax1, xmax2)))
                            del rects[k+1]
                    k += 1
                
                for cnt in range(0, len(rects)):
                    numOfClumps[(i,j)] += 1
                    chars.append(list(((i,j,cnt), lefts[i]+rects[cnt][0], uppers[i][j]+rects[cnt][1],\
                                      lefts[i]+rects[cnt][2], uppers[i][j]+rects[cnt][3])))   #doubt
                    '''cv2.rectangle(img, (chars[-1][1], chars[-1][2]), (chars[-1][3], chars[-1][4]), \
                          (0, 0, 255), 1)
                     
                cv2.imwrite('ltx/z0.png', img) '''          
                            
                                       
        return lefts, rights, uppers, lowers, numOfClumps, chars
Example #8
def crop(img):
    '''
    co=np.column_stack(np.where(img>0))
    x,y,w,h=cv2.boundingRect(co)
    img=img[x:x+w,y:y+h]
    '''
    y_sum = cv2.reduce(img, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)[0]
    i = 0
    j = img.shape[1] - 1
    y_norm = [(x) / max(y_sum) for x in y_sum]
    while (y_norm[i] <= 0.1 and i < j):
        i = i + 1
    while (y_norm[j] <= 0.1 and j > 0):
        j = j - 1
    x_sum = cv2.reduce(img, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    x_norm = [(x) / max(x_sum) for x in x_sum]
    k = 0
    l = img.shape[0] - 1
    while (x_norm[k][0] <= 0.1 and k < l):
        k = k + 1
    while (x_norm[l][0] <= 0.2 and l > 0):
        l = l - 1
    img1 = img[k:l, i:j]
    #cv2.imshow("a",resize(img1,500,500))
    #cv2.waitKey(0)
    return img1
Example #9
def image_projection(img):
    """
    """
    # change to gray
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Change to numpy array format
    nb = np.array(gray)

    x_sum = cv2.reduce(gray, 0, cv2.REDUCE_SUM, dtype=cv2.CV_64F)
    y_sum = cv2.reduce(gray, 1, cv2.REDUCE_SUM, dtype=cv2.CV_64F)

    # get height and width
    x = gray.shape[1]
    y = gray.shape[0]

    # divide the result by height and width
    x_sum = x_sum / y
    y_sum = y_sum / x

    x_sum = x_sum.transpose()

    x_arr = np.arange(x)
    y_arr = np.arange(y)

    xn = np.array(x_sum)
    yn = np.array(y_sum)
    x_peaks, _ = find_peaks(xn.ravel(), height=0, width=10)
    y_peaks, _ = find_peaks(yn.ravel(), height=0, width=10)

    return x_arr, xn, x_peaks, y_arr, yn, y_peaks
Example #10
    def histogram(self):
        """Add histograms to an image.

        A new stage of the image, *histogram*, is added.

        The histograms indicate the amount of ink in the horizontal
        rows and in the vertical columns.

        The histogram values are preserved as well.
        """

        if self.empty:
            return

        batch = self.batch
        boxed = self.boxed
        stages = self.stages
        rotated = self.stages["rotated"]
        self.histX = cv2.reduce(rotated, 0, cv2.REDUCE_AVG).reshape(-1)
        self.histY = cv2.reduce(rotated, 1, cv2.REDUCE_AVG).reshape(-1)

        if not batch or boxed:
            normalizedC = stages["normalizedC"]
            if not batch:
                histogram = normalizedC.copy()

                for (hist, vert) in ((self.histY, True), (self.histX, False)):
                    for (i, val) in enumerate(hist):
                        color = (int(val), int(2 * val), int(val))
                        index = (0, i) if vert else (i, 0)
                        value = (val, i) if vert else (i, val)
                        cv2.line(histogram, index, value, color, 1)
                stages["histogram"] = histogram
Example #11
def get_difference(image1, image2):
    '''
    Get difference between two images
    '''
    diff = image1.copy()
    cv.absdiff(image1, image2, diff)
    reduction = cv.reduce(diff, 0, cv.REDUCE_AVG)
    reduction = cv.reduce(reduction, 1, cv.REDUCE_AVG)
    return reduction
Example #12
def cropMoney(inputImage):
    """
    main function for RoI pre-processing
    :param inputImage: RMB bill image
    :return: PotentialRoIs: two potential RoIs from one side of the RMB bill
    """
    #find horizontal cutting point
    inputImage2 = np.copy(
        inputImage)  #copy to second var to keep the originality
    inputImage2[inputImage2 < 80] = 0
    verProjectedMoney = cv2.reduce(inputImage2,
                                   0,
                                   cv2.REDUCE_SUM,
                                   dtype=cv2.CV_32S)
    normalized = np.copy(verProjectedMoney)
    cv2.normalize(verProjectedMoney, normalized, 0, 255, cv2.NORM_MINMAX)
    normalized = normalized.astype(np.uint8)
    diff = abs(np.diff(normalized.astype(np.int32), 1, axis=1)).flatten()
    loc = np.argwhere(diff > 4)
    minHor = np.asscalar(np.min(loc))
    maxHor = np.asscalar(np.max(loc))

    #find vertical cutting point
    horProjectedMoney = cv2.reduce(inputImage2,
                                   1,
                                   cv2.REDUCE_SUM,
                                   dtype=cv2.CV_32S)
    normalized = np.copy(horProjectedMoney)
    cv2.normalize(horProjectedMoney, normalized, 0, 255, cv2.NORM_MINMAX)
    normalized = normalized.astype(np.uint8)
    diff = abs(np.diff(normalized.astype(np.int32), 0, axis=1)).flatten()
    loc = np.argwhere(diff > 50)
    minVer = np.asscalar(np.min(loc))
    maxVer = np.asscalar(np.max(loc))
    croppedImage = inputImage[minVer:maxVer, minHor:maxHor]

    widthScalledMoney = 1068
    heightScalledMoney = 420
    croppedImage = cv2.resize(croppedImage,
                              (widthScalledMoney, heightScalledMoney))
    flipped = np.flip(np.flip(np.copy(croppedImage), axis=0), axis=1)
    flipped = flipped.astype(np.uint8)
    minHorPotROI = potRoiCoordinate[0]
    minVerPotROI = potRoiCoordinate[1]
    maxHorPotROI = potRoiCoordinate[2]
    maxVerPotROI = potRoiCoordinate[3]
    potROI = np.copy(croppedImage[minVerPotROI:maxVerPotROI,
                                  minHorPotROI:maxHorPotROI])
    potRoiFlipped = np.copy(flipped[minVerPotROI:maxVerPotROI,
                                    minHorPotROI:maxHorPotROI])
    potROI = np.expand_dims(potROI, axis=0)
    potRoiFlipped = np.expand_dims(potRoiFlipped, axis=0)
    PotentialRoIs = np.concatenate((potROI, potRoiFlipped), axis=0)
    croppedImage = np.expand_dims(croppedImage, axis=0).astype(np.uint8)
    return PotentialRoIs, croppedImage
Example #13
def start_detection(images):
	result.config(text="NO BREAKAGE")
	img = cv2.imread(images)
	gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	edges = cv2.Canny(gray,50,150,apertureSize = 3)
	edges = cv2.GaussianBlur(edges,(5,5),0)

	k_square = np.ones((5,5),np.uint8)
	k_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
	thresh = cv2.dilate(edges,k_ellipse,iterations=1)
	thresh = cv2.erode(thresh,k_square,iterations=2)
	minLineLength = 100
	maxLineGap = 10
	lines = cv2.HoughLinesP(thresh,1,np.pi/180,100,minLineLength,maxLineGap)
	x_sum = cv2.reduce(thresh, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
	y_sum = cv2.reduce(thresh, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
	if lines is not None:
	    for line in lines:
	        for x1,y1,x2,y2 in line:
	            cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)

	h=img.shape[0]
	w= img.shape[1]
	flag1=0
	flag2=0
	flag3=0
	count = 0
	n=0
	p=0
	for i in range(0,h-5,5):
	    flag1=0
	    flag2=0
	    flag3=0
	    for j in range(0,w-5,5):
	        count=0;
	        for k in range(i,i+5):
	            for l in range(j,j+5):
	                p=n
	                if img[k][l][0]==0 and img[k][l][1]==255 and img[k][l][2]==0:
	                    n=j
	                    #count=count+1
	                    #print(i,"    ",j,"    ",img[k][l]," ")
	            if (n-p)>30:
	                flag3=1
	                break

	    if flag3==1:
	        break

	if flag3==1:
		result.config(text="BREAKAGE DETECTED")
	ima=images.replace('.jpg','res.jpg')
	cv2.imwrite(ima,img)
	pro_image(ima)
def segment(image, output_directory, padding=10, runmode=1):
    lines_directory = output_directory + '/lines'
    words_li_li = []

    # 1 = column reduction.
    # CV_REDUCE_AVG instead of sum, because we want the normalized number of pixels
    histogram = cv2.reduce(image, 1, cv2.REDUCE_AVG)
    # Transpose column vector into row vector
    histogram = histogram.reshape(-1)

    lookahead = 30
    line_peaks = peakdetect(histogram, lookahead=lookahead)

    remove_directory(lines_directory)
    ensure_directory(lines_directory)

    lines = segment_image_into_lines(image,
                                     line_peaks,
                                     lines_directory,
                                     padding=padding,
                                     runmode=runmode)

    line_idx = 0
    for line in lines:
        line_histogram = cv2.reduce(line, 0, cv2.REDUCE_AVG)

        # Transpose column vector into row vector
        line_histogram = line_histogram.reshape(-1)
        lookahead = 30
        word_peaks = peakdetect(line_histogram, lookahead=lookahead)

        if runmode > 1:
            plt.plot(line_histogram)
            plt.title('Line=' + str(line_idx))
            plt.savefig(lines_directory + '/histogram_' + str(line_idx) +
                        '.jpg')
            plt.clf()

        words_directory = lines_directory + '/line_' + str(line_idx)
        remove_directory(words_directory)
        ensure_directory(words_directory)

        words = segment_line_into_words(line,
                                        line_idx,
                                        word_peaks,
                                        words_directory,
                                        padding=padding,
                                        runmode=runmode)

        line_idx = line_idx + 1
        words_li_li.append(words)

    return words_li_li
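peakdetect here is an external helper that returns a pair of (index, value) lists for maxima and minima; if it is not available, a rough stand-in built on scipy.signal.find_peaks (a different algorithm, so the lookahead tuning does not carry over directly) might look like this.

import numpy as np
from scipy.signal import find_peaks

def peakdetect(y, lookahead=30):
    # Return ([(x, y), ...] maxima, [(x, y), ...] minima), roughly matching
    # the interface the segmentation code above expects.
    y = np.asarray(y, dtype=float)
    maxima, _ = find_peaks(y, distance=lookahead)
    minima, _ = find_peaks(-y, distance=lookahead)
    return ([(int(i), y[i]) for i in maxima],
            [(int(i), y[i]) for i in minima])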
Example #15
def find_notes(staff_crops, valid_blobs):
    note_rects = [[] for _ in range(valid_blobs)]
    for i in range(valid_blobs):
        staff_crop = staff_crops[valid_blobs - 1 - i]

        x_distribution = cv2.reduce(staff_crop,
                                    0,
                                    cv2.REDUCE_SUM,
                                    dtype=cv2.CV_32S)[0]  # column sum

        min = staff_crop.shape[1] * 255
        x_1 = -1
        for val in x_distribution[50:len(x_distribution) - 50]:
            if val < min:
                min = val
        min += 1800
        # print 'notes %d' % (i+1)
        # print "min: %d" % min

        for j in range(len(x_distribution)):
            val = x_distribution[j]
            if val < min and x_1 != -1 and j - x_1 > 5:
                # print "ENDS from %d to %d: %d" % (x_1, j, val)
                longest_consecutive = 0
                count = 0
                y_start = 0
                y_end = staff_crop.shape[0]
                note_horizontal_crop = staff_crop[0:staff_crop.shape[0], x_1:j]
                y_distribution = np.array(
                    cv2.reduce(note_horizontal_crop,
                               1,
                               cv2.REDUCE_SUM,
                               dtype=cv2.CV_32S).flatten())  # row sum
                for k in range(len(y_distribution)):
                    row_sum = y_distribution[k]
                    if row_sum > 0:
                        if count == 0:
                            y_start_temp = k
                        count += 1
                    else:
                        if count > longest_consecutive:
                            longest_consecutive = count
                            y_start = y_start_temp
                            y_end = k
                        count = 0
                note_rects[valid_blobs - 1 - i].append(
                    ((x_1, y_start), (j, y_end)))
                x_1 = -1
            elif val > min and x_1 == -1:
                # print "STARTS at %d: %d" % (j, val)
                x_1 = j
    return note_rects
Example #16
def fixRotation(image):
    mask = np.uint8(np.where(image >= 10, 0, 1))
    row_counts = cv2.reduce(mask, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32SC1)
    col_counts = cv2.reduce(mask, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32SC1)
    rows = row_counts.flatten().tolist()
    cols = col_counts.flatten().tolist()
    up = sum(rows[:5])
    down = sum(rows[-5:])
    left = sum(cols[:5])
    right = sum(cols[-5:])
    if (left > up and left > down) or (right > up and right > down):
        image = ndimage.rotate(image, 90)
    return image
Example #17
def resize_to_1080(screen,
                   crop_bars: bool,
                   interpolation=cv2.INTER_NEAREST,
                   respect_width=True):
    if crop_bars:
        gimg = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        hline = np.ravel(cv2.reduce(gimg, 0, cv2.REDUCE_SUM,
                                    dtype=cv2.CV_32S)) > 0
        vline = np.ravel(
            cv2.reduce(gimg[:, 500:], 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)) > 0
        topbar = np.argmax(vline)
        if topbar > 5:
            bottombar = np.argmax(vline[-5::-1]) + 4
        else:
            bottombar = np.argmax(vline[::-1])
        leftbar, rightbar = np.argmax(hline), np.argmax(hline[::-1])

        if screen.shape[0] - (topbar + bottombar) > 250 and screen.shape[1] - (
                leftbar + rightbar) > 250:
            screen = screen[topbar:screen.shape[0] - bottombar,
                            leftbar:screen.shape[1] - rightbar, ]

    if respect_width:
        if screen.shape[0] == 1080:
            return screen

        scale = 1080 / screen.shape[0]
        if screen.shape[1] * scale > 2560:
            scale = 2560 / screen.shape[1]

        cscale = min(scale, 2)
        scaled = cv2.resize(screen, (0, 0),
                            fx=cscale,
                            fy=cscale,
                            interpolation=interpolation)
        if scale <= 2:
            return scaled
        else:
            return cv2.copyMakeBorder(
                scaled,
                0,
                max(1080 - scaled.shape[0], 0),
                0,
                max(1920 - scaled.shape[1], 0),
                cv2.BORDER_CONSTANT,
                value=(0, 0, 0),
            )
    else:
        return cv2.resize(screen, (1920, 1080), interpolation=interpolation)
Example #18
def calculateDifference(imageA, imageB, width, height):
    d = 0
    vectorA = cv2.reduce(imageA, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    vectorB = cv2.reduce(imageB, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)

    for i in range(len(vectorA[0])):
        pixelA = vectorA[0][i]
        pixelB = vectorB[0][i]

        db = pixelA[0] - pixelB[0]
        dg = pixelA[1] - pixelB[1]
        dr = pixelA[2] - pixelB[2]

        d = d + gmpy2.isqrt(dr * dr + dg * dg + db * db)
    return d
Example #19
    def get_histogram_for_angle(self, image, angle):
        """
            Rotates the image by the specified angle and calculates the
            sum of pixel values for every row

            Args:

                image: PIL image object
                angle: rotation angle in degrees

            Returns:
                list of values. Each value is the sum of the inverted
                pixel values for the corresponding row
        """
        copy = image.rotate(angle, Image.BILINEAR, True)

        img = np.fromstring(copy.tostring(),
                            dtype=np.uint8).reshape(copy.size[1],
                                                    copy.size[0], 2)

        alpha = img[:, :, 1]
        res = img[:, :, 0]

        res[res >= MAGIC_COLOR_THRESHOLD] = 255
        res[alpha < 255] = 255
        res = 255 - res

        # Python cv2.reduce doesn't work correctly with int matrices.
        data_for_reduce = res.astype(np.float)
        histogram = cv2.reduce(data_for_reduce, 1, cv2.cv.CV_REDUCE_SUM)[:, 0]

        return histogram
Example #20
def LinesMedian(bw_image):
	# making horizontal projections
	
	horProj = cv2.reduce(bw_image, 1, cv2.cv.CV_REDUCE_AVG)

	# make hist - same dimension as horProj - if 0 (space), then True, else False
	th = 0; # black pixels threshold value. this represents the space lines
	hist = horProj <= th;

	#Get mean coordinate of white pixel groups
	ycoords = []
	y = 0
	count = 0
	isSpace = False
	median_count = []

	for i in range(0, bw_image.shape[0]):
		
		if (not isSpace):
			if (hist[i]): #if space is detected, get the first starting y-coordinates and start count at 1
				isSpace = True
				count = 1
				#y = i
		else:
			if (not hist[i]):
				isSpace = False
				median_count.append(count)
			else:
				#y = y + i
				count = count + 1
	median_count.append(count)
	#ycoords.append(y / count)
	
	#returns counts of each blank rows of each of the lines found
	return median_count
Example #21
    def histogramaHor(self):
        self.plotHisto.clear()
        #imag3=self.img
        # Convert to grayscale
        imag3 = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        # Compute the horizontal histogram
        x_sum = cv2.reduce(imag3, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
        x_sum_len = len(x_sum[0])
        x_sum_norm = x_sum[0] / (x_sum_len)
        x_sum_norm = x_sum_norm.round(0)
        x_sum_norm = x_sum_norm.astype(int)
        print(x_sum_norm)

        x_sum_x = np.linspace(0, 255, 256)
        x_sum_x = x_sum_x.astype(int)

        x_sum_y = np.zeros(256)

        for i in range(256):
            for j in range(x_sum_len):
                if (x_sum_norm[j] == i):
                    x_sum_y[i] += 1

        #pg.plot(x_sum_x, x_sum_y, title=('Histograma Horizontal'))
        self.plotHisto.plot(x_sum_x, x_sum_y, title=('Histograma Horizontal'))
        self.plotHisto.setLabel('left', 'Cantidad de pixeles')
        self.plotHisto.setLabel('bottom', 'Intensidad de iluminacion')
def findLines(bw_image, LinesThres):
    horProj = cv2.reduce(bw_image, 1,
                         cv2.REDUCE_AVG)  # making horizontal projections
    # make hist - same dimension as horProj - if 0 (space), then True, else False
    th = 0  # black pixels threshold value. this represents the space lines
    hist = horProj <= th
    #Get mean coordinate of white pixel groups
    ycoords = []
    y = 0
    count = 0
    isSpace = False
    for i in range(0, bw_image.shape[0]):
        if (not isSpace):
            if (
                    hist[i]
            ):  #if space is detected, get the first starting y-coordinates and start count at 1
                isSpace = True
                count = 1
                y = i
        else:
            if (not hist[i]):
                isSpace = False
                # when smoothing, thin letters will break down, creating new blank pixel rows, but the count will be small, so we set a threshold.
                if (count >= LinesThres):
                    ycoords.append(y / count)
            else:
                y = y + i
                count = count + 1
    ycoords.append(y / count)
    #returns y-coordinates of the lines found
    return ycoords
def LinesMedian(bw_image):
    horProj = cv2.reduce(bw_image, 1,
                         cv2.REDUCE_AVG)  # making horizontal projections
    # make hist - same dimension as horProj - if 0 (space), then True, else False
    th = 0  # black pixels threshold value. this represents the space lines
    hist = horProj <= th

    #Get mean coordinate of white pixel groups
    count = 0
    isSpace = False
    median_count = []
    for i in range(0, bw_image.shape[0]):
        if (not isSpace):
            if (
                    hist[i]
            ):  #if space is detected, get the first starting y-coordinates and start count at 1
                isSpace = True
                count = 1
        else:
            if (not hist[i]):
                isSpace = False
                median_count.append(count)
            else:
                count = count + 1
    median_count.append(count)

    #returns counts of each blank rows of each of the lines found
    return median_count
Example #24
def segment_lines(im):
    im_proj = cv2.reduce(im, 1, cv2.REDUCE_AVG)
    hist = im_proj == 255
    ycoords = []
    y = 0
    count = 0
    isSpace = False
    for i in range(len(hist)):
        if not isSpace:
            if hist[i]:
                isSpace = True
                count = 1
                y = i
        else:
            if not hist[i]:
                isSpace = False
                ycoords.append(y / count)
            else:
                y += i
                count += 1
    lines = [
        line for line in np.vsplit(im,
                                   np.array(ycoords).astype(int))
        if not (line == 255).all()
    ]

    return lines
Example #25
 def processLine(i):
     cv2.namedWindow("result", cv2.WINDOW_NORMAL)
     subimg = img_bw[uppers[i]:lowers[i+1], 0:W]
     subimg_c = aligned[uppers[i]:lowers[i+1], 0:W]
     # break into characters via histogramming trick.
     hist = cv2.reduce(subimg, 0, cv2.REDUCE_AVG).reshape(-1)
     hist = cv2.GaussianBlur(hist,(5,5),0)
     #1300 is magic empirical constant
     char = {'l':[], 'r':[]}
     def bindUpdateBounds(ratio):
         (temp1, temp2) = updateBounds(ratio, lhist=hist, mimg = subimg_c, dim=0)
         print(temp1)
         char['l'] = temp1;
         char['r'] = temp2;
     cv2.createTrackbar("ratio", "result", 1300, 2000, bindUpdateBounds)
     bindUpdateBounds(1300)
     cv2.waitKey()
     cv2.destroyAllWindows()
     # armed with individual character images, standardize them if possible
     # then export to folders.
     for j, _ in enumerate(char['l']):
         print("Running character processing")
         processChar(char['r'][j], char['l'][j], subimg, lowers[i+1]-uppers[i])
     line= ""
     print(line)
     return line
def separate_words(line):
    line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
    new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
    line_peaks = peakdetect(line_hist, lookahead=50)
    Hl, Wl = new_line.shape[:2]

    words = []
    for y in line_peaks[0]:
        if y[1] == 255:
            words.append(y[0])
        # plt.plot(y[0], y[1], "r*")
        if y[1] == 255:
            cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
    # for y in line_peaks[1]:
    # plt.plot(y[0], y[1], "g*")
    # if y[1] == 255:
    #     words.append(y[0])
    #     cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)

    words.insert(0, 0)
    words.append(Wl)

    # plt.imshow(new_line, cmap=plt.cm.gray)
    # plt.show()
    return words
Example #27
def deskew(im):
    d_angle = 10
    c_angle = 0
    for delta in [10, 1, 0.1]:
        angles = np.arange(c_angle - d_angle, c_angle + d_angle, delta)
        scores = []
        for angle in angles:
            data = interpolation.rotate(im,
                                        angle,
                                        reshape=False,
                                        order=0,
                                        cval=255)
            #hist = np.sum(data, axis=1)
            hist = cv2.reduce(data, 1, cv2.REDUCE_AVG)
            score = np.std(hist)**2
            scores.append(score)
        best_score = max(scores)
        c_angle = best_angle = angles[scores.index(best_score)]
        d_angle = delta
    im_rot = interpolation.rotate(im,
                                  best_angle,
                                  reshape=False,
                                  order=0,
                                  cval=255)
    return im_rot
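A minimal usage sketch for the coarse-to-fine deskew above, which scores each candidate angle by the variance of the row projection produced with cv2.reduce; the file names are placeholders.

import cv2

img = cv2.imread("scan.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
deskewed = deskew(img)
cv2.imwrite("scan_deskewed.png", deskewed)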
Example #28
def is_Stripe(image, number_peaks=12):  # -> Boolean
    """ Checks if the image is striped.
	@Parameters: image matrix (binary form - black and white)
				number_peaks (int) Number of peaks to be considered 'striped'

	@Returns Boolean, if striped.
	"""

    # Crop the edges
    image = image[4:-4]

    # Use the Sobel operator across the Y to obtain the Gradient
    fpx = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)  # vertical

    # Project it onto single dimension (1D) - by summing them
    vpr = cv2.reduce(fpx, 1, cv2.REDUCE_SUM)

    # Squaring and square rooting removes the negative sign, and flatten
    vpr_p = np.sqrt(vpr**2).flatten()
    # Large spikes on f'(x) indicate change in the rate of change - seek areas where slope of the second derivative is 0
    unfiltered_peaks, _ = find_peaks(vpr_p, height=0)  # Finding peaks

    # Filter the peaks for noise which will be defined as any peak with an absolute height of less than PEAK_THRESH * maximum peak length
    all_peaks = (vpr_p[i] for i in unfiltered_peaks)
    peak_thresh = max(all_peaks) * PEAK_THRESH
    filtered_peaks = sum(1 for i in unfiltered_peaks
                         if vpr_p[i] >= peak_thresh)

    return (filtered_peaks) > number_peaks
def separate_cha(line):
    line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
    new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
    line_peaks = peakdetect(line_hist, lookahead=25)
    Hl, Wl = new_line.shape[:2]

    cha = []
    # for y in line_peaks[0]:
    # plt.plot(y[0], y[1], "r*")
    # cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)

    for y in line_peaks[0]:
        if y[1] >= 235:
            cha.append(y[0])
        # plt.plot(y[0], y[1], "g*")
        cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)

    cha.insert(0, 0)
    cha.append(Wl)

    # plt.plot(line_hist)
    # plt.show()
    # plt.imshow(new_line, cmap=plt.cm.gray)
    # plt.show()
    return cha
Example #30
def prepareImage(img):
    w, h = 19, 19
    img = img[:h, :w]
    hist = cv2.reduce(img, 0, cv2.REDUCE_AVG).reshape(-1)
    maxpoint = np.argmin(hist)
    newimg = np.roll(img, (0, w // 2 - maxpoint))
    return np.array(newimg > 150, dtype='int').flatten()
    def ConvertToBinaryImage(self, imagePath):
        """
        Open the original image, convert it to binary, save the binary image in the same
        directory as the original one, and return the path to the binary image.
        :param imagePath: path to the image we want to convert into binary image
        :type imagePath: string
        """
        img = cv.imread(imagePath, 0)
        ret, binaryImage = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
        ## (3) minAreaRect on the nozeros
        pts = cv.findNonZero(binaryImage)
        ret = cv.minAreaRect(pts)
        (cx, cy), (w, h), ang = ret
        if w > h:
            w, h = h, w
            ang += 90
        M = cv.getRotationMatrix2D((cx, cy), 0, 1.0)
        rotated = cv.warpAffine(binaryImage, M, (img.shape[1], img.shape[0]))
        hist = cv.reduce(rotated, 1, cv.REDUCE_AVG).reshape(-1)

        th = 2
        H, W = img.shape[:2]
        uppers = [y for y in range(H - 1) if hist[y] <= th and hist[y + 1] > th]
        lowers = [y for y in range(H - 1) if hist[y] > th and hist[y + 1] <= th]

        rotated = cv.cvtColor(rotated, cv.COLOR_GRAY2BGR)
        for y in uppers:
            cv.line(rotated, (0, y), (W, y), (255, 0, 0), 1)

        for y in lowers:
            cv.line(rotated, (0, y), (W, y), (0, 255, 0), 1)

        cv.imwrite("result.png", rotated)
        # Add "_bin" to the name of the binary image.
        parts = imagePath.split(".")
        binaryImagePath = parts[0] + "_bin." + parts[1]
        # Save the binary image in the same directory.
        cv.imwrite(binaryImagePath, binaryImage)
        image = cv.imread(binaryImagePath)
        uppers, lowers = self.boundingHandle(uppers, lowers)
        i = 0
        directory = None
        while i != len(uppers):
            # create help folder named "row" and write each row
            file_path = "C:/rows/" + str(i) + '.png'
            directory = os.path.dirname(file_path)
            try:
                os.stat(directory)
            except:
                os.mkdir(directory)
            y = abs(uppers[i] - 20)
            x = 0
            h = (lowers[i] - uppers[i]) + 40

            row = image[y:y + h, x:x + W]
            cv.imwrite(file_path, row)
            i += 1
        os.remove(binaryImagePath)
        # Return the path to the directory of binary images.
        return directory
 def train(self):
     ustep = self.range_dist[0] / self.hist_bins[0]
     vstep = self.range_dist[1] / self.hist_bins[1]
     # calc n, X_i and mu
     n = cv2.countNonZero(self.f_hist)
     X = np.zeros((n, 2), dtype=np.float64)
     mu = np.zeros(2, dtype=np.float64)
     f = []
     count = 0
     N = 0
     for ubin in range(self.hist_bins[0]):
         for vbin in range(self.hist_bins[1]):
             histval = self.f_hist[ubin][vbin]
             if histval > 0:
                 sampleX = np.array([self.low_range[0] + ustep * (ubin + .5),
                                     self.low_range[1] + vstep * (vbin + .5)])
                 X[count] = sampleX
                 count = count + 1
                 f.append(histval)
                 mu = mu + histval * sampleX
                 N += histval

     mu /= N
     # calc psi - mean of DB (column-wise average of the samples)
     self.psi = cv2.reduce(X, 0, cv2.REDUCE_AVG)
     # calc Lambda (weighted scatter of the samples around mu)
     self.Lambda = np.zeros((2, 2), dtype=np.float64)
     for i in range(n):
         X_m_mu = (X[i] - mu).reshape(1, 2)
         prod = f[i] * X_m_mu.T.dot(X_m_mu)
         self.Lambda += prod
     self.Lambda /= N
     linv = np.linalg.inv(self.Lambda)
     self.Lambda_inv.val[0] = linv[0][0]
     self.Lambda_inv.val[1] = linv[0][1]
     self.Lambda_inv.val[2] = linv[1][0]
Example #33
def findline(img):
    '''

    Parameters
    ----------
    img : Array of uint8
        DESCRIPTION.

    Returns
    -------
    uppers : LIST
        list of upper boundary of text.
    lowers : LIST
        list of lower boundary of text.

    '''
    # find and draw the upper and lower boundary of each lines
    hist = cv2.reduce(img, 1, cv2.REDUCE_AVG).reshape(-1)
    th = 2
    uppers = [y for y in range(H - 1) if hist[y] <= th and hist[y + 1] > th]
    lowers = [y for y in range(H - 1) if hist[y] > th and hist[y + 1] <= th]

    for y in range(len(uppers)):
        a = uppers[y]
        b = lowers[y]
        img = cv2.line(img, (0, a), (W, a), (255, 0, 0), 2)
        img = cv2.line(img, (0, b), (W, b), (255, 0, 0), 2)
    cv2.imshow("draw_line", img)
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.imwrite('draw_line.png', img)
    return uppers, lowers
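findline reads the globals H and W; a usage sketch (assumed, not part of the example) that defines them from the binarized image in the same module before calling:

import cv2

img = cv2.imread("text.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
H, W = img.shape[:2]  # globals used inside findline()
uppers, lowers = findline(img)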
def LinesMedian(bw_image):
    # making horizontal projections
    horProj = cv2.reduce(bw_image, 1, cv2.REDUCE_AVG)

    # make hist - same dimension as horProj - if 0 (space), then True, else False
    th = 0
    # black pixels threshold value. this represents the space lines
    hist = horProj <= th

    # Get mean coordinate of white pixel groups
    ycoords = []
    y = 0
    count = 0
    isSpace = False
    median_count = []
    for i in range(0, bw_image.shape[0]):
        if not isSpace:
            if hist[i]:
                isSpace = True
                count = 1
                # y = 1

        else:
            if not hist[i]:
                isSpace = False
                median_count.append(count)
            else:
                # y = y + i
                count = count + 1

    median_count.append(count)
    # ycoords.append(y / count)
    # returns counts of each blank rows of each of the lines found
    return median_count
def findLines(bw_image, LinesThres):
    # making horizontal projections
    horProj = cv2.reduce(bw_image, 1, cv2.REDUCE_AVG)

    # make hist - same dimension as horProj - if 0 (space), then True, else False
    th = 0
    # black pixels threshold value. this represents the space lines
    hist = horProj <= th

    # Get mean coordinate of white pixel groups
    ycoords = []
    y = 0
    count = 0
    isSpace = False

    for i in range(0, bw_image.shape[0]):
        if not isSpace:
            if hist[i]:
                isSpace = True
                count = 1
                y = i

        else:
            if not hist[i]:
                isSpace = False
                if count >= LinesThres:
                    ycoords.append(y / count)
            else:
                y = y + i
                count = count + 1

    ycoords.append(y / count)
    # returns y-coordinates of the lines found
    return ycoords
Example #36
 def _find_jaw_separation_line(self, image):
     '''
     Finds the y coordinates of the lines that separate the upper and lower jaws.
     :param image:
     :return: tuple (upper jaw line index, lower jaw line index)
     '''
     y_histogram = cv2.reduce(image, 1, cv2.cv.CV_REDUCE_SUM, dtype=cv2.CV_32S)
     return self._get_valley_range(y_histogram)
 def _get_affine_transformed_image_size(self, img_size, homo_mat):
     w = img_size.width;
     h = img_size.height;
     corners_coordindates = [0.0, w, w, 0.0,
                             0.0, 0.0, h, h,
                             1.0, 1.0, 1.0, 1.0]
     homogeneous_corners = numpy.array(corners_coordindates, dtype=numpy.float32).reshape(3, 4)
     transformed_corners = numpy.dot(homo_mat, homogeneous_corners)
     transformed_corners[0] = transformed_corners[0] / transformed_corners[2];
     transformed_corners[1] = transformed_corners[1] / transformed_corners[2];
     transformed_corners[2] = numpy.ones((1, len(transformed_corners[2])), dtype=numpy.float32)
     max_corner_coord = cv2.reduce(transformed_corners, 1, cv.CV_REDUCE_MAX)
     return Size(int(max_corner_coord[0, 0]), int(max_corner_coord[1, 0]))
Example #38
 def _find_bird(self):
   hsv_image = cv2.cvtColor(self.bird_image, cv2.COLOR_BGR2HSV)
   threshed = cv2.inRange(hsv_image, kBeakMinColor, kBeakMaxColor)
   v = [p[0] >= 10 for p in cv2.reduce(threshed, 1, cv2.cv.CV_REDUCE_AVG)]
   best = None
   for y in xrange(len(v)):
     for height in xrange(min(kBeakMaxLength, len(v) - y)):
       if not v[y + height]:
         break
       if height >= kBeakMinSize:
         if best is None or best[1] < height:
           best = (y, height)
   # cv2.imshow('bird filtered', threshed)
   return None if best is None else best[0] + 8  # Aim for the center
    def centralizing(self, image):
        '''
            Centralizes the image in the vertical direction.
        '''
        image = np.uint8(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) 

        if np.count_nonzero(image) < 15:
            return None

        horizontal_projection = cv2.reduce(image, 1, 1)

        index = 0
        while (index < 28 and horizontal_projection[index] == [0]):
            index += 1
            image = np.delete(image, 0, 0)

        index = horizontal_projection.shape[0] - 1
        while (index > 10 and horizontal_projection[index] == 0):
            index -= 1
            image = np.delete(image, image.shape[0] - 1, 0)

        (h, w) = image.shape

        if w > h:
            size = w
            empty_line = [0] * w
            rate = float(self.SIZE_OF_CHARACTER) / w
            for i in xrange(w - h):
                if i % 2 == 0:
                    image = np.vstack((image, empty_line))
                else:
                    image = np.insert(image, 0, empty_line, axis=0)
        else:
            size = w
            rate = float(self.SIZE_OF_CHARACTER) / w

        character_image = np.zeros([size, size, 3])

        character_image[:, :, 0] = image
        character_image[:, :, 1] = image
        character_image[:, :, 2] = image

        result = cv2.resize(character_image, (0, 0), fx=rate, fy=rate)

        return result
Example #40
def projection_analysis(im):
    # compute the ink density histogram (sum each rows)
    hist = cv2.reduce(im, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32F)
    hist = hist.ravel()
    # find peaks withing the ink density histogram
    max_hist = max(hist)
    mean_hist = np.mean(hist)
    thres_hist = mean_hist / max_hist
    peaks = peakutils.indexes(hist, thres=thres_hist, min_dist=50)
    # find peaks that are too high
    mean_peaks = np.mean(hist[peaks])
    std_peaks = np.std(hist[peaks])
    thres_peaks_high = mean_peaks + 1.5*std_peaks
    thres_peaks_low = mean_peaks - 3*std_peaks
    peaks = peaks[np.logical_and(hist[peaks] < thres_peaks_high,
                                 hist[peaks] > thres_peaks_low)]

    return peaks
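A short usage sketch for projection_analysis; the image loading, the Otsu binarization and the drawing of the detected peak rows are illustrative assumptions.

import cv2

im = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
binary = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
rows = projection_analysis(binary)
vis = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
for y in rows:  # one ink-density peak per text line
    cv2.line(vis, (0, int(y)), (vis.shape[1] - 1, int(y)), (0, 0, 255), 1)
cv2.imwrite("page_lines.png", vis)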
Example #41
def SpacesMedian(line):
	
	# making vertical projections
	
	verProj = cv2.reduce(line, 0, cv2.cv.CV_REDUCE_AVG)

	# make hist - same dimension as horProj - if 0 (space), then True, else False
	th = 0; # black pixels threshold value. this represents the space lines
	hist = verProj <= th;

	#Get mean coordinate of white pixel groups
	xcoords = []
	x = 0
	count = 0
	isSpace = False
	median_count = []
	for i in range(0, line.shape[1]):
		if (not isSpace):
			if (hist[0][i]): #if space is detected, get the first starting x-coordinates and start count at 1
				isSpace = True
				count = 1
				#x = i
		else:
			if (not hist[0][i]):
				isSpace = False
				# when smoothing, thin letters will break down, creating new blank pixel columns, but the count will be small, so we set a threshold.
				#print count,"\t",
				
				#append each count of rows of blank gaps found
				median_count.append(count)
				
				#if (count > 15):
					#xcoords.append(x / count)
			else:
				#x = x + i
				count = count + 1
	
	median_count.append(count)
	xcoords.append(x / count)
	
	#returns x-coordinates of the spaces found in the line
	return median_count
def get_coffee_level(img):
    Y_MAX = img.shape[0]

    img_coffee_level = cv2.reduce(img, 1, cv2.cv.CV_REDUCE_AVG)
    debug(img_coffee_level, 'coffee_pot_reduced')

    levels = [
        img_coffee_level[i][0][0] for i in range(img_coffee_level.shape[0])
    ]

    diffs = [
        (i, int(levels[i]) - int(levels[i+1]))
        for i in range(len(levels)-1)
    ]

    diffs = sorted(diffs, lambda x, y: y[1] - x[1])

    result = Y_MAX - diffs[0][0]
    print("Coffee: %d/%d" % (result, Y_MAX))

    return int(result * 100 / Y_MAX), img_coffee_level
Example #43
def findLines(bw_image, LinesThres):
	# making horizontal projections
	
	horProj = cv2.reduce(bw_image, 1, cv2.cv.CV_REDUCE_AVG)

	# make hist - same dimension as horProj - if 0 (space), then True, else False
	th = 0; # black pixels threshold value. this represents the space lines
	hist = horProj <= th;

	#Get mean coordinate of white pixel groups
	ycoords = []
	y = 0
	count = 0
	isSpace = False

	for i in range(0, bw_image.shape[0]):
		
		if (not isSpace):
			if (hist[i]): #if space is detected, get the first starting y-coordinates and start count at 1
				isSpace = True
				count = 1
				y = i
		else:
			if (not hist[i]):
				isSpace = False
				# when smoothing, thin letters will break down, creating new blank pixel rows, but the count will be small, so we set a threshold.
				if (count >=LinesThres):
					ycoords.append(y / count)
			else:
				y = y + i
				count = count + 1

	ycoords.append(y / count)
	
	#returns y-coordinates of the lines found
	return ycoords
Example #44
import cv2 as cv
import numpy as np

data=np.genfromtxt('test.csv', delimiter=',',skip_header=1)

data_cm = cv.reduce(data, 1, cv.REDUCE_AVG)
print(data_cm)


#gray = np.float32(gray)
#dst = cv2.cornerHarris(gray,2,3,0.04)

Example #45
  ret, frame = cam.read()
  
fgbg = cv2.BackgroundSubtractorMOG()

while True:
  ret, frame = cam.read()
  
  if ret == True:
    frame = cv2.resize(frame, (tamanoMax,tamanoMax))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    blur = cv2.blur(gray,(10,10))
    
    fgmask = fgbg.apply(blur) # Apply the mask
    
    columnas = cv2.reduce(fgmask,0,cv.CV_REDUCE_MAX)
    filas = cv2.reduce(fgmask,1,cv.CV_REDUCE_MAX)
    
    for x in range(len(columnas[0])):
      if columnas[0][x] > 0:
        movimiento = True
        break
    if movimiento == False:
      for x in range(len(filas)):
        if filas[x,0] > 0:
          movimiento = True
          break
    
    if movimiento == True and comprobarFichero():
      nombre = time.strftime("%y%m%d-%H:%M:%S")+".jpg"
      path="/path/donde/guardar/imagenes/"+nombre
Example #46
    def findFeature(self, queryImage, trainImage, trainMask=None, silent=False):
        
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp_q, des_q = sift.detectAndCompute(queryImage,None)
        kp_t, des_t = sift.detectAndCompute(trainImage, trainMask)
        
        if not kp_q or not kp_t:
            if not silent:
                mylogger.warn("No keypoints found")
            return False

#         img=cv2.drawKeypoints(queryImage, kp_q, outImage=np.zeros((1,1)), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#         cv2.imshow('img',img)
#         key = cv2.waitKey(0)
#         if key==1048603 or key==27:
#             sys.exit()    
#             
#         img=cv2.drawKeypoints(trainImage, kp_t, outImage=np.zeros((1,1)), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#         cv2.imshow('img',img)
#         key = cv2.waitKey(0)
#         if key==1048603 or key==27:
#             sys.exit()                

        # create BFMatcher object
        bf = cv2.BFMatcher()
        
        # Match descriptors.
        matches = bf.knnMatch(des_q, des_t, k=2)
        
        # Apply ratio test
        good = []
        for best_matches in matches:
            if len(best_matches)>1:
                m,n = best_matches
            else:
                m=n=best_matches[0]
            if m.distance < 0.75*n.distance:
                good.append([m])
                
        if len(good)<2:
            if not silent:
                mylogger.warn("Feature not found")
            return False 
        
#         img = cv2.drawMatchesKnn(queryImage, kp_q, trainImage, kp_t, good, outImg=np.zeros((1,1)), flags=2)
#         cv2.imshow('img',img)
#         key = cv2.waitKey(0)
#         if key==1048603 or key==27:
#             sys.exit()    
        
        img_pts = np.float32([ kp_t[m[0].trainIdx].pt for m in good ])
        mean = cv2.reduce(img_pts, 0, cv2.REDUCE_AVG)[0]

        # check dispersion
        disp = 0
        for pt in img_pts:
            delta = 0
            delta += (pt[0]-mean[0]) * (pt[0]-mean[0])           
            delta += (pt[1]-mean[1]) * (pt[1]-mean[1])
            if delta>1000:
                # remove extremums
                np.delete(img_pts, pt)
            else:            
                disp += delta
        
        if len(img_pts)<2:
            if not silent:
                mylogger.warn("G4 not found")
            return False 
        
        # recalc mean
        mean = cv2.reduce(img_pts, 0, cv2.REDUCE_AVG)[0]

#        mylogger.info("Dispersion: %s" % disp)

        if disp>5000:
            if not silent:
                mylogger.warn("Dispersion too big")
            return False 
        
#         cv2.circle(trainImage, (int(mean[0]), int(mean[1])), 5, (0,0,255))
#         cv2.imshow('img',trainImage)
#         key = cv2.waitKey(0)
#         if key==1048603 or key==27:
#             sys.exit()    
            
        return (int(mean[0]), int(mean[1]))
def cv_project(src):
    return numpy.squeeze(cv2.reduce(src,dim=1,rtype=cv2.cv.CV_REDUCE_SUM))
Example #48
def compute_sharpness_value(img_gray):
    img_lap = cv2.Laplacian(img_gray, cv.CV_16S, ksize=1, scale=1, delta=0)
    row_lap = cv2.reduce(img_lap, 0, cv.CV_REDUCE_MAX)
    point_lap = cv2.reduce(row_lap, 1, cv.CV_REDUCE_MAX)[0][0]
    return int(point_lap)
    def segmentation(self):
        '''
            Segments the text: first detects the text lines, then the characters.
        '''
        self.preprocessing()

        self.height, self.width = self.image.shape

        size = self.width / self.NUMBER_OF_PART

        text_lines = []

        for i in xrange(self.NUMBER_OF_PART):

            part_of_image = self.image[:, i * size:(i + 1) * size]

            horizontal_projection = cv2.reduce(part_of_image, 1, 1)
            histogram = horizontal_projection <= 0

            histogram = histogram.astype(int)
            histogram *= -1
            histogram = np.append(histogram, -1)

            sign_change = ((np.roll(histogram, 1) - histogram) != 0).astype(int)

            lines = np.where(sign_change == 1)[0]

            distance = [lines[k] - lines[k - 1] for k in xrange(1, len(lines), 2)]

            threshold_line_width = np.average(np.array(distance)) / 2
            position = np.where(distance <= threshold_line_width)[0]

            for p in xrange(len(position)):
                lines = np.delete(lines, (position[p]) * 2 - p * 2)
                lines = np.delete(lines, (position[p]) * 2 - p * 2)

            if lines[0] == 0:
                lines = np.delete(lines, 0)
                lines = np.delete(lines, 0)

            if lines[-1] == int(self.height):
                lines = np.delete(lines, len(lines) - 1)
                lines = np.delete(lines, len(lines) - 1)

            text_lines.append(lines)

        lines = self.get_same_line_position(text_lines)

        empty_line = [0] * size

        for l in lines:
            croped_images = []
            starts = l[0]
            ends = l[1]

            for i in xrange(len(starts)):
                croped_images.append(self.image[starts[i]:ends[i], i * size:(i + 1) * size])

            min_line = min(starts)
            max_line = max(ends)

            min_line_index = starts.index(min_line)
            max_line_index = ends.index(max_line)

            for k in xrange(self.NUMBER_OF_PART):
                if k != min_line_index:
                    for z in xrange(starts[k] - min_line):
                        croped_images[k] = np.insert(croped_images[k], 0, empty_line, axis=0)
                if k != max_line_index:
                    for z in xrange(max_line - ends[k]):
                        croped_images[k] = np.vstack([croped_images[k], empty_line])

            temp_image = croped_images[0]

            for ci in xrange(1, self.NUMBER_OF_PART):
                temp_image = np.concatenate((temp_image, croped_images[ci]), axis=1)
            # cv2.imwrite("bw_image_" + str(sys.argv[1].split("_")[2].split(".")[0]) + "_lines_" + str(self.line_index) + ".jpg", temp_image)
            self.line_index += 1

            self.detected_text_lines.append(temp_image)

        self.get_characters()
Example #50
    input_filename = sys.argv[1]
    # read input and covert to grayscale
    img = cv2.imread(input_filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # invert image and thicken pixel lines
    th, threshed = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    threshed = cv2.dilate(threshed, kernel, iterations=3)
    threshed = cv2.erode(threshed, kernel, iterations=3)

    # deskew image
    rotated = straighten(threshed)

    # find and draw the upper and lower boundary of each lines
    hist = cv2.reduce(rotated, 1, cv2.REDUCE_AVG).reshape(-1)

    th = 2
    H, W = img.shape[:2]
    uppers = [y for y in range(H-1) if hist[y]<=th and hist[y+1]>th]
    lowers = [y for y in range(H-1) if hist[y]>th and hist[y+1]<=th]

    for i in range(min(len(uppers), len(lowers))):
        # isolate each line of text
        line = threshed[uppers[i]-BUF:lowers[i]+BUF, 0:W].copy()

        hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
        H, W = line.shape[:2]
        lefts = [x for x in range(W-1) if gap_left(hist, x, th)]
        rights = [x for x in range(W-1) if gap_right(hist, x, th, W)]
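gap_left and gap_right are not shown in this snippet; a plausible reconstruction (an assumption based on the uppers/lowers pattern above, not the original helpers) treats them as the blank-to-ink and ink-to-blank transition tests on the column histogram, with W kept only to match the call signature.

def gap_left(hist, x, th):
    # Column x is blank but column x+1 has ink: a segment starts here
    return hist[x] <= th and hist[x + 1] > th

def gap_right(hist, x, th, W):
    # Column x has ink but column x+1 is blank: a segment ends here
    return hist[x] > th and hist[x + 1] <= th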
# http://stackoverflow.com/questions/34981144/split-text-lines-in-scanned-document/35014061
#

print cv2.__version__

img = cv2.imread('Afbeelding 2.png')

cv2.imshow('image',img)
#cv2.waitKey(0)
bin = (255 - img)
bin = cv2.cvtColor(bin,cv2.COLOR_BGR2GRAY)
pts = cv2.findNonZero(bin)

#box = cv2.minAreaRect(pts)

horProj = cv2.reduce(bin, 1, cv2.REDUCE_AVG)

horProj[horProj<=5.0] = 0

ycoords = []
y=0
count=0
isSpace=False

for i in xrange(0,horProj.size):
         if (not isSpace):
            if (not horProj[i]):
                isSpace=True
                count=1
                y=i
         else:
Example #52
        pylab.savefig(os.path.join(outdir, 'reduce_r.png'))
    
    if 0:
        print 'writing to %s' % outdir
        img = cv2.imread(args.fn_in)
        print type(img)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        cv2.imwrite(os.path.join(outdir, 'reduce_01_gray.png'), gray)
        print type(gray)
        edges = cv2.Canny(gray, 125, 250, apertureSize=3)
        print type(edges)
        cv2.imwrite(os.path.join(outdir, 'reduce_02_edges.png'), edges)
        
        #test = cv2.cvtColor(edges)
        test = cv2.cv.GetMat(edges)
        rowr = cv2.reduce(edges, 0, cv2.cv.CV_REDUCE_SUM)
        colr = cv2.reduce(edges, 1, cv2.cv.CV_REDUCE_SUM)
    
        matplotlib.pyplot.clf()
        plt.subplot(211)
        plt.plot(rowr)
        plt.subplot(212)
        plt.plot(colr)
        pylab.savefig(os.path.join(outdir, 'reduce.png'))
        
        matplotlib.pyplot.clf()
        plt.plot(rowr)
        pylab.savefig(os.path.join(outdir, 'reduce_r.png'))

        matplotlib.pyplot.clf()
        plt.plot(colr)