def Extract_Text(img, cnts):
    """Crop candidate text regions from *img*, OCR them, and build
    DimensionalTexts records for each accepted region.

    Parameters
    ----------
    img : BGR numpy image the contours were detected on.
    cnts : iterable of OpenCV contours (candidate text areas).

    Returns
    -------
    tuple(list, list)
        (ExtractedTextArea, RotatedText) — the DimensionalTexts records
        and a second list that is currently always empty but kept so
        callers can keep unpacking two values.
    """
    global DimensioanlLines  # NOTE: misspelled module-level name kept as-is
    i = 0
    ExtractedTextArea = []
    RotatedText = []  # never populated; retained for interface compatibility
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)
        crWidth = w / float(img.shape[1])
        area = cv2.contourArea(c)
        # Filter out specks and page-wide artefacts: keep small-but-not-tiny
        # boxes with a text-like aspect ratio and a plausible area.
        if h > 5 and w > 4 and ar > 0.4 and 0.5 > crWidth > 0.001 and area > 20:
            roi = img[y:y + h + 1, x:x + w + 1].copy()
            # BUGFIX: removed the unused `Rotate_roi` crop — it was copied
            # on every hit and never read (dead work).
            cv2.imwrite(make_dir_roi + "/segmentedtext" + str(i) + ".png", roi)
            BB = [x, y, w, h]
            new_img = TextsFeature.Paste(img, roi)
            OrientationAngle = ((Cognition.GetOrientation(
                DimensioanlLines, BB)))
            output_img_path, detected_text = TextsFeature.RotateByAngle(
                new_img, OrientationAngle, make_dir_rotate, i)
            p1 = Point2(x - 2, y - 2)
            p2 = Point2(x + w + 2, y + h + 2)
            DimText = DimensionalTexts()
            # Project convention: the instance is passed explicitly.
            DimText.ExtractDimensionalText(DimText, detected_text, p1, p2,
                                           OrientationAngle)
            ExtractedTextArea.append(DimText)
            i += 1
    return ExtractedTextArea, RotatedText
class ExtractedLines():
    """A line extracted from the drawing: Hough parameters (rho, theta)
    plus the two detected endpoints."""

    _rho = 0
    _theta = 0
    _p1 = Point2(0, 0)
    _p2 = Point2(0, 0)

    def __init__(self):
        self._rho = 0
        self._theta = 0
        self._p1 = Point2(0, 0)
        self._p2 = Point2(0, 0)

    def __eq__(self, other):
        # Two lines match when both Hough parameters agree within 1 unit.
        tol = 1.0
        rho_close = fabs(self._rho - other._rho) < tol
        theta_close = fabs(self._theta - other._theta) < tol
        return rho_close and theta_close

    def __hash__(self):
        # NOTE(review): hash also includes the endpoints while __eq__ is
        # tolerance-based on (rho, theta) only — objects equal by __eq__
        # may hash differently; confirm before using these in sets/dicts.
        return hash((self._rho, self._theta, self._p1, self._p2))

    def __repr__(self):
        return (f"line (rho ={self._rho}, theta ={self._theta}"
                f", Point1 = {self._p1}, Point2 ={self._p2})")

    @staticmethod
    def ExtractLine(self, rho, theta, p1, p2):
        # Project convention: callers invoke ex.ExtractLine(ex, ...),
        # passing the instance explicitly as the first argument.
        self._rho = rho
        self._theta = theta
        self._p1 = p1
        self._p2 = p2
class ExtractedText():
    """A piece of text cropped from the drawing: bounding box corners,
    the OCR-detected string, and the cropped image patch."""

    _p1 = Point2(0, 0)
    _p2 = Point2(0, 0)
    _text = ""
    _cropedImg = np.zeros([100, 100, 3], dtype=np.uint8)

    def __init__(self):
        self._p1 = Point2(0, 0)
        self._p2 = Point2(0, 0)
        self._text = ""
        self._cropedImg = np.zeros([100, 100, 3], dtype=np.uint8)

    def __hash__(self):
        # BUGFIX: numpy arrays are unhashable, so hashing _cropedImg
        # directly always raised TypeError.  Hash its raw bytes instead
        # (ExtractText may also store a path string here, hence the check).
        img = self._cropedImg
        img_key = img.tobytes() if isinstance(img, np.ndarray) else img
        return hash((self._p1, self._p2, self._text, img_key))

    def __repr__(self):
        return "".join([
            "ExtractedText p1=", str(self._p1), ", p2 =", str(self._p2),
            ", DetectedText = ", str(self._text),
            ", PathOfImg =", str(self._cropedImg), ")"
        ])

    @staticmethod
    def ExtractText(self, p1, p2, text, path):
        # Project convention: called as ex.ExtractText(ex, ...) with the
        # instance passed explicitly.
        self._p1 = p1
        self._p2 = p2
        self._text = text
        self._cropedImg = path
def _IsNear(p, q):
    """True when p and q are within 5px on both axes but not coincident
    on either axis (matches the original 0 < |d| <= 5 test)."""
    return (5 >= fabs(p.x - q.x) > 0 and 5 >= fabs(p.y - q.y) > 0)


def _SegmentIntersection(s, e, ls):
    """Intersection of the infinite lines through (s, e) and through
    segment *ls*, or None when the two lines are parallel."""
    A1, B1, C1 = Cognition.LineCoefficients(s, e)
    A2, B2, C2 = Cognition.LineCoefficients(ls.startPoint, ls.endPoint)
    det = A1 * B2 - A2 * B1
    if det == 0:
        return None
    x = (B2 * C1 - B1 * C2) / det
    y = (A1 * C2 - A2 * C1) / det
    return Point2(x, y)


def CheckNearbySegments(segment, EntityLines):
    """Snap the endpoints of *segment* onto nearby entity segments.

    For each endpoint of *segment*, if an entity-segment endpoint lies
    within 5px (but not exactly coincident on either axis), the endpoint
    is moved to the intersection of the two infinite lines.  The last
    matching entity segment wins, preserving the original scan order.

    Refactored: the four copy-pasted intersection branches are now the
    shared helpers above; behavior is unchanged (both original branches
    per endpoint computed the identical intersection).
    """
    s = segment.startPoint
    e = segment.endPoint
    newStartPoint = s
    newEndPoint = e
    for l in EntityLines:
        for ls in l:
            if _IsNear(s, ls.startPoint) or _IsNear(s, ls.endPoint):
                pt = _SegmentIntersection(s, e, ls)
                if pt is not None:
                    newStartPoint = pt
    for l in EntityLines:
        for ls in l:
            if _IsNear(e, ls.endPoint) or _IsNear(e, ls.startPoint):
                pt = _SegmentIntersection(s, e, ls)
                if pt is not None:
                    newEndPoint = pt
    newSegment = Line2(newStartPoint, newEndPoint)
    return newSegment
def Thickness(point1, point2, img):
    """Estimate stroke thickness inside the box spanned by *point1* /
    *point2* (inflated by 2px): Otsu-binarise the patch, run a distance
    transform, and return the maximum distance value — the half-width of
    the thickest stroke."""
    top_left = Point2(point1.x - 2, point1.y - 2)
    bottom_right = Point2(point2.x + 2, point2.y + 2)
    patch = img[top_left.y:bottom_right.y, top_left.x:bottom_right.x]
    gray = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    distances = cv2.distanceTransform(binary, cv2.DIST_L2, 5)
    _, max_dist, _, _ = cv2.minMaxLoc(distances)
    return max_dist
def CheckOverlapByLineSegment(B1_P1, B1_P2, B2_P1, B2_P2):
    """Return True when the boundaries of the two axis-aligned
    rectangles (each given by a min/max corner pair) intersect."""

    def rect_edges(pmin, pmax):
        # Four corners in order, then the four boundary edges.
        c1 = Point2(pmin.x, pmin.y)
        c2 = Point2(pmax.x, pmin.y)
        c3 = Point2(pmax.x, pmax.y)
        c4 = Point2(pmin.x, pmax.y)
        return [Line2(c1, c2), Line2(c2, c3), Line2(c3, c4), Line2(c4, c1)]

    edges_a = rect_edges(B1_P1, B1_P2)
    edges_b = rect_edges(B2_P1, B2_P2)
    for ea in edges_a:
        for eb in edges_b:
            hit = Cognition.CheckIfIntersectingLineSegment(
                ea.startPoint, ea.endPoint, eb.startPoint, eb.endPoint)
            if hit == True:
                return True
    return False
class ExtractedCircles():
    """A circle detected in the drawing: centre point and radius."""

    # Class-level defaults; shadowed per-instance in __init__.
    _centre = Point2(0, 0)
    _radius = 0

    def __init__(self):
        self._centre = Point2(0, 0)  # circle centre (Point2)
        self._radius = 0             # radius in pixels
        self._pixels = []            # raster pixels assigned to this circle

    def __eq__(self, other):
        # Circles match within a 4px tolerance.
        # NOTE(review): fabs() is applied to a Point2 *difference* here —
        # this only works if Point2 subtraction yields something
        # float-convertible; verify against the Point2 implementation.
        tol = 4.0
        return (fabs(self._centre - other._centre) < tol
                and fabs(self._radius - other._radius) < tol)

    def __hash__(self):
        # NOTE(review): hash is exact while __eq__ is tolerance-based, so
        # equal circles may hash differently.
        return hash((self._centre, self._radius))

    def __repr__(self):
        return "".join([
            "Circle (Centre =", str(self._centre),
            ", Radius =", str(self._radius), ")"
        ])

    @staticmethod
    def ExtractCircle(self, centre, radius):
        # Project convention: called as ec.ExtractCircle(ec, ...) with the
        # instance passed explicitly.
        self._centre = centre
        self._radius = radius
def ArrowHeadDirection(Feature_Manager):
    """Assign a pointing direction to every detected arrowhead.

    For each arrowhead: crop its bounding box, detect corner features,
    take the corner farthest from the corner centroid as the arrow tip,
    project tip and centroid onto the first leader line, and derive the
    direction from their order along that line.  Results are written to
    ah._Direction and propagated via Cognition.AssignArrowHeadsDirection.
    """
    OriginalImg = Feature_Manager._ImageOriginal.copy()
    OutputImg = OriginalImg.copy()
    DimensionalLines = Feature_Manager._DetectedDimensionalLine
    ArrowHeadsList = Feature_Manager._DetectedArrowHead
    for dl in DimensionalLines:
        for ah in dl._ArrowHeads:
            p1 = ah._BoundingBoxP1
            p2 = ah._BoundingBoxP2
            TempImg = OriginalImg[p1.y:p2.y + 4, p1.x:p2.x + 4]
            TempImg = ImgTransform.ImgAspectResize(TempImg, 100, 100)
            cornerImg = TempImg.copy()
            gray = cv2.cvtColor(TempImg, cv2.COLOR_BGR2GRAY)
            corners = cv2.goodFeaturesToTrack(gray, 20, 0.09, 10, True)
            # BUGFIX: np.int0 was removed in NumPy 2.0; np.intp is the
            # platform-int alias it stood for.
            corners = np.intp(corners)
            cornerPts = []
            xpts = []
            ypts = []
            for i in corners:
                x, y = i.ravel()
                p = Point2(x, y)
                xpts.append(x)
                ypts.append(y)
                cornerPts.append(p)
                cv2.circle(cornerImg, (x, y), 1, 255, -1)
            # Centroid of all corner points.
            xc = np.mean(xpts)
            yc = np.mean(ypts)
            c = Point2(xc, yc)
            dictPt = {}
            for i in cornerPts:
                d = sqrt((c.x - i.x)**2 + (c.y - i.y)**2)
                dictPt[i] = d
            sortedDict = sorted(dictPt.items(), key=operator.itemgetter(1))
            # Farthest corner from the centroid is taken as the arrow tip.
            extremePt = sortedDict[-1][0]
            cv2.circle(cornerImg, (extremePt.x, extremePt.y), 3, 255, -1)
            projectedPt = MathUtils.ProjectToLine2(
                dl._Leaders[0].startPoint, dl._Leaders[0].endPoint, extremePt)
            projectedCorner = MathUtils.ProjectToLine2(
                dl._Leaders[0].startPoint, dl._Leaders[0].endPoint, c)
            Direction = Cognition.GetDirection(projectedPt, projectedCorner)
            ah._Direction = Direction
            Cognition.AssignArrowHeadsDirection(ArrowHeadsList,
                                                ah._ArrowCenter, Direction)
            print(Direction)  # debug trace; consider removing in production
def Detect(Feature_Manager):
    """Detect arrowhead candidates in the drawing image.

    Otsu-thresholds the image, removes fine detail with a black-hat,
    erodes and dilates, then keeps external contours whose area lies in
    the typical arrowhead range (35, 70].  Each hit is recorded as an
    ArrowHeads object and drawn onto a copy of the image, which is also
    written to disk.  Returns (ExtractedArrows, annotated_image).
    """
    drawing = Feature_Manager._ImageOriginal.copy()
    grayscale = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    fine_kernel = np.ones((2, 2), np.uint8)
    blackhat = cv2.morphologyEx(binary, cv2.MORPH_BLACKHAT, fine_kernel)
    inverted = cv2.bitwise_not(binary)
    cleaned = inverted - blackhat
    arrows_image = cleaned.copy()
    coarse_kernel = np.ones((3, 3), np.uint8)
    eroded = cv2.erode(arrows_image, coarse_kernel)
    dilated = cv2.dilate(eroded, coarse_kernel)
    # NOTE: three-value unpack matches the OpenCV 3.x findContours API;
    # OpenCV 4.x returns only (contours, hierarchy).
    _im, contour, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
    empty_image = eroded.copy()
    empty_image.fill(0)
    ExtractedArrows = []
    for cnt in contour:
        area = cv2.contourArea(cnt)
        if 35 < area <= 70:
            x, y, w, h = cv2.boundingRect(cnt)
            P1 = Point2(int(x - 3), int(y - 3))
            P2 = Point2(int(x + w + 2), int(y + h + 2))
            cv2.rectangle(drawing, (x - 3, y - 3), (x + w + 2, y + h + 2),
                          (255, 0, 0), 1)
            moments = cv2.moments(cnt)
            # Contour centroid; m00 > 0 is guaranteed by the area filter.
            cx = int(moments['m10'] / moments['m00'])
            cy = int(moments['m01'] / moments['m00'])
            centre = Point2(cx, cy)
            ar = ArrowHeads()
            ar.ExtractArrowHead(ar, P1, P2, centre)
            ExtractedArrows.append(ar)
            cv2.circle(drawing, (cx, cy), 1, (0, 255, 0), 1)
    make_dir_root = Feature_Manager._RootDirectory
    cv2.imwrite(make_dir_root + "/Arrowheads_Extraction_Output.png", drawing)
    return ExtractedArrows, drawing
def HoughLineP(img, rho, theta, threshold, minLineLength, maxLineGap):
    """Run the probabilistic Hough transform on *img*.

    Detected segments are collected as ExtractedLines and drawn onto
    *img* in red.  Returns (extractedLines, img).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    canny = cv2.Canny(gray, 50, 150, apertureSize=3)
    # BUGFIX: minLineLength/maxLineGap must be passed as keywords — the
    # 5th positional argument of cv2.HoughLinesP is the optional `lines`
    # output buffer, so the two thresholds were silently misapplied.
    lines = cv2.HoughLinesP(canny, int(rho), float(theta), int(threshold),
                            minLineLength=int(minLineLength),
                            maxLineGap=int(maxLineGap))
    extractedLines = []
    # BUGFIX: HoughLinesP returns None when nothing is found; the old
    # `None != lines.any()` test raised AttributeError in that case.
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                p1 = Point2(x1, y1)
                p2 = Point2(x2, y2)
                ex = ExtractedLines()
                # Project convention: instance passed explicitly.
                ex.ExtractLine(ex, rho, theta, p1, p2)
                extractedLines.append(ex)
                cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
    return extractedLines, img
def ProjectToLine2(p1, p2, p):
    """Orthogonally project point *p* onto the infinite line through
    *p1* and *p2*.  A degenerate (zero-length) line returns *p1*."""
    direction = (p2 - p1)
    length_sq = direction.Dot(direction)
    # Degenerate line: p1 == p2 within precision.
    if (fabs(length_sq) < Constants.PRECISION):
        return p1
    offset = (p - p1)
    u = direction.Dot(offset) / length_sq
    px = p1.x + (u * (p2.x - p1.x))
    py = p1.y + (u * (p2.y - p1.y))
    return Point2(px, py)
def CornerShiTomasai(img, numCorners, qualityOfCorner, minElucDistance, useHarris):
    """Detect up to *numCorners* Shi-Tomasi (or Harris, when *useHarris*
    is truthy) corners in *img*, mark each with a small circle, and
    return (points, image)."""
    image = img
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    corners = cv2.goodFeaturesToTrack(gray, numCorners, qualityOfCorner,
                                      minElucDistance,
                                      useHarrisDetector=useHarris)
    # BUGFIX: np.int0 was removed in NumPy 2.0; np.intp is the
    # platform-int alias it stood for.
    corners = np.intp(corners)
    points = []
    for corner in corners:
        x, y = corner.ravel()
        cv2.circle(image, (x, y), 2, (0, 60, 255), -1)
        points.append(Point2(x, y))
    return points, image
def GetOrientation(Dimensionallines, BB):
    """Return the orientation angle of the text box *BB*.

    The text bounding box (x, y, w, h) is inflated by 5px and tested for
    overlap against every dimensional-line leader rectangle (inflated by
    6px).  The first overlapping leader's angle to the axis is returned;
    0 when no leader overlaps.
    """
    # Inflated corners of the text bounding box.
    text_P1 = Point2(BB[0] - 5, BB[1] - 5)
    text_P2 = Point2(BB[0] + BB[2] + 5, BB[1] - 5)
    text_P3 = Point2(BB[0] + BB[2] + 5, BB[1] + BB[3] + 5)
    text_P4 = Point2(BB[0] - 5, BB[1] + BB[3] + 5)
    line = Line2(text_P1, text_P3)
    # Order the diagonal corners by their parameter along the diagonal
    # so Rect1_p1 is always the "lower" corner.
    up1 = Cognition.GetUParam(text_P1, line)
    up2 = Cognition.GetUParam(text_P3, line)
    if up2 > up1:
        Rect1_p1 = text_P1
        Rect1_p2 = text_P3
    else:
        Rect1_p1 = text_P3
        Rect1_p2 = text_P1
    Rect1_p3 = text_P2
    Rect1_p4 = text_P4
    OrientationAngle = 0
    for i in Dimensionallines:
        for l in i._Leaders:
            line = l
            P1 = l.startPoint
            P2 = l.endPoint
            # Leader's bounding rectangle, inflated by 6px.
            Rect2_p1 = Point2(P1.x - 6, P1.y - 6)
            Rect2_p2 = Point2(P2.x + 6, P2.y + 6)
            Rect2_p3 = Point2(P2.x + 6, P2.y - 6)
            Rect2_p4 = Point2(P1.x - 6, P1.y + 6)
            Rect1_Segments = [
                Line2(Rect1_p1, Rect1_p2),
                Line2(Rect1_p2, Rect1_p3),
                Line2(Rect1_p3, Rect1_p4),
                Line2(Rect1_p4, Rect1_p1)
            ]
            Rect2_Segments = [
                Line2(Rect2_p1, Rect2_p2),
                Line2(Rect2_p2, Rect2_p3),
                Line2(Rect2_p3, Rect2_p4),
                Line2(Rect2_p4, Rect2_p1)
            ]
            overlap = Cognition.CheckIfOverlapLineSegments(
                Rect1_Segments, Rect2_Segments)
            if overlap == True:
                # First overlapping leader decides the orientation.
                OrientationAngle = Cognition.getAngleBetweenLineAndAxis(
                    P1, P2)
                return OrientationAngle
    return OrientationAngle
def checkForVicinity(img, p1, p2):
    """Count dark pixels on the diagonal around the midpoint of p1-p2.

    The score starts at 1 and gains one point for each dark probe at
    diagonal offsets ±1 and ±2 from the midpoint; if exactly three of
    those four probes hit (score == 4), the ±3 offsets act as a
    tie-breaker.  Returns the accumulated score (at most 5).
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    mid = Point2(int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2))
    pixelPresent = 1
    for off in (1, -1, 2, -2):
        if binary[mid.y + off, mid.x + off] == 0:
            pixelPresent += 1
    if pixelPresent == 4:
        if binary[mid.y + 3, mid.x + 3] == 0 or binary[mid.y - 3, mid.x - 3] == 0:
            pixelPresent += 1
    return pixelPresent
def __init__(self):
    """Initialise an empty circle record."""
    self._centre = Point2(0, 0)  # circle centre (Point2)
    self._radius = 0             # radius in pixels
    self._pixels = []            # raster pixels assigned to this circle
def Detect(Feature_Manager):
    """Split detected lines into thin support lines and thick entity
    lines, then attach nearby support lines to each dimension.

    Phase 1: scans the pixels along every detected line and classifies
    each point by local stroke thickness (<= 60% of the image maximum =
    support line, otherwise entity line), then chains consecutive points
    into segments.  Phase 2: for every dimension's arrowheads, collects
    support segments on the side the arrow points to.  Side effects:
    fills d._SupportLines and replaces Feature_Manager._DetectedLine
    with the entity segments.
    """
    lines = Feature_Manager._DetectedLine
    dimension = Feature_Manager._DetectedDimension
    supportLinesegments = []
    entityLinesegments = []
    dist_Img = Cognition.DistanceTransform(Feature_Manager._ImageCleaned)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(dist_Img)
    for l in lines:
        for seg in l:
            PointsBetweenLine = SpecialLineSegments.PixelScanner(
                seg.startPoint, seg.endPoint, dist_Img)
            sLinesPoints = []
            eLinePoints = []
            for i in PointsBetweenLine:
                t = Cognition.CheckThicknessInVicinity(
                    i[0], i[1], dist_Img)
                pt = Point2(i[0], i[1])
                if t is None:
                    continue
                # Thin strokes are support/extension lines; thick ones
                # belong to the drawn entity itself.
                if t <= 0.6 * maxVal:
                    sLinesPoints.append(pt)
                else:
                    eLinePoints.append(pt)
            sortSuppPts = sLinesPoints
            sortEntPts = eLinePoints
            # Chain consecutive support points; a gap > 30px starts a
            # new segment.
            if (len(sortSuppPts) > 1):
                startsupportPt = sortSuppPts[0]
                for i in range(0, len(sortSuppPts) - 1):
                    p1 = sortSuppPts[i]
                    p2 = sortSuppPts[i + 1]
                    dist = p1.DistanceTo(p2)
                    if dist > 30:
                        Supportlinesegment = Line2(startsupportPt, p1)
                        supportLinesegments.append(Supportlinesegment)
                        startsupportPt = p2
                Supportlinesegment = Line2(startsupportPt, p2)
                supportLinesegments.append(Supportlinesegment)
            # Chain consecutive entity points; a gap > 2px starts a new
            # segment.
            if (len(sortEntPts) > 1):
                startentityPt = sortEntPts[0]
                elines = []
                for i in range(0, len(sortEntPts) - 1):
                    eline = []
                    p1 = sortEntPts[i]
                    p2 = sortEntPts[i + 1]
                    dist = p1.DistanceTo(p2)
                    if dist > 2:
                        entitylinesegment = Line2(startentityPt, p1)
                        eline.append(entitylinesegment)
                        entityLinesegments.append(eline)
                        startentityPt = p2
                entitylinesegment = Line2(startentityPt, p2)
                elines.append(entitylinesegment)
                entityLinesegments.append(elines)
    # Phase 2: for each dimension, collect the support segments that lie
    # on the side its arrowheads point to and close to the arrow box.
    for d in dimension:
        SupportL = []
        for a in d._DimensionalLines._ArrowHeads:
            BB = Cognition.SortCoordinates(
                [a._BoundingBoxP1, a._BoundingBoxP2])
            bbMin = BB[0]
            bbMax = BB[1]
            direction = a._Direction
            if direction == "West":
                shortListedLines = []
                SupportLSW = []
                # Near-vertical segments left of the arrow box.
                for ls in supportLinesegments:
                    if bbMin.x + 3 > ls.startPoint.x and fabs(
                            ls.endPoint.x - ls.startPoint.x) < 3 and fabs(
                                ls.endPoint.y - ls.startPoint.y) > 5:
                        shortListedLines.append(ls)
                for ls in shortListedLines:
                    projectedPt = MathUtils.ProjectToLine2(
                        ls.startPoint, ls.endPoint, bbMin)
                    projectedDistance = bbMin.DistanceTo(projectedPt)
                    if projectedDistance < 8:  #<7
                        SupportLSW.append(ls)
                SupportL.append(SupportLSW)
            elif direction == "East":
                shortListedLines = []
                SupportLSE = []
                # Near-vertical segments right of the arrow box.
                for ls in supportLinesegments:
                    if bbMax.x - 3 < ls.startPoint.x and fabs(
                            ls.endPoint.x - ls.startPoint.x) < 3 and fabs(
                                ls.endPoint.y - ls.startPoint.y) > 5:
                        shortListedLines.append(ls)
                for ls in shortListedLines:
                    projectedPt = MathUtils.ProjectToLine2(
                        ls.startPoint, ls.endPoint, bbMax)
                    projectedDistance = bbMax.DistanceTo(projectedPt)
                    if projectedDistance < 8:
                        SupportLSE.append(ls)
                SupportL.append(SupportLSE)
            elif direction == "North":
                shortListedLines = []
                SupportLSN = []
                # Near-horizontal segments above the arrow box.
                for ls in supportLinesegments:
                    if bbMin.y + 3 > ls.startPoint.y and fabs(
                            ls.endPoint.y - ls.startPoint.y) < 3 and fabs(
                                ls.endPoint.x - ls.startPoint.x) > 5:
                        shortListedLines.append(ls)
                for ls in shortListedLines:
                    projectedPt = MathUtils.ProjectToLine2(
                        ls.startPoint, ls.endPoint, bbMin)
                    projectedDistance = bbMin.DistanceTo(projectedPt)
                    if projectedDistance < 8:
                        SupportLSN.append(ls)
                SupportL.append(SupportLSN)
            elif direction == "South":
                SupportLSS = []
                shortListedLines = []
                # Near-horizontal segments below the arrow box.
                for ls in supportLinesegments:
                    if bbMax.y - 3 < ls.startPoint.y and fabs(
                            ls.endPoint.y - ls.startPoint.y) < 3 and fabs(
                                ls.endPoint.x - ls.startPoint.x) > 5:
                        shortListedLines.append(ls)
                for ls in shortListedLines:
                    projectedPt = MathUtils.ProjectToLine2(
                        ls.startPoint, ls.endPoint, bbMax)
                    projectedDistance = bbMax.DistanceTo(projectedPt)
                    if projectedDistance < 8:
                        SupportLSS.append(ls)
                SupportL.append(SupportLSS)
        d._SupportLines = SupportL
    Feature_Manager._DetectedLine = entityLinesegments
def __init__(self):
    """Initialise an empty arrowhead record."""
    self._BoundingBoxP1 = Point2(0, 0)  # top-left corner of the arrow box
    self._BoundingBoxP2 = Point2(0, 0)  # bottom-right corner of the arrow box
    self._ArrowCenter = Point2(0, 0)    # centroid of the arrowhead contour
    self._Direction = ""                # e.g. "North"/"South"/"East"/"West" once assigned
def __init__(self):
    """Initialise an empty extracted line (Hough form plus endpoints)."""
    self._rho = 0              # Hough distance parameter
    self._theta = 0            # Hough angle parameter
    self._p1 = Point2(0, 0)    # first endpoint
    self._p2 = Point2(0, 0)    # second endpoint
def __init__(self):
    """Initialise a dimensional-text record with placeholder values."""
    self._Text = "AdityaIntwala"    # placeholder for the OCR-detected string
    self._TextBoxP1 = Point2(0, 0)  # top-left corner of the text box
    self._TextBoxP2 = Point2(0, 0)  # bottom-right corner of the text box
    self._Orientation = 90          # text angle in degrees
def __init__(self):
    """Initialise a degenerate segment with both endpoints at the origin."""
    self.startPoint = Point2(0, 0)
    self.endPoint = Point2(0, 0)
def ProximityCorrelation(Detection_Manager):
    """Pair each detected text box with its nearest dimensional line.

    Both text boxes and dimensional lines are sorted by the coordinates
    of their midpoints; each text box is then tested (in sorted order)
    for rectangle overlap against each not-yet-claimed line's inflated
    leader rectangles.  The first overlap claims the line, producing a
    Dimensions record.  Returns the list of correlated Dimensions.
    """
    TextBoxes = Detection_Manager._DetectedDimensionalText
    DimensionalLines = Detection_Manager._DetectedDimensionalLine
    # Map each text-box midpoint to its record, keeping a list of the
    # midpoints to sort.
    TextBox_Midpoints = {}
    TextBox_Midpointslist = []
    for i in TextBoxes:
        md_point = Cognition.MidPoint(i._TextBoxP1, i._TextBoxP2)
        TextBox_Midpoints[md_point] = i
        TextBox_Midpointslist.append(md_point)
    sortedTextBoxMidpoints = Cognition.SortCoordinates(
        TextBox_Midpointslist)
    # Recover the text-box records in sorted-midpoint order by integer
    # coordinate match.
    sortedTextBoxPoints = []
    for i in sortedTextBoxMidpoints:
        for j in TextBox_Midpoints.keys():
            if int(i.x) == int(j.x) and int(i.y) == int(j.y):
                val = TextBox_Midpoints[j]
                sortedTextBoxPoints.append(val)
    # Same midpoint-sort procedure for the dimensional lines (using the
    # leaders' inflated endpoints).
    DimensionalLine_Midpoints = {}
    DimensionalLine_Midpointslist = []
    for DL in DimensionalLines:
        for i in DL._Leaders:
            P1 = Point2(i.startPoint.x - 6, i.startPoint.y - 6)
            P2 = Point2(i.endPoint.x + 6, i.endPoint.y + 6)
            md_point = Cognition.MidPoint(P1, P2)
            DimensionalLine_Midpoints[md_point] = DL
            DimensionalLine_Midpointslist.append(md_point)
    sortedLineMidpoints = Cognition.SortCoordinates(
        DimensionalLine_Midpointslist)
    sortedLinePoints = []
    for i in sortedLineMidpoints:
        for j in DimensionalLine_Midpoints.keys():
            if int(i.x) == int(j.x) and int(i.y) == int(j.y):
                val = DimensionalLine_Midpoints[j]
                sortedLinePoints.append(val)
    DimensionCorrelated = []
    removedLs = []  # lines already claimed by an earlier text box
    for i in sortedTextBoxPoints:
        # Inflated corners of the text box.
        text_P1 = Point2(i._TextBoxP1.x - 3, i._TextBoxP1.y - 3)
        text_P2 = Point2(i._TextBoxP2.x + 3, i._TextBoxP1.y - 3)
        text_P3 = Point2(i._TextBoxP2.x + 3, i._TextBoxP2.y + 3)
        text_P4 = Point2(i._TextBoxP1.x - 3, i._TextBoxP2.y + 3)
        line = Line2(text_P1, text_P3)
        # Order the diagonal corners by their parameter along the diagonal.
        up1 = Cognition.GetUParam(text_P1, line)
        up2 = Cognition.GetUParam(text_P3, line)
        if up2 > up1:
            Rect1_p1 = text_P1
            Rect1_p2 = text_P3
        else:
            Rect1_p1 = text_P3
            Rect1_p2 = text_P1
        Rect1_p3 = text_P2
        Rect1_p4 = text_P4
        overlap = False
        for j in sortedLinePoints:
            if overlap != True:
                if j not in removedLs:
                    for l in j._Leaders:
                        line = l
                        P1 = l.startPoint
                        P2 = l.endPoint
                        # Leader rectangle, inflated by 6px.
                        Rect2_p1 = Point2(P1.x - 6, P1.y - 6)
                        Rect2_p2 = Point2(P2.x + 6, P2.y + 6)
                        Rect2_p3 = Point2(P2.x + 6, P2.y - 6)
                        Rect2_p4 = Point2(P1.x - 6, P1.y + 6)
                        Rect1_Segments = [
                            Line2(Rect1_p1, Rect1_p2),
                            Line2(Rect1_p2, Rect1_p3),
                            Line2(Rect1_p3, Rect1_p4),
                            Line2(Rect1_p4, Rect1_p1)
                        ]
                        Rect2_Segments = [
                            Line2(Rect2_p1, Rect2_p2),
                            Line2(Rect2_p2, Rect2_p3),
                            Line2(Rect2_p3, Rect2_p4),
                            Line2(Rect2_p4, Rect2_p1)
                        ]
                        overlap = Cognition.CheckIfOverlapLineSegments(
                            Rect1_Segments, Rect2_Segments)
                        if overlap == True:
                            # Claim this line for the current text box.
                            removedLs.append(j)
                            D = Dimensions()
                            D.ExtractDimension(D, j, i)
                            DimensionCorrelated.append(D)
                            break
            else:
                break
    return DimensionCorrelated
def DimensionProximityCorrelation(Detection_Manager):
    """Correlate each dimensional text with the first dimensional line
    whose inflated leader rectangle overlaps the inflated text box.

    Unlike ProximityCorrelation, candidates are not sorted and lines may
    be matched by more than one text.  Returns a list of Dimensions.
    """
    DimensionCorrelated = []
    Dimensionallines = Detection_Manager._DetectedDimensionalLine
    DimensionalText = Detection_Manager._DetectedDimensionalText
    for DT in DimensionalText:
        # Inflated corners of the text box.
        text_P1 = Point2(DT._TextBoxP1.x - 3, DT._TextBoxP1.y - 3)
        text_P2 = Point2(DT._TextBoxP2.x + 3, DT._TextBoxP1.y - 3)
        text_P3 = Point2(DT._TextBoxP2.x + 3, DT._TextBoxP2.y + 3)
        text_P4 = Point2(DT._TextBoxP1.x - 3, DT._TextBoxP2.y + 3)
        line = Line2(text_P1, text_P3)
        # Order the diagonal corners by their parameter along the diagonal.
        up1 = Cognition.GetUParam(text_P1, line)
        up2 = Cognition.GetUParam(text_P3, line)
        if up2 > up1:
            Rect1_p1 = text_P1
            Rect1_p2 = text_P3
        else:
            Rect1_p1 = text_P3
            Rect1_p2 = text_P1
        Rect1_p3 = text_P2
        Rect1_p4 = text_P4
        overlap = False
        for i in Dimensionallines:
            if overlap != True:
                for l in i._Leaders:
                    line = l
                    # Order the leader endpoints along their own line.
                    up1 = Cognition.GetUParam(l.startPoint, line)
                    up2 = Cognition.GetUParam(l.endPoint, line)
                    if up2 > up1:
                        P1 = l.startPoint
                        P2 = l.endPoint
                    else:
                        P1 = l.endPoint
                        P2 = l.startPoint
                    # Leader rectangle, inflated by 6px.
                    Rect2_p1 = Point2(P1.x - 6, P1.y - 6)
                    Rect2_p2 = Point2(P2.x + 6, P2.y + 6)
                    Rect2_p3 = Point2(P2.x + 6, P2.y - 6)
                    Rect2_p4 = Point2(P1.x - 6, P1.y + 6)
                    Rect1_Segments = [
                        Line2(Rect1_p1, Rect1_p2),
                        Line2(Rect1_p2, Rect1_p3),
                        Line2(Rect1_p3, Rect1_p4),
                        Line2(Rect1_p4, Rect1_p1)
                    ]
                    Rect2_Segments = [
                        Line2(Rect2_p1, Rect2_p2),
                        Line2(Rect2_p2, Rect2_p3),
                        Line2(Rect2_p3, Rect2_p4),
                        Line2(Rect2_p4, Rect2_p1)
                    ]
                    overlap = Cognition.CheckIfOverlapLineSegments(
                        Rect1_Segments, Rect2_Segments)
                    if overlap == True:
                        D = Dimensions()
                        D.ExtractDimension(D, i, DT)
                        DimensionCorrelated.append(D)
                        break
            else:
                break
    return DimensionCorrelated
def MidPoint(p1, p2):
    """Integer midpoint of the segment from *p1* to *p2*."""
    mid_x = int((p1.x + p2.x) / 2)
    mid_y = int((p1.y + p2.y) / 2)
    return Point2(mid_x, mid_y)
def __init__(self):
    """Initialise an empty extracted-text record."""
    self._p1 = Point2(0, 0)   # top-left corner of the text box
    self._p2 = Point2(0, 0)   # bottom-right corner of the text box
    self._text = ""           # OCR-detected string
    # Cropped image patch (100x100 BGR placeholder until filled).
    self._cropedImg = np.zeros([100, 100, 3], dtype=np.uint8)
def __init__(self):
    """Initialise an empty dimensional-line record."""
    self._ArrowHeads = ArrowHeads()  # arrowheads attached to this line
    # NOTE(review): initialised as a single Line2, but other code
    # iterates _Leaders as a sequence — presumably reassigned to a list
    # before use; confirm against callers.
    self._Leaders = Line2(Point2(0, 0), Point2(2, 2))
def ToPoint2(self):
    """Return this object's (x, y) coordinates as a Point2."""
    return Point2(self.x, self.y)
def circleFiltering(DetectedCircles):
    """Deduplicate raw circle detections.

    Circles whose centre coordinates and radius all differ by less than
    4px are considered duplicates: duplicate pairs are replaced by their
    component-wise average and the originals are marked unwanted.  The
    surviving (x, y, r) tuples are wrapped into ExtractedCircles.
    """
    ThresholdPixel = 4  # per-component tolerance for "same circle"
    RequiredCircles = []   # kept (x, y, r) tuples
    UnwantedCircles = []   # tuples merged away as duplicates
    for i in range(0, len(DetectedCircles)):
        c1 = DetectedCircles[i]
        c1 = (int(c1[0]), int(c1[1]), int(c1[2]))
        if c1 not in UnwantedCircles:
            if c1 not in RequiredCircles:
                if len(RequiredCircles) != 0:
                    # Skip c1 if something within tolerance is already kept.
                    IsInRequiredCircles = []
                    for j in RequiredCircles:
                        if fabs(j[0] - c1[0]) < ThresholdPixel and fabs(j[1] - c1[1]) < ThresholdPixel and fabs(j[2] - c1[2]) < ThresholdPixel:
                            IsInRequiredCircles.append(True)
                        else:
                            IsInRequiredCircles.append(False)
                    if True in IsInRequiredCircles:
                        continue
                    else:
                        nc = (int(c1[0]), int(c1[1]), int(c1[2]))
                        RequiredCircles.append(nc)
                else:
                    nc = (int(c1[0]), int(c1[1]), int(c1[2]))
                    RequiredCircles.append(nc)
            # Pair c1 against every later detection and merge duplicates.
            for c in range(i + 1, len(DetectedCircles)):
                c2 = DetectedCircles[c]
                c2 = (int(c2[0]), int(c2[1]), int(c2[2]))
                if c2 not in UnwantedCircles:
                    if fabs(c1[0] - c2[0]) < ThresholdPixel and fabs(c1[1] - c2[1]) < ThresholdPixel and fabs(c1[2] - c2[2]) < ThresholdPixel:
                        IsInRequiredCircles = []
                        if len(RequiredCircles) > 0:
                            for j in RequiredCircles:
                                if fabs(j[0] - c2[0]) < ThresholdPixel and fabs(j[1] - c2[1]) < ThresholdPixel and fabs(j[2] - c2[2]) < ThresholdPixel:
                                    IsInRequiredCircles.append(True)
                                else:
                                    IsInRequiredCircles.append(False)
                            if True in IsInRequiredCircles:
                                continue
                            else:
                                # Replace the pair by its component average.
                                nx = int((fabs(c1[0] + c2[0])) / 2)
                                ny = int((fabs(c1[1] + c2[1])) / 2)
                                nr = int((fabs(c1[2] + c2[2])) / 2)
                                nc = (int(nx), int(ny), int(nr))
                                RequiredCircles.append(nc)
                                UnwantedCircles.append(c1)
                                UnwantedCircles.append(c2)
                        else:
                            # No kept circles yet: keep the average directly.
                            nx = int((fabs(c1[0] + c2[0])) / 2)
                            ny = int((fabs(c1[1] + c2[1])) / 2)
                            nr = int((fabs(c1[2] + c2[2])) / 2)
                            nc = (int(nx), int(ny), int(nr))
                            RequiredCircles.append(nc)
                            UnwantedCircles.append(c1)
                            UnwantedCircles.append(c2)
        else:
            # c1 was merged away earlier; re-add it only when nothing
            # within tolerance survives in the kept list.
            IsInRequiredCircles = []
            for j in RequiredCircles:
                if fabs(j[0] - c1[0]) < ThresholdPixel and fabs(j[1] - c1[1]) < ThresholdPixel and fabs(j[2] - c1[2]) < ThresholdPixel:
                    IsInRequiredCircles.append(True)
                else:
                    IsInRequiredCircles.append(False)
            if True in IsInRequiredCircles:
                continue
            else:
                nc = (int(c1[0]), int(c1[1]), int(c1[2]))
                RequiredCircles.append(nc)
    # Wrap the surviving tuples in ExtractedCircles records.
    RequiredCirclesExtracted = []
    for i in RequiredCircles:
        EC = ExtractedCircles()
        centre = Point2(i[0], i[1])
        radius = i[2]
        EC.ExtractCircle(EC, centre, radius)
        RequiredCirclesExtracted.append(EC)
    return RequiredCirclesExtracted
def __init__(self):
    """Initialise an empty line record with degenerate geometry."""
    self._p1 = Point2(0, 0)                 # first endpoint
    self._p2 = Point2(0, 0)                 # second endpoint
    self._line = Line2(self._p1, self._p2)  # degenerate segment until set
    self._lineSegments = []                 # sub-segments of this line
    self._cornerPoints = []                 # detected corner points on it