@classmethod
def Load(cls, file_path, rescale_in_preprocessing=False):
    # The archive is expected to hold the processed images under 'image_data'
    # and the label matrix under 'chars'.
    training_data = numpy.load(file_path)
    image_input = training_data['image_data']
    if rescale_in_preprocessing:
        for row in range(image_input.shape[0]):
            image_input[row, 0, :, :] = ImagePreprocessor.RescaleImageInput(
                image_input[row, 0, :, :])
    else:
        image_input = ImagePreprocessor.NormalizeImageInput(image_input)
    ret = (image_input, training_data['chars'])
    # Drop the file-handle attribute before closing the archive.
    del training_data.f
    training_data.close()
    return ret
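# A minimal round-trip sketch of the format Load expects, with the class-level
# helpers inlined. The archive name, the array shapes, and the /255.0
# normalization stand-in below are assumptions, not part of the original code:
import numpy

numpy.savez('/tmp/demo_training_data.npz',
            image_data=numpy.random.rand(4, 1, 48, 128).astype(numpy.float32),
            chars=numpy.zeros((4, 8), dtype=numpy.int32))

with numpy.load('/tmp/demo_training_data.npz') as training_data:
    image_input = training_data['image_data'] / 255.0  # stand-in for NormalizeImageInput
    chars = training_data['chars']
print(image_input.shape, chars.shape)  # (4, 1, 48, 128) (4, 8)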
@classmethod
def GenerateTrainingData(cls, captchas_dir, training_data_dir,
                         max_size=500, max_captcha_length=8):
    image_shape = _GetShapeOfImagesUnderDir(captchas_dir)
    training_data_shape = tuple(
        [max_size] + list(ImagePreprocessor.GetProcessedImageShape(image_shape)))
    training_image_data = numpy.zeros(training_data_shape, dtype=numpy.float32)
    training_labels = numpy.zeros((max_size, max_captcha_length),
                                  dtype=numpy.int32)
    i = 0
    for captcha_filepath in utils.GetFilePathsUnderDir(captchas_dir):
        try:
            image_data = ImagePreprocessor.GetImageData(captcha_filepath)
        except Exception as e:
            print(e, captcha_filepath)
            continue
        i += 1
        # The arrays act as a ring buffer: slots are reused every max_size
        # examples, and the buffer is flushed to disk each time it wraps.
        index = i % max_size
        training_image_data[index] = ImagePreprocessor.ProcessImage(image_data)
        captcha_ids = _GetCaptchaIdsFromImageFilename(captcha_filepath)
        training_labels[index, :] = numpy.zeros(max_captcha_length,
                                                dtype=numpy.int32)
        training_labels[index, :captcha_ids.shape[0]] = captcha_ids
        if i != 0 and (i % 1000) == 0:
            print('Generated {0} examples.'.format(i))
        if i != 0 and i % max_size == 0:
            print(i)
            file_path = os.path.join(
                training_data_dir,
                "training_images_{0}.npy".format(i // max_size))
            try:
                cls.Save(file_path, training_image_data, training_labels)
            except Exception as e:
                print(e)
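# Tiny illustration of the label layout built above: each row is zero-padded
# to max_captcha_length, with the character ids parsed from the filename
# occupying the leading slots (the ids below are made up):
import numpy

max_captcha_length = 8
captcha_ids = numpy.array([12, 3, 25, 7], dtype=numpy.int32)  # hypothetical ids

label = numpy.zeros(max_captcha_length, dtype=numpy.int32)
label[:captcha_ids.shape[0]] = captcha_ids
print(label)  # [12  3 25  7  0  0  0  0]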
def get_dataset(self, image_dir):
    """Build the MOT17 tracking dataset with the resizing preprocessor attached.

    :param image_dir: root directory of the MOT17 sequences
    :return: a Mot17Tracking dataset whose frames pass through ImagePreprocessor
    """
    preprocessor = ImagePreprocessor(resize_ratio=.75)
    dataset = Mot17Tracking(root=image_dir, transform=preprocessor)
    return dataset
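# A self-contained sketch of the same transform-as-callable wiring using
# stand-ins: FakeFrames and the resize lambda below are hypothetical
# substitutes for Mot17Tracking and ImagePreprocessor(resize_ratio=.75).
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

class FakeFrames(Dataset):
    """Yields random 'camera frames' through an optional transform."""
    def __init__(self, transform=None):
        self.transform = transform
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        frame = torch.rand(3, 480, 640)
        return self.transform(frame) if self.transform else frame

# Stand-in for the preprocessor: scale every frame by 0.75.
resize = lambda f: F.interpolate(f.unsqueeze(0), scale_factor=0.75,
                                 mode='bilinear', align_corners=False).squeeze(0)

loader = DataLoader(FakeFrames(transform=resize), batch_size=4)
for batch in loader:
    print(batch.shape)  # torch.Size([4, 3, 360, 480])
    break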
def get(self, images_dir):
    # Market-1501 images are 64 wide by 128 high, so we maintain that 1:2
    # aspect ratio here (128 x 256).
    # NOTE: for some reason only 224 x 224 works; any other shape results in NaN.
    dataset = Market1501TripletDataset(images_dir,
                                       min_img_size_h=256,
                                       min_img_size_w=128)
    processor = ImagePreprocessor(min_img_size_h=dataset.min_img_size_h,
                                  min_img_size_w=dataset.min_img_size_w,
                                  original_height=dataset.original_height,
                                  original_width=dataset.original_width)
    dataset.preprocessor = processor
    return dataset
class BackDetector(object):
    '''
        Detects objects in the back of the robot... which is what's
        pointing at the hot goal when the autonomous mode starts

        This code is essentially a translation of the 2014 Vision Sample
        into OpenCV/Python
    '''

    def __init__(self):
        self.preprocessor = ImagePreprocessor()

        # each of these attributes that start with show_ will be
        # turned into booleans
        self.show_contours = (False, 'Show all found contours')
        self.show_toosmall = (False, 'Show too small contours')
        self.show_horizontal = (True, 'Show horizontal targets')
        self.show_vertical = (True, 'Show vertical targets')
        self.show_aspect = (False, 'Show aspect ratios')
        self.show_rectangularity = (False, 'Show rectangularity')
        self.show_hot = (True, 'Show hot targets')
        self.show_hot_text = (True, 'Show hot text')

    def ratioToScore(self, ratio):
        return float(max(0, min(100.0 * (1.0 - float(abs(1.0 - float(ratio)))), 100.0)))

    def scoreRectangularity(self, contour):
        x, y, w, h = cv2.boundingRect(contour)
        if float(w) * float(h) != 0:
            return 100.0 * cv2.contourArea(contour) / (w * h)
        else:
            return 0

    def scoreAspectRatio(self, width, height, vertical):
        if vertical:
            idealAspectRatio = 4.0 / 32.0
        else:
            idealAspectRatio = 23.5 / 4.0

        # determine the long and short sides of the rectangle
        if float(width) > float(height):
            rectLong = float(width)
            rectShort = float(height)
        else:
            rectLong = float(height)
            rectShort = float(width)

        if width > height:
            aspectRatio = self.ratioToScore(float(rectLong) / float(rectShort) / float(idealAspectRatio))
        else:
            aspectRatio = self.ratioToScore(float(rectShort) / float(rectLong) / float(idealAspectRatio))

        return aspectRatio

    def scoreCompare(self, vertical, rectangularity, verticalAspectRatio, horizontalAspectRatio):
        isTarget = True

        #rectangularityLimit = 40
        #aspectRatioLimit = 55

        # on a smaller image, I'm willing to fudge a bit
        rectangularityLimit = 30
        aspectRatioLimit = 40

        isTarget = isTarget and rectangularity > rectangularityLimit
        if vertical:
            isTarget = isTarget and verticalAspectRatio > aspectRatioLimit
        else:
            isTarget = isTarget and horizontalAspectRatio > aspectRatioLimit

        return isTarget

    def hotOrNot(self, tTapeWidthScore, tVerticalScore, tLeftScore, tRightScore):
        isHot = True
        tape_Width_Limit = 10
        vertical_Score_Limit = 50
        lr_Score_Limit = 50

        isHot = isHot and tTapeWidthScore >= tape_Width_Limit
        isHot = isHot and tVerticalScore >= vertical_Score_Limit
        isHot = isHot and (tLeftScore > lr_Score_Limit or tRightScore > lr_Score_Limit)
        return isHot

    def print_text(self, img, text, x, y, color=(255, 255, 255)):
        # TODO: could do some fancy layout here..
        cv2.putText(img, text, (int(x + 5), int(y + 5)), cv2.FONT_HERSHEY_PLAIN, 2,
                    color, bottomLeftOrigin=False)

    def minAreaRect(self, contour):
        x, y, w, h = cv2.boundingRect(contour)
        ((cx, cy), (rw, rh), rotation) = cv2.minAreaRect(contour)

        # sometimes minAreaRect decides to rotate the rectangle too much.
        # detect that and fix it.
        if (w > h and rh < rw) or (h > w and rw < rh):
            rh, rw = rw, rh  # swap
            rotation = -90.0 - rotation

        return (x, y, w, h, cx, cy, rw, rh, rotation)

    def process_image(self, img):
        p = []
        vertical_targets = []
        horizontal_targets = []
        show_toosmall = self.show_toosmall
        toosmall = []

        isHotLeft = False
        isHotRight = False

        # colors
        FOUND_CONTOURS = (0, 255, 255)
        TOOSMALL_CONTOURS = (255, 0, 255)
        HORIZ_CONTOURS = (255, 255, 0)
        VERTICAL_CONTOURS = (255, 0, 0)
        #IDENTIFIED_COLOR = (44,0,232)
        HOT_COLOR = (0, 0, 255)

        img, processed_img = self.preprocessor.process_image(img)

        contours, hierarchy = cv2.findContours(processed_img.copy(), cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_TC89_KCOS)

        if self.show_contours:
            cv2.drawContours(img, contours, -1, FOUND_CONTOURS, thickness=1)

        for contour in contours:
            #p = cv2.approxPolyDP(contour, 45, False)

            # filtering smaller contours from pictures
            if cv2.contourArea(contour) >= 40:
                p.append(contour)
            elif cv2.contourArea(contour) < 40:
                if show_toosmall:
                    toosmall.append(contour)
                continue

            # getting lengths of sides of rectangle around the contours
            x, y, w, h, cx, cy, rw, rh, rotation = self.minAreaRect(contour)

            # score rectangularity
            rectangularity = self.scoreRectangularity(contour)

            # score aspect ratio vertical
            verticalAspectRatio = self.scoreAspectRatio(w, h, vertical=True)

            # score aspect ratio horizontal
            horizontalAspectRatio = self.scoreAspectRatio(w, h, vertical=False)

            if self.show_rectangularity:
                self.print_text(img, '%.1f' % (rectangularity), cx, cy)

            if self.show_aspect:
                self.print_text(img, '%.1f, %.1f' % (verticalAspectRatio, horizontalAspectRatio), cx, cy)

            # determine if horizontal or vertical
            if self.scoreCompare(False, rectangularity, verticalAspectRatio, horizontalAspectRatio):
                horizontal_targets.append(contour)
            elif self.scoreCompare(True, rectangularity, verticalAspectRatio, horizontalAspectRatio):
                vertical_targets.append(contour)

        # store vertical targets in vertical array, horizontal targets in horizontal array
        # Match up the targets to each other

        # final targets array declaration
        if show_toosmall:
            cv2.drawContours(img, toosmall, -1, TOOSMALL_CONTOURS, thickness=1)

        if self.show_horizontal:
            cv2.drawContours(img, horizontal_targets, -1, HORIZ_CONTOURS, thickness=1)

        if self.show_vertical:
            cv2.drawContours(img, vertical_targets, -1, VERTICAL_CONTOURS, thickness=1)

        #tTotalScore = tLeftScore = tRightScore = tTapeWidthScore = tVerticalScore = 0.0
        hotTargets = []

        # for each vertical target
        for vertical_target in vertical_targets:
            x1, y1, w1, h1, cx1, cy1, rw, rh, rotation = self.minAreaRect(vertical_target)

            # for each horizontal target
            for horizontal_target in horizontal_targets:
                # measure equivalent rectangle sides
                x2, y2, w2, h2, cx2, cy2, rw, rh, rotation = self.minAreaRect(horizontal_target)
                #a, b, w2, h2 = cv2.boundingRect(horizontal_target)

                # determine if horizontal target is in expected location
                # -> to the right
                rightScore = self.ratioToScore(1.2 * (float(cx2) - float(x1) - float(w1)) / float(w2))
                # -> to the left
                leftScore = self.ratioToScore(1.2 * (float(x1) - float(cx2)) / float(w2))

                # determine if the tape width is the same
                tapeWidthScore = self.ratioToScore(float(w1) / float(h2))

                # determine if vertical location of horizontal target is correct
                verticalScore = self.ratioToScore(1.0 - (float(y2) - cy2) / (4.0 * h2))

                total = max(leftScore, rightScore)
                total = total + tapeWidthScore + verticalScore

                # if the targets match up enough, store it in an array of potential matches
                #if (total >= tTotalScore):
                #    tHorizontalIndex = horizontal_target
                #    tVerticalIndex = vertical_target
                #    tTotalScore = total
                #    tLeftScore = leftScore
                #    tRightScore = rightScore
                #    tTapeWidthScore = tapeWidthScore
                #    tVerticalScore = verticalScore
                #else:
                #    continue

                # for the potential matched targets
                #possibleHTarget = self.hotOrNot(tTapeWidthScore, tVerticalScore, tLeftScore, tRightScore)
                #print(tTapeWidthScore, tVerticalScore, tLeftScore, tRightScore)

                # actually, the best target doesn't matter. just determine whether
                # it's hot to the left or hot to the right. That's all we *really*
                # care about

                if self.hotOrNot(tapeWidthScore, verticalScore, leftScore, rightScore):
                    if rightScore > leftScore:
                        isHotLeft = True
                        if self.show_hot_text:
                            self.print_text(img, "R", cx2, cy1, HOT_COLOR)
                    else:
                        if self.show_hot_text:
                            self.print_text(img, "L", cx1, cy2, HOT_COLOR)
                        isHotRight = True

                    # TODO: left/right?
                    hotTargets.append(horizontal_target)
                    hotTargets.append(vertical_target)

        # determine if the target is hot or not
        #if len(horizontal_targets) == 0:
        #    possibleHTarget = False
        #if len(vertical_targets) > 0:
        #    if possibleHTarget == True:
        #        print ("hot target Located")
        #        isHot = True
        #    elif possibleHTarget == False:
        #        print ("hot target not Located")
        #        isHot = True

        # determine the best target
        if self.show_hot:
            cv2.drawContours(img, hotTargets, -1, HOT_COLOR, thickness=1)

        # print out the data or something.
        #if (len(horizontal_targets) != 0):
        #    if tHorizontalIndex is not None:
        #        cv2.drawContours(img, [tHorizontalIndex,], -1, IDENTIFIED_COLOR, thickness=1)
        #    if tVerticalIndex is not None:
        #        cv2.drawContours(img, [tVerticalIndex,], -1, IDENTIFIED_COLOR, thickness=1)

        # TODO: return data for targeting and stuff
        return img, (isHotRight, isHotLeft)
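# Quick check of the scoring helper above, assuming the BackDetector class is
# in scope. ratioToScore is a pure function, so it can be exercised without a
# camera or preprocessor: it peaks at 100 when the measured ratio matches the
# ideal (ratio == 1.0) and falls off linearly to 0.
detector = BackDetector.__new__(BackDetector)  # skip __init__; no state needed
for r in (0.5, 0.9, 1.0, 1.1, 2.0):
    print(r, detector.ratioToScore(r))
# -> 50.0, ~90.0, 100.0, ~90.0, 0.0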
class LaneDetector:
    def __init__(self, node_name, sub_topic, pub_topic):
        self.bridge = CvBridge()
        self.img_proc = ImagePreprocessor()

        rospy.init_node(node_name)

        # Crop parameters
        self.above = rospy.get_param("/asv_lane_track/lane_detection_node/above", 0.35)
        self.below = rospy.get_param("/asv_lane_track/lane_detection_node/below", 0.1)
        self.side = rospy.get_param("/asv_lane_track/lane_detection_node/side", 0)

        # Lane tracking parameters
        self.deviation = rospy.get_param("/asv_lane_track/lane_detection_node/deviation", 15)
        self.border = rospy.get_param("/asv_lane_track/lane_detection_node/border", 0)

        # Canny parameters
        self.low_thresh = rospy.get_param("/asv_lane_track/lane_detection_node/low_threshold", 50)
        self.high_thresh = rospy.get_param("/asv_lane_track/lane_detection_node/high_threshold", 150)
        self.aperture = rospy.get_param("/asv_lane_track/lane_detection_node/aperture", 3)

        # Subscriber and publishers
        self.image_sub = rospy.Subscriber(sub_topic, Image, self.img_callback)
        self.image_pub = rospy.Publisher(pub_topic, Image, queue_size=5)
        self.yellow_masked_pub = rospy.Publisher("/yellow_masked", Image, queue_size=5)
        self.gray_pub = rospy.Publisher("/test", Image, queue_size=5)
        self.blur_pub = rospy.Publisher("/blur", Image, queue_size=5)

        self.left_lines = deque(maxlen=QUEUE_LENGTH)
        self.right_lines = deque(maxlen=QUEUE_LENGTH)

        rospy.spin()

    def process(self, img):
        cropped = self.img_proc.crop(img, self.above, self.below, self.side)
        yellow_mask = self.img_proc.select_yellow(cropped)
        yellow_img = cv2.bitwise_and(cropped, cropped, mask=yellow_mask)
        gray = self.img_proc.convert_gray(yellow_img)
        smooth_gray = self.img_proc.blur(gray, (self.deviation, self.deviation), self.border)
        edges = self.img_proc.edge_detection(smooth_gray, self.low_thresh,
                                             self.high_thresh, self.aperture)
        lines = self.img_proc.hough_lines(edges)
        # print(lines)
        return self.img_proc.draw_lines_from_points(cropped, lines)

        # left_line, right_line = self.img_proc.lane_lines(cropped, lines)
        #
        # def mean_line(line, lines):
        #     if line is not None:
        #         lines.append(line)
        #
        #     if len(lines) > 0:
        #         line = np.mean(lines, axis=0, dtype=np.int32)
        #         line = tuple(map(tuple, line))  # make sure it's tuples not numpy array for cv2.line to work
        #     return line
        #
        # left_line = mean_line(left_line, self.left_lines)
        # right_line = mean_line(right_line, self.right_lines)
        #
        # middle_line = ((int(left_line[0][0] + (right_line[0][0] - left_line[0][0]) / 2), int(left_line[0][1])),
        #                (int(left_line[1][0] + (right_line[1][0] - left_line[1][0]) / 2), int(left_line[1][1])))
        # return self.img_proc.draw_lane_lines(cropped, (left_line, right_line, middle_line))

    def img_callback(self, data):
        try:
            cv_img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return  # no usable frame, so skip this callback

        # crop
        cropped = self.img_proc.crop(cv_img, self.above, self.below, self.side)

        # grayscale
        #gray = self.img_proc.grayscale(cropped)

        # Extract yellow color in HSV
        yellow_masked = self.img_proc.select_yellow(cropped)
        res = cv2.bitwise_and(cropped, cropped, mask=yellow_masked)

        # blur  # uhm why do you choose
        blurred = self.img_proc.blur(res, (self.deviation, self.deviation), self.border)

        # canny (published below on the debug topic)
        canny = self.img_proc.edge_detection(blurred, self.low_thresh,
                                             self.high_thresh, self.aperture)
        # cv2.imshow("canny", canny)
        # cv2.waitKey(25)

        # lines = self.img_proc.hough_lines(canny)
        # left_line, right_line = self.img_proc.lane_lines(cv_img, lines)
        # final = self.img_proc.draw_lane_lines(cv_img, (left_line, right_line))
        # for line in lines:
        #     self.img_proc.draw_lane_lines()

        lane_img = self.process(cv_img)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(lane_img, encoding="rgb8"))
            self.gray_pub.publish(self.bridge.cv2_to_imgmsg(canny))
            #cv2.imshow('mask', cv_img)
            #cv2.imshow('mask', res)
            #cv2.waitKey(25)
            #self.yellow_masked_pub.publish(self.bridge.cv2_to_imgmsg(yellow_masked, encoding="rgb8"))
        except CvBridgeError as e:
            rospy.logerr(e)
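# Standalone sketch of the same yellow-mask -> blur -> Canny -> Hough pipeline
# on a synthetic frame, without ROS (cv2 and numpy only). The defaults mirror
# the node's rosparams (kernel 15, Canny thresholds 50/150, aperture 3); the
# HSV yellow range and Hough parameters are assumptions for illustration.
import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.line(frame, (60, 239), (140, 60), (0, 255, 255), 5)   # fake yellow lane
cv2.line(frame, (260, 239), (180, 60), (0, 255, 255), 5)

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
yellow_mask = cv2.inRange(hsv, (20, 100, 100), (30, 255, 255))
masked = cv2.bitwise_and(frame, frame, mask=yellow_mask)

gray = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (15, 15), 0)             # deviation=15 default
edges = cv2.Canny(blurred, 50, 150, apertureSize=3)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=20,
                        minLineLength=20, maxLineGap=30)
print(0 if lines is None else len(lines), "line segments found")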
def _GetShapeOfImagesUnderDir(captchas_dir):
    # All captchas are assumed to share one shape, so the first image's shape
    # is returned; None means the directory held no images.
    for captcha_filepath in utils.GetFilePathsUnderDir(captchas_dir):
        image_data = ImagePreprocessor.GetImageData(captcha_filepath)
        return image_data.shape
    return None