def find_bins(self, frame, min_area, max_area, debug_frame):
    # empty variables
    discovered_bins = []

    # Find contours of every shape present after threshold
    contours, hierarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # if there are enough contours for at least one bin
    if len(contours) > 1:
        cv2.drawContours(debug_frame, contours, -1, (255, 100, 255), 1)

        for n, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if DEBUG_BIN_IDENTIFICATION:
                    print "for bin{}: area={}".format(n, area)
                if min_area < area < max_area:
                    # approximate raw contour points to a simpler polygon with fewer points
                    approx = cv2.approxPolyDP(
                        cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if DEBUG_BIN_IDENTIFICATION:
                        print "for bin{}: len={}".format(n, len(approx))
                    if 2 <= len(approx) < 12 and (0.8 < aspect_ratio < 1.2):
                        p1, p2, p3, p4 = (tuple(box[0]), tuple(box[1]),
                                          tuple(box[2]), tuple(box[3]))
                        # instantiate a new bin
                        new_bin = Bin(p1, p2, p3, p4)
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        # print "new bin created with slope: ", new_bin.line_slope
                        #print -theta
                        # if theta != 0:
                        #     new_bin.theta = np.pi * (-theta) / 180
                        # else:
                        #     new_bin.theta = 0
                        self.recent_id += 1
                        discovered_bins.append(new_bin)

    print "found {} contours. {} are bins.".format(len(contours), len(discovered_bins))
    return discovered_bins

def find_bins(self, frame, min_area, max_area, debug_frame):
    # empty variables
    discovered_bins = []

    # Find contours of every shape present after threshold
    contours, hierarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # if there are enough contours for at least one bin
    if len(contours) > 1:
        cv2.drawContours(debug_frame, contours, -1, (255, 100, 255), 1)

        for n, cnt in enumerate(contours):
            #print "analyzing contour{}".format(n)
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                #if DEBUG_BIN_IDENTIFICATION:
                #    print "for contour{}: area={}".format(n, area)
                if min_area < area < max_area:
                    # approximate raw contour points to a simpler polygon with fewer points
                    approx = cv2.approxPolyDP(
                        cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    #print "for bin{}: len={}".format(n, len(approx))
                    #print "for bin{}: aspect ratio={}".format(n, aspect_ratio)
                    if 2 <= len(approx) < 12 and (0.8 < aspect_ratio < 1.2):
                        p1, p2, p3, p4 = (tuple(box[0]), tuple(box[1]),
                                          tuple(box[2]), tuple(box[3]))
                        # instantiate a new bin
                        new_bin = Bin(p1, p2, p3, p4)
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        # print "new bin created with slope: ", new_bin.line_slope
                        #print -theta
                        # if theta != 0:
                        #     new_bin.theta = np.pi * (-theta) / 180
                        # else:
                        #     new_bin.theta = 0
                        self.recent_id += 1
                        discovered_bins.append(new_bin)

    print "found {} contours. {} are bins.".format(len(contours), len(discovered_bins))
    return discovered_bins

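# ---------------------------------------------------------------------------
# The two find_bins() variants above share one geometric filter: wrap each
# contour in its minimum-area rotated rectangle, then keep only contours whose
# rectangle area and height/width ratio look bin-like. Below is a minimal,
# standalone sketch of that test. The helper name, threshold values, and the
# synthetic contour are illustrative only (not part of the tracker); it assumes
# Python 2 with OpenCV 2.x, like the surrounding code.
import cv2
import numpy as np


def looks_like_bin(cnt, min_area, max_area, ratio_lo=0.8, ratio_hi=1.2):
    """Return True if cnt's min-area rect passes the area and aspect tests."""
    (cx, cy), (w, h), theta = cv2.minAreaRect(cnt)
    if w <= 0 or h <= 0:
        return False
    area = w * h
    aspect_ratio = float(h) / w
    return min_area < area < max_area and ratio_lo < aspect_ratio < ratio_hi


# A clean 100x100 square contour: area ~10000, aspect ratio ~1.0
square = np.array([[[0, 0]], [[100, 0]], [[100, 100]], [[0, 100]]], dtype=np.int32)
print looks_like_bin(square, min_area=1000, max_area=100000)   # True
# ---------------------------------------------------------------------------
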
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []
    self.failed_bins = []

    if len(contours) > 1:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    approx = cv2.approxPolyDP(
                        cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if ((1.0 / self.ratio_range[1]) < aspect_ratio < (1.0 / self.ratio_range[0]) or
                            self.ratio_range[0] < aspect_ratio < self.ratio_range[1]):
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    for bin in self.raw_bins:
        self.match_bins(bin)
    self.sort_bins()
    self.draw_bins()

    # populate self.output with info
    self.output.found = False
    if len(self.confirmed) > 0:
        self.output.found = True
    self.return_output()
    print self

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    # svr.debug("processed", self.numpy_to_cv)
    # svr.debug("adaptive", self.adaptive_to_cv)
    # svr.debug("debug", self.debug_to_cv)

    self.debug_stream("debug", self.debug_frame)
    self.debug_stream("processed", self.numpy_frame)
    self.debug_stream("adaptive", self.adaptive_frame)

def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms: denoise and convert to hsv
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)

    # Separate the channels for convenience later
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame3

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    # capture a frame representing the effect of the adaptive threshold
    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours of every shape present after threshold
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []

    # if there are enough contours for at least one bin
    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    # approximate raw contour points to a simpler polygon with fewer points
                    approx = cv2.approxPolyDP(
                        cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if 2 <= len(approx) < 12 and (0.4 < aspect_ratio < 0.6 or
                                                  1.8 < aspect_ratio < 2.2):
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        # print "new bin created with slope: ", new_bin.line_slope
                        #print -theta
                        # if theta != 0:
                        #     new_bin.theta = np.pi * (-theta) / 180
                        # else:
                        #     new_bin.theta = 0
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    for bin in self.raw_bins:
        self.match_bins(bin)
    self.sort_bins()
    self.draw_bins()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    # svr.debug("processed", self.numpy_to_cv)
    # svr.debug("adaptive", self.adaptive_to_cv)
    # svr.debug("debug", self.debug_to_cv)

    self.debug_stream("debug", self.debug_frame)
    self.debug_stream("processed", self.numpy_frame)
    self.debug_stream("adaptive", self.adaptive_frame)

    for bin in self.confirmed:
        print type(bin.patch)
        if (bin.patch.shape[1] != 0) and (bin.patch.shape[0] != 0):
            self.debug_stream("Patch" + str(bin.id), bin.patch)
            # svr.debug("Patch"+str(bin.id), libvision.cv2_to_cv(bin.patch))
        print bin.id

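# ---------------------------------------------------------------------------
# The `2 <= len(approx) < 12` guard above leans on cv2.approxPolyDP: with an
# epsilon of 1% of the contour's perimeter, a reasonably clean rectangular
# contour simplifies down to about four vertices, while very ragged noise blobs
# keep many more. A small illustration on a synthetic contour (hypothetical
# data, not repo code; assumes Python 2 / OpenCV 2.x):
import cv2
import numpy as np

rect_cnt = np.array([[[0, 0]], [[200, 0]], [[200, 100]], [[0, 100]]], dtype=np.int32)
epsilon = 0.01 * cv2.arcLength(rect_cnt, True)       # 1% of the closed perimeter
approx = cv2.approxPolyDP(rect_cnt, epsilon, True)   # closed-curve approximation
print len(approx)   # 4 for this ideal rectangle, inside the 2 <= n < 12 window
# ---------------------------------------------------------------------------
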
def process_frame(self, frame):
    # Creation of frames
    self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, self.debug_frame)
    self.debug_numpy_frame = cv_to_cv2(self.debug_frame)

    # CV2 Transforms
    self.numpy_frame = cv_to_cv2(self.debug_frame)
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv.CV_BGR2HSV)
    [self.frame1, self.frame2, self.frame3] = numpy.dsplit(self.numpy_frame, 3)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame3

    '''Temporarily converts the image to a cv frame to do the adaptive
    threshold'''
    self.temp_cv2frame = cv2_to_cv(self.numpy_frame)

    cv.AdaptiveThreshold(self.temp_cv2frame, self.temp_cv2frame, 255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.adaptive_thresh_blocksize,
                         self.adaptive_thresh,
                         )

    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(self.temp_cv2frame, self.temp_cv2frame, kernel, 1)
    cv.Dilate(self.temp_cv2frame, self.temp_cv2frame, kernel, 1)

    self.adaptive_frame = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.Copy(self.temp_cv2frame, self.adaptive_frame)

    '''Returns the frame to a cv2 image'''
    self.numpy_frame = cv_to_cv2(self.temp_cv2frame)

    '''Begins finding contours'''
    contours, hierarchy = cv2.findContours(
        self.numpy_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []

    if len(contours) > 1:
        cnt = contours[0]
        cv2.drawContours(
            self.numpy_frame, contours, -1, (255, 255, 255), 3)
        self.masks = []
        pts = []
        for h, cnt in enumerate(contours):
            mask = numpy.zeros(self.numpy_frame.shape, numpy.uint8)
            cv2.drawContours(mask, [cnt], 0, 255, -1)
            mean = cv2.mean(cv_to_cv2(self.debug_frame), mask=mask)
            self.masks.append(mask)

            hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = numpy.int0(box)

            new_bin = Bin(tuple(box[0]), tuple(box[1]),
                          tuple(box[2]), tuple(box[3]))
            new_bin.id = self.recent_id
            self.recent_id = self.recent_id + 1
            self.raw_bins.append(new_bin)

            for pt in box:
                cv2.circle(self.numpy_frame, tuple(pt),
                           5, (255, 255, 255), -1, 8, 0)
                pts.append(pt)

    '''Removes bins that have centers too close to others (to prevent bins
    inside bins), and bins that are too small'''
    for bin1 in self.raw_bins[:]:
        for bin2 in self.raw_bins[:]:
            if bin1 in self.raw_bins and bin2 in self.raw_bins and \
                    math.fabs(bin1.midx - bin2.midx) < self.mid_sep and \
                    math.fabs(bin1.midy - bin2.midy) < self.mid_sep:
                if bin1.area < bin2.area:
                    self.raw_bins.remove(bin1)
                elif bin2.area < bin1.area:
                    self.raw_bins.remove(bin2)
            if bin1 in self.raw_bins and bin2 in self.raw_bins:
                if bin1.area < self.min_area:
                    self.raw_bins.remove(bin1)
                if bin2.area < self.min_area and bin2 in self.raw_bins:
                    self.raw_bins.remove(bin2)

    for bin in self.raw_bins:
        self.match_bins(bin)
    self.sort_bins()

    self.numpy_to_cv = cv2_to_cv(self.numpy_frame)
    self.debug_final_frame = cv2_to_cv(self.debug_numpy_frame)
    self.draw_bins()

    svr.debug("CV", self.debug_final_frame)
    svr.debug("CV2", self.numpy_to_cv)
    svr.debug("Adaptive", self.adaptive_frame)

def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms: denoise and convert to hsv
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)

    # Separate the channels for convenience later
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame3

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    # capture a frame representing the effect of the adaptive threshold
    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours of every shape present after threshold
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []

    # if there are enough contours for at least one bin
    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    # approximate raw contour points to a simpler polygon with fewer points
                    approx = cv2.approxPolyDP(
                        cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if 2 <= len(approx) < 12 and (0.4 < aspect_ratio < 0.6 or
                                                  1.8 < aspect_ratio < 2.2):
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        # print "new bin created with slope: ", new_bin.line_slope
                        #print -theta
                        # if theta != 0:
                        #     new_bin.theta = np.pi * (-theta) / 180
                        # else:
                        #     new_bin.theta = 0
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    for bin in self.raw_bins:
        self.match_bins(bin)
    self.sort_bins()
    self.draw_bins()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    # svr.debug("processed", self.numpy_to_cv)
    # svr.debug("adaptive", self.adaptive_to_cv)
    # svr.debug("debug", self.debug_to_cv)

    self.debug_stream("debug", self.debug_frame)
    self.debug_stream("processed", self.numpy_frame)
    self.debug_stream("adaptive", self.adaptive_frame)

    for bin in self.confirmed:
        print type(bin.patch)
        if (bin.patch.shape[1] != 0) and (bin.patch.shape[0] != 0):
            self.debug_stream("Patch" + str(bin.id), bin.patch)
            # svr.debug("Patch"+str(bin.id), libvision.cv2_to_cv(bin.patch))
        print bin.id

def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []
    self.failed_bins = []

    if len(contours) > 1:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    approx = cv2.approxPolyDP(
                        cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if ((1.0 / self.ratio_range[1]) < aspect_ratio < (1.0 / self.ratio_range[0]) or
                            self.ratio_range[0] < aspect_ratio < self.ratio_range[1]):
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    for bin in self.raw_bins:
        self.match_bins(bin)
    self.sort_bins()
    self.draw_bins()

    # populate self.output with info
    self.output.found = False
    if len(self.confirmed) > 0:
        self.output.found = True
    self.return_output()
    print self

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    # svr.debug("processed", self.numpy_to_cv)
    # svr.debug("adaptive", self.adaptive_to_cv)
    # svr.debug("debug", self.debug_to_cv)

    self.debug_stream("debug", self.debug_frame)
    self.debug_stream("processed", self.numpy_frame)
    self.debug_stream("adaptive", self.adaptive_frame)

def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame3

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []

    if len(contours) > 1:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if .4 < aspect_ratio < .6 or 1.8 < aspect_ratio < 2.2:
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        new_bin.theta = -theta
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    # Removes bins that have centers too close to others (to prevent bins inside bins)
    for bin1 in self.raw_bins[:]:
        for bin2 in self.raw_bins[:]:
            if bin1 is bin2:
                continue
            if bin1 in self.raw_bins and bin2 in self.raw_bins and \
                    math.fabs(bin1.midx - bin2.midx) < self.mid_sep and \
                    math.fabs(bin1.midy - bin2.midy) < self.mid_sep:
                if bin1.area < bin2.area:
                    self.raw_bins.remove(bin1)
                elif bin2.area < bin1.area:
                    self.raw_bins.remove(bin2)

    for bin in self.raw_bins:
        self.match_bins(bin)
    self.sort_bins()
    self.draw_bins()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

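# ---------------------------------------------------------------------------
# The two process_frame() versions that de-duplicate candidates (the cv-based
# one and the one directly above) use the same idea: if two detections have
# centers within mid_sep pixels of each other, treat them as the same bin (or
# a bin nested inside another) and keep only the larger. A compact standalone
# sketch of that pass, using a hypothetical stand-in for Bin (illustrative
# values, not repo code):
import math
from collections import namedtuple

FakeBin = namedtuple("FakeBin", ["midx", "midy", "area"])


def dedup_bins(bins, mid_sep):
    """Drop the smaller of any two bins whose centers are within mid_sep."""
    kept = list(bins)
    for bin1 in bins:
        for bin2 in bins:
            if bin1 is bin2 or bin1 not in kept or bin2 not in kept:
                continue
            if math.fabs(bin1.midx - bin2.midx) < mid_sep and \
                    math.fabs(bin1.midy - bin2.midy) < mid_sep:
                if bin1.area < bin2.area:
                    kept.remove(bin1)
                elif bin2.area < bin1.area:
                    kept.remove(bin2)
    return kept


candidates = [FakeBin(100, 100, 5000),   # real bin
              FakeBin(103, 98, 1200),    # smaller duplicate of the same bin
              FakeBin(300, 200, 4000)]   # separate bin, far enough away
print dedup_bins(candidates, mid_sep=10)   # the 1200-area duplicate is dropped
# ---------------------------------------------------------------------------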