def main():
    P = int(sys.argv[1])
    tau = float(sys.argv[2])
    delta = float(sys.argv[3])
    pic = Picture(sys.argv[4])
    prevBeads = BlobFinder(pic, tau).getBeads(P)
    # For every remaining frame:
    for i in sys.argv[5:]:
        currBeads = BlobFinder(Picture(i), tau).getBeads(P)
        # For every bead in that frame:
        for currBead in currBeads:
            # Initialize shortest_dist larger than any real distance.
            shortest_dist = float('inf')
            # Search the previous frame for the bead closest to currBead.
            for prevBead in prevBeads:
                d = prevBead.distanceTo(currBead)
                if d < shortest_dist:
                    shortest_dist = d
            # Confirm that the displacement is within delta; if so, write it.
            if shortest_dist <= delta:
                stdio.writef('%.4f\n', shortest_dist)
        stdio.writeln()
        prevBeads = currBeads
def main():
    P = int(sys.argv[1])
    tau = float(sys.argv[2])
    delta = float(sys.argv[3])
    # Compare each consecutive pair of frames.
    for k in range(4, len(sys.argv) - 1):
        pic1 = Picture(sys.argv[k])
        pic2 = Picture(sys.argv[k + 1])
        blobscan = BlobFinder(pic1, tau)
        blobscan2 = BlobFinder(pic2, tau)
        blob = blobscan.getBeads(P)
        blob2 = blobscan2.getBeads(P)
        for i in range(len(blob2)):
            least = 0.0
            first = True
            for j in range(len(blob)):
                b = blob2[i]
                bb = blob[j]
                distance = b.distanceTo(bb)
                if distance <= delta and first:
                    least = distance
                    first = False
                elif distance <= delta and distance < least:
                    least = distance
            # Write the shortest qualifying distance, if one was found.
            if not first:
                stdio.writef('%.4f\n', least)
        stdio.write('\n')
def main():
    # Initialize P, tau, and delta.
    P, tau = int(sys.argv[1]), float(sys.argv[2])
    delta = float(sys.argv[3])
    firstPic = BlobFinder(Picture(sys.argv[4]), tau)
    prevBeads = firstPic.getBeads(P)
    for v in sys.argv[5:]:
        # Construct a BlobFinder and the list of beads in the current frame.
        blobfinder = BlobFinder(Picture(v), tau)
        currBeads = blobfinder.getBeads(P)
        for currBead in currBeads:
            # Map each previous bead to its distance from currBead.
            a = map(lambda x: currBead.distanceTo(x), prevBeads)
            # Keep only the distances within delta.
            minDist = list(filter(lambda x: x <= delta, a))
            # If the list isn't empty, write the minimum distance.
            if minDist:
                minDist = min(minDist)
                stdio.writef('%.4f\n', minDist)
        # Reinitialize prevBeads with currBeads.
        prevBeads = currBeads
        stdio.writeln()
def main():
    P = int(sys.argv[1])
    tau = float(sys.argv[2])
    delta = float(sys.argv[3])
    files = sys.argv[4:]
    beads1 = BlobFinder(Picture(files[0]), tau).getBeads(P)
    for i in range(1, len(files)):
        beads0 = beads1
        beads1 = BlobFinder(Picture(files[i]), tau).getBeads(P)
        # For each bead, write its nearest-neighbor distance if within delta.
        for bead in beads0:
            dist = min(bead.distanceTo(other) for other in beads1)
            if dist <= delta:
                stdio.writef('%.4f\n', dist)
        stdio.writeln()
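# The bead-tracking main() variants in this section all rely on the same
# surrounding scaffolding, which the excerpts omit. A minimal sketch, assuming
# the stdio, picture, and blobfinder modules of the booksite support library
# (the module names are an assumption, not shown in the excerpts):
import sys
import stdio
from blobfinder import BlobFinder
from picture import Picture

# ... one of the main() variants from this section ...

if __name__ == '__main__':
    main()

# A typical invocation passes P, tau, delta, and the frame images, e.g.
# python beadtracker.py 25 180.0 25.0 frame00000.jpg frame00001.jpg
# (file names and parameter values here are illustrative only).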
def __init__(self, param):
    self.param = param
    self.circle_size = 5
    self.circle_thickness = 1
    self.blob_finder = BlobFinder(
            threshold=self.param['tracking']['multiobj_threshold'],
            minArea=self.param['tracking']['min_area'],
            maxArea=self.param['tracking']['max_area'],
            )
def __init__(self):
    super(PuzzleBoxes, self).__init__()
    self.blob_finder = BlobFinder(
            threshold=self.param['tracking']['threshold'],
            minArea=self.param['tracking']['min_area'],
            maxArea=self.param['tracking']['max_area'],
            )
    self.data_pub = rospy.Publisher('/puzzleboxes_data', PuzzleboxesData, queue_size=10)
def main():
    P = int(sys.argv[1])
    tau = float(sys.argv[2])
    delta = float(sys.argv[3])
    seq = sys.argv[4:]
    test = []
    for i in range(1, len(seq)):
        pic = Picture(seq[i - 1])
        bf = BlobFinder(pic, tau)
        beads = bf.getBeads(P)
        pic = Picture(seq[i])
        bf = BlobFinder(pic, tau)
        beads2 = bf.getBeads(P)
        for q in beads2:
            # Collect the distances from q to every bead in the previous frame.
            for a in beads:
                test += [q.distanceTo(a)]
            # Write the shortest distance if it is within delta.
            if test and min(test) <= delta:
                stdio.writef('%.4f\n', min(test))
            test = []
def main():
    P = int(sys.argv[1])
    tau = float(sys.argv[2])
    delta = float(sys.argv[3])
    bf = BlobFinder(Picture(sys.argv[4]), tau)
    prevBeads = bf.getBeads(P)
    for i in range(5, len(sys.argv)):
        bf = BlobFinder(Picture(sys.argv[i]), tau)
        currBeads = bf.getBeads(P)
        for currBead in currBeads:
            min_dist = float('inf')
            for prevBead in prevBeads:
                d = currBead.distanceTo(prevBead)
                if d <= delta and d < min_dist:
                    min_dist = d
            if min_dist != float('inf'):
                stdio.writef('%.4f\n', min_dist)
        stdio.writeln()
        prevBeads = currBeads
def main():
    P = int(sys.argv[1])
    tau = float(sys.argv[2])
    delta = float(sys.argv[3])
    frame = BlobFinder(Picture(sys.argv[4]), tau)
    prevBeads = frame.getBeads(P)
    for i in range(5, len(sys.argv)):
        frame = BlobFinder(Picture(sys.argv[i]), tau)
        currBeads = frame.getBeads(P)
        for currBead in currBeads:
            distance = float('inf')
            for prevBead in prevBeads:
                d = currBead.distanceTo(prevBead)
                if d <= delta and d < distance:
                    distance = d
            if distance != float('inf'):
                stdio.writef('%.4f\n', distance)
        stdio.writeln()
        prevBeads = currBeads
def run(self):
    blob_finder = BlobFinder(
            threshold=self.tracking_threshold,
            minArea=self.tracking_min_area,
            maxArea=self.tracking_max_area,
            )
    while not rospy.is_shutdown():
        try:
            new_image = self.image_queue.get_nowait()
        except Queue.Empty:
            continue

        current_time = rospy.Time.now().to_time()
        elapsed_time = current_time - self.start_time

        # Background-subtract the new frame and find blobs in the difference.
        diff_image = cv2.absdiff(new_image, self.bg_image)
        blob_list, blob_image = blob_finder.find(diff_image)

        # DEVEL Temporary
        # --------------------------------
        on_food = False
        # --------------------------------

        # During the experiment phase, update the LED schedulers.
        if (elapsed_time >= self.pretrial_duration) and \
                (elapsed_time < self.pretrial_duration + self.experiment_duration):
            for scheduler in self.led_schedulers:
                scheduler.update(rospy.Time.now().to_time(), on_food)

        # After the post-trial period, shut down the logger node once.
        if elapsed_time > (self.pretrial_duration + self.experiment_duration + self.posttrial_duration):
            if not self.logger_killed:
                os.system('rosnode kill experiment_logger')
                self.logger_killed = True

        # Draw the food regions on a color copy of the frame for display.
        new_image_bgr = cv2.cvtColor(new_image, cv2.COLOR_GRAY2BGR)
        for region_dict in self.region_param:
            cx, cy = region_dict['food_pos']
            x0 = int(cx - self.food_width / 2.0)
            y0 = int(cy - self.food_height / 2.0)
            x1 = int(cx + self.food_width / 2.0)
            y1 = int(cy + self.food_height / 2.0)
            cv2.rectangle(new_image_bgr, (x0, y0), (x1, y1), (0, 0, 255), 1)

        cv2.imshow('blob image', blob_image)
        cv2.imshow('walking arena 1d', new_image_bgr)
        cv2.waitKey(1)
def __init__(self):
    self.use_compressed = False
    rospy.init_node('disco_tracker')
    self.image_queue = Queue.Queue()
    self.bridge = CvBridge()
    self.roi = {
            'x': (135, 485),
            'y': (60, 405),
            }
    self.blob_finder = BlobFinder(threshold=20, min_area=2, max_area=200)
    # Subscribe to either the compressed or the raw camera topic.
    if self.use_compressed:
        self.image_topic = '/raspicam_node/image/compressed'
        self.image_sub = rospy.Subscriber(self.image_topic, CompressedImage, self.on_image, queue_size=1)
    else:
        self.image_topic = '/raspicam_node/image'
        self.image_sub = rospy.Subscriber(self.image_topic, Image, self.on_image, queue_size=1)
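# A hypothetical sketch of the on_image callback that the subscribers above
# assume; only the attribute names come from __init__, the conversion and ROI
# cropping below are assumptions. The idea: convert the ROS image message to
# an OpenCV array, crop it to the configured ROI, and queue it for the
# processing loop to drain.
def on_image(self, msg):
    if self.use_compressed:
        image = self.bridge.compressed_imgmsg_to_cv2(msg, desired_encoding='mono8')
    else:
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='mono8')
    x0, x1 = self.roi['x']
    y0, y1 = self.roi['y']
    self.image_queue.put(image[y0:y1, x0:x1])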
def getAviFrameToSigDict(fileName, coord='x'):
    # Zeroing region - for removing the frame counter number from the image
    zeroN, zeroM = 100, 100

    # Point tolerance
    ptTol = 4

    # Blob finder parameters
    threshold = 50
    filterByArea = True
    minArea = 20
    maxArea = None
    blobFinder = BlobFinder(
            threshold=threshold,
            filterByArea=filterByArea,
            minArea=minArea,
            maxArea=maxArea,
            )

    # Read frames from the video and collect the blob centroids per frame
    cap = cv2.VideoCapture(fileName)
    frmNum = 0
    frmToPtList = {}
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frmNum += 1
        print('processing frame: {0}'.format(frmNum))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray[:zeroN, :zeroM] = numpy.zeros((zeroN, zeroM))
        blobList, blobImage = blobFinder.find(gray)
        ptList = []
        for blob in blobList:
            if coord == 'x':
                val = blob['centroidX']
            else:
                val = blob['centroidY']
            ptList.append(val)
        frmToPtList[frmNum] = ptList
        cv2.imshow('frame', blobImage)
        if cv2.waitKey(2) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

    # Get the unique set of point values, merging points closer than ptTol
    ptSet = set()
    for frmNum, ptList in frmToPtList.iteritems():
        for val in ptList:
            found = False
            for pt in ptSet:
                if abs(pt - val) < ptTol:
                    found = True
            if not found:
                ptSet.add(val)
    print(ptSet)
    if len(ptSet) > 3:
        raise ValueError('more than three unique pts found')

    # Create point to signal number dictionary
    ptList = list(ptSet)
    ptList.sort()
    ptToSigNum = dict([(x, ptList.index(x)) for x in ptList])

    # Create frame number to signal dictionary
    frmToSig = {}
    for frmNum, ptList in frmToPtList.iteritems():
        sig = [0, 0, 0]
        for x in ptList:
            closest = min([(abs(pt - x), sigNum) for pt, sigNum in ptToSigNum.iteritems()])
            ind = closest[1]
            sig[ind] = 1
        frmToSig[frmNum] = sig
    return frmToSig
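# A possible way to exercise getAviFrameToSigDict and inspect the per-frame
# signal vectors it returns; the .avi file name below is hypothetical.
if __name__ == '__main__':
    frmToSig = getAviFrameToSigDict('example_signals.avi', coord='x')
    for frmNum in sorted(frmToSig):
        print('{0}: {1}'.format(frmNum, frmToSig[frmNum]))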
def run(self):
    cap = cv2.VideoCapture(self.input_video_name)

    bg_model = MedianBackground(
            window_size=self.param['bg_window_size'],
            threshold=self.param['fg_threshold'],
            )

    blob_finder = BlobFinder(
            filter_by_area=True,
            min_area=self.param['min_area'],
            max_area=self.param['max_area'],
            open_kernel_size=self.param['open_kernel_size'],
            close_kernel_size=self.param['close_kernel_size'],
            kernel_shape=self.param['kernel_shape'],
            min_interblob_spacing=self.param['min_interblob_spacing'],  # KJL 2017_12_15
            )

    # Output files
    vid = None
    blob_fid = None
    if self.param['blob_file_name'] is not None:
        blob_fid = open(self.param['blob_file_name'], 'w')

    frame_count = -1
    while True:
        print('frame count: {0}'.format(frame_count))

        # Get frame, mask and convert to gray scale
        ret, frame = cap.read()
        if not ret:
            break
        frame_count += 1
        frame = self.apply_datetime_mask(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if frame_count == 0 and self.param['output_video_name'] is not None:
            vid = cv2.VideoWriter(
                    self.param['output_video_name'],
                    0x00000021,  # hack for cv2.VideoWriter_fourcc(*'MP4V')
                    self.param['output_video_fps'],
                    (frame.shape[1], frame.shape[0]),
                    )

        # Update background model
        bg_model.update(frame)
        if not bg_model.ready:
            continue

        # Find blobs and add data to blob file
        blob_list, blob_image, circ_image = blob_finder.find(frame, bg_model.foreground_mask)
        if vid is not None:
            vid.write(circ_image)
        if blob_fid is not None:
            frame_data = {'frame': frame_count, 'blobs': blob_list}
            frame_data_json = json.dumps(frame_data)
            blob_fid.write('{0}\n'.format(frame_data_json))

        # Display preview images
        if self.param['show_dev_images']:
            cv2.imshow('original', frame)
            cv2.imshow('background', bg_model.background)
            cv2.imshow('foreground mask', bg_model.foreground_mask)
            cv2.imshow('blob_image', blob_image)
            cv2.imshow('circ_image', circ_image)
        else:
            cv2.imshow('circ_image', circ_image)
        wait_key_val = cv2.waitKey(1) & 0xFF
        if wait_key_val == ord('q'):
            break

    # Clean up
    cap.release()
    cv2.destroyAllWindows()
    if vid is not None:
        vid.release()
    if blob_fid is not None:
        blob_fid.close()