Code example #1
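The main loop of a tracking node: images are pulled from a queue, background-subtracted, and passed to a BlobFinder; LED schedulers are updated during the experiment window, the experiment_logger node is killed after the posttrial period, and food regions are drawn on the preview images.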
    def run(self):

        blob_finder = BlobFinder(
            threshold=self.tracking_threshold,
            minArea=self.tracking_min_area,
            maxArea=self.tracking_max_area,
        )

        while not rospy.is_shutdown():

            # Grab the next image from the queue; if none has arrived
            # yet, skip to the next loop iteration
            try:
                new_image = self.image_queue.get_nowait()
            except Queue.Empty:
                continue

            current_time = rospy.Time.now().to_time()
            elapsed_time = current_time - self.start_time

            # Background-subtract the current image and locate blobs
            diff_image = cv2.absdiff(new_image, self.bg_image)
            blob_list, blob_image = blob_finder.find(diff_image)

            # DEVEL Temporary: on-food detection is stubbed out for now
            # --------------------------------
            on_food = False
            # --------------------------------

            # During the experiment window, update the LED schedulers
            if (elapsed_time >= self.pretrial_duration) and (
                    elapsed_time <
                    self.pretrial_duration + self.experiment_duration):
                for scheduler in self.led_schedulers:
                    scheduler.update(rospy.Time.now().to_time(), on_food)

            # Once the posttrial period has elapsed, shut down the logger node
            if elapsed_time > (self.pretrial_duration +
                               self.experiment_duration +
                               self.posttrial_duration):
                if not self.logger_killed:
                    os.system('rosnode kill experiment_logger')
                    self.logger_killed = True

            # Draw a rectangle around each region's food position
            new_image_bgr = cv2.cvtColor(new_image, cv2.COLOR_GRAY2BGR)
            for region_dict in self.region_param:
                cx, cy = region_dict['food_pos']
                x0 = int(cx - self.food_width / 2.0)
                y0 = int(cy - self.food_height / 2.0)
                x1 = int(cx + self.food_width / 2.0)
                y1 = int(cy + self.food_height / 2.0)
                cv2.rectangle(new_image_bgr, (x0, y0), (x1, y1), (0, 0, 255), 1)

            cv2.imshow('blob image', blob_image)
            cv2.imshow('walking arena 1d', new_image_bgr)
            cv2.waitKey(1)
Code example #2
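A base class that wraps a BlobFinder configured from a parameter dictionary; update() finds blobs in a grayscale frame and delegates object identification to a subclass's find_objects() method.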
class BaseObjectFinder(object):
    def __init__(self, param):
        self.param = param
        self.circle_size = 5
        self.circle_thickness = 1
        self.blob_finder = BlobFinder(
            threshold=self.param['tracking']['multiobj_threshold'],
            minArea=self.param['tracking']['min_area'],
            maxArea=self.param['tracking']['max_area'],
        )

    def update(self, frame_gray):
        # Find blobs in the grayscale frame and delegate object
        # identification to the subclass
        blob_list, blob_image = self.blob_finder.find(frame_gray)
        return self.find_objects(frame_gray, blob_list)
Code example #3
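A PuzzleBoxes node that finds blobs in a background-subtracted image, wraps each blob as a TrackedObject, updates every tracking region, and publishes the results on the /puzzleboxes_data topic.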
class PuzzleBoxes(PuzzleBoxesBase):
    def __init__(self):
        super(PuzzleBoxes, self).__init__()
        self.blob_finder = BlobFinder(
            threshold=self.param['tracking']['threshold'],
            minArea=self.param['tracking']['min_area'],
            maxArea=self.param['tracking']['max_area'],
        )
        self.data_pub = rospy.Publisher('/puzzleboxes_data',
                                        PuzzleboxesData,
                                        queue_size=10)

    def process_frame(self, frame_data):

        ros_time_now = frame_data['ros_time_now']
        current_time = frame_data['current_time']
        elapsed_time = frame_data['elapsed_time']
        image = frame_data['image']
        diff_image = frame_data['diff_image']

        blob_list, blob_image = self.blob_finder.find(diff_image)

        # Package each detected blob as a TrackedObject
        tracked_objects = []
        for i, blob in enumerate(blob_list):
            obj = TrackedObject()  # Replace this with simple class
            obj.position.x = blob['centroidX']
            obj.position.y = blob['centroidY']
            obj.size = blob['area']
            tracked_objects.append(obj)
        self.process_regions(ros_time_now, elapsed_time, tracked_objects)

        if self.visualizer_on:
            visualizer_data = {
                'elapsed_time': elapsed_time,
                'bgr_image': frame_data['bgr_image'],
                'trial_scheduler': self.trial_scheduler,
                'tracking_region_list': self.tracking_region_list,
            }

            if self.single_threaded:
                self.region_visualizer.update(visualizer_data)
            else:
                self.region_visualizer_queue.put(visualizer_data)

    def process_regions(self, ros_time_now, elapsed_time, tracked_objects):
        # Update every tracking region and publish the aggregated data message
        led_enabled = self.trial_scheduler.led_enabled
        msg = PuzzleboxesData()
        msg.header.stamp = ros_time_now
        msg.elapsed_time = elapsed_time
        msg.region_data_list = []
        msg.led_enabled = led_enabled
        msg.queue_overflow = self.queue_overflow
        msg.queue_size = self.image_queue.qsize()
        for tracking_region in self.tracking_region_list:
            region_data = tracking_region.update(elapsed_time, tracked_objects,
                                                 led_enabled)
            msg.region_data_list.append(region_data)
        self.data_pub.publish(msg)
Code example #4
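A standalone function that reads an AVI file, finds blob centroids in each frame, clusters the centroid coordinates into at most three signal positions, and returns a dictionary mapping each frame number to a 3-element on/off signal vector.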
import cv2
import numpy


def getAviFrameToSigDict(fileName, coord='x'):

    # Zeroing region - for removing frame counter number from image
    zeroN, zeroM = 100, 100

    # Point tolerance (pixels) for grouping nearby centroid values
    ptTol = 4

    # Blob finder parameters
    threshold = 50
    filterByArea = True
    minArea = 20
    maxArea = None
    blobFinder = BlobFinder(threshold=threshold,
                            filterByArea=filterByArea,
                            minArea=minArea,
                            maxArea=maxArea)

    # Read frames from video and get list of blob centroid
    cap = cv2.VideoCapture(fileName)
    frmNum = 0
    numBlobTest = True
    frmToPtList = {}
    while (cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        frmNum += 1
        print('processing frame: {0}'.format(frmNum))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray[:zeroN, :zeroM] = numpy.zeros((zeroN, zeroM))
        blobList, blobImage = blobFinder.find(gray)
        ptList = []
        for blob in blobList:
            if coord == 'x':
                val = blob['centroidX']
            else:
                val = blob['centroidY']
            ptList.append(val)
        frmToPtList[frmNum] = ptList

        cv2.imshow('frame', blobImage)
        if cv2.waitKey(2) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

    # Get unique set of point values - grouped using the point tolerance
    ptSet = set()
    for frmNum, ptList in frmToPtList.iteritems():
        for val in ptList:
            found = False
            for pt in ptSet:
                if abs(pt - val) < ptTol:
                    found = True
            if not found:
                ptSet.add(val)

    print(ptSet)
    if len(ptSet) > 3:
        raise ValueError('more than three unique pts found')

    # Create pt to signal number dictionary (signal numbers ordered by
    # increasing point value)
    ptList = list(ptSet)
    ptList.sort()
    ptToSigNum = dict((x, i) for i, x in enumerate(ptList))

    # Create frame number to signal dictionary - each frame maps to a
    # 3-element 0/1 vector indicating which signals are present
    frmToSig = {}
    for frmNum, ptList in frmToPtList.iteritems():
        sig = [0, 0, 0]
        for x in ptList:
            # Assign the point to the nearest known signal position
            closest = min([(abs(pt - x), sigNum)
                           for pt, sigNum in ptToSigNum.iteritems()])
            ind = closest[1]
            sig[ind] = 1
        frmToSig[frmNum] = sig
    return frmToSig
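
A hypothetical call, with an illustrative file name (the video is expected to contain at most three distinct signal positions):

frmToSig = getAviFrameToSigDict('led_sync.avi', coord='x')
print(frmToSig[1])  # e.g. [0, 1, 0] - only the middle signal is on in frame 1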
Code example #5
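A disco_tracker node that subscribes to a Raspberry Pi camera topic (raw or compressed), keeps only the newest queued image, crops it to a region of interest, and prints the centroid of the largest detected blob.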
class DiscoTracker(object):
    def __init__(self):
        self.use_compressed = False
        rospy.init_node('disco_tracker')
        self.image_queue = Queue.Queue()
        self.bridge = CvBridge()
        self.roi = {
            'x': (135, 485),
            'y': (60, 405),
        }
        self.blob_finder = BlobFinder(threshold=20, min_area=2, max_area=200)

        if self.use_compressed:
            self.image_topic = '/raspicam_node/image/compressed'
            self.image_sub = rospy.Subscriber(self.image_topic,
                                              CompressedImage,
                                              self.on_image,
                                              queue_size=1)
        else:
            self.image_topic = '/raspicam_node/image'
            self.image_sub = rospy.Subscriber(self.image_topic,
                                              Image,
                                              self.on_image,
                                              queue_size=1)

    def on_image(self, msg):
        # Image callback - queue the message for the main loop
        self.image_queue.put(msg)

    def run(self):
        queue_get_count = 0
        frame_proc_count = 0
        t0 = time.time()

        while not rospy.is_shutdown():
            # Drain the queue, keeping only the most recent image
            image_msg = None
            while True:
                try:
                    image_msg = self.image_queue.get_nowait()
                    queue_get_count += 1
                except Queue.Empty:
                    break
            if image_msg is None:
                continue
            t1 = time.time()
            dt = t1 - t0
            t0 = t1
            #print('ok: {}, {}'.format(dt, 1/dt))

            if self.use_compressed:
                # np.frombuffer replaces the deprecated np.fromstring
                image_cmp = np.frombuffer(image_msg.data, np.uint8)
                image_bgr = jpeg.JPEG(image_cmp).decode()
            else:
                image_bgr = self.bridge.imgmsg_to_cv2(image_msg,
                                                      desired_encoding='bgr8')

            # Crop to the region of interest and convert to grayscale
            n0, n1 = self.roi['x']
            m0, m1 = self.roi['y']
            image_bgr = image_bgr[m0:m1, n0:n1]
            image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)

            # Use the first processed frame as the background image
            if frame_proc_count == 0:
                self.blob_finder.bg_image = image
            blob_list, blob_image = self.blob_finder.find(image)

            # Select the largest blob; max raises ValueError on an empty list
            try:
                max_blob = max(blob_list, key=lambda blob: blob['area'])
            except ValueError:
                max_blob = None

            if max_blob is not None:
                print('cx: {}, cy: {}'.format(max_blob['cx'], max_blob['cy']))
            else:
                print('None')

            # Track frames pulled from the queue but never processed
            # (dropped because only the newest image is kept)
            dropped_frames = queue_get_count - frame_proc_count - 1
            dropped_fraction = float(dropped_frames) / float(queue_get_count)
            frame_proc_count += 1
            #print('{}, {}, {:1.3f}'.format(frame_proc_count, queue_get_count, dropped_fraction))

            # Modulus of 1 shows every frame; increase it to throttle display
            if frame_proc_count % 1 == 0:
                cv2.imshow('image', blob_image)
                cv2.waitKey(1)
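
A minimal entry point for running this node might look like the following (hypothetical, not part of the original listing; rospy.init_node is already called in __init__):

if __name__ == '__main__':
    tracker = DiscoTracker()
    tracker.run()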
Code example #6
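A video-processing loop that maintains a median background model, finds blobs in each masked grayscale frame, optionally writes an annotated output video, and logs per-frame blob data as JSON lines.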
    def run(self):

        cap = cv2.VideoCapture(self.input_video_name)

        # Median-based background model used for foreground extraction
        bg_model = MedianBackground(
                window_size=self.param['bg_window_size'],
                threshold=self.param['fg_threshold'],
                )

        blob_finder = BlobFinder(
                filter_by_area=True,
                min_area=self.param['min_area'],
                max_area=self.param['max_area'],
                open_kernel_size=self.param['open_kernel_size'],
                close_kernel_size=self.param['close_kernel_size'],
                kernel_shape=self.param['kernel_shape'],
                min_interblob_spacing=self.param['min_interblob_spacing'],
                )

        # Output files
        vid = None
        blob_fid = None

        if self.param['blob_file_name'] is not None:
            blob_fid = open(self.param['blob_file_name'], 'w')

        frame_count = -1

        while True:

            # Get frame, mask and convert to gray scale
            ret, frame = cap.read()
            if not ret:
                break
            frame_count += 1
            print('frame count: {0}'.format(frame_count))

            frame = self.apply_datetime_mask(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Open the output video on the first frame, once the frame size is known
            if frame_count == 0 and self.param['output_video_name'] is not None:
                vid = cv2.VideoWriter(
                        self.param['output_video_name'],
                        0x00000021,    # hack for cv2.VideoWriter_fourcc(*'MP4V')
                        self.param['output_video_fps'],
                        (frame.shape[1], frame.shape[0]),
                        )

            # Update background model 
            bg_model.update(frame)
            if not bg_model.ready:
                continue

            # Find blobs and add data to blob file
            blob_list, blob_image, circ_image = blob_finder.find(frame, bg_model.foreground_mask)

            if vid is not None:
                vid.write(circ_image)

            if blob_fid is not None:
                frame_data = {'frame': frame_count, 'blobs': blob_list}
                frame_data_json = json.dumps(frame_data)
                blob_fid.write('{0}\n'.format(frame_data_json))

            # Display preview images
            if self.param['show_dev_images']:
                cv2.imshow('original', frame)
                cv2.imshow('background', bg_model.background)
                cv2.imshow('foreground mask', bg_model.foreground_mask)
                cv2.imshow('blob_image', blob_image)
                cv2.imshow('circ_image', circ_image)
            else:
                cv2.imshow('circ_image', circ_image)

            # Quit on 'q' key press
            wait_key_val = cv2.waitKey(1) & 0xFF
            if wait_key_val == ord('q'):
                break

        # Clean up
        cap.release()
        cv2.destroyAllWindows()

        if vid is not None:
            vid.release()

        if blob_fid is not None:
            blob_fid.close()
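
Across these examples the recurring pattern is the same: construct a BlobFinder with threshold and area parameters, then call find() on a grayscale (often background-subtracted) image to get back a list of blob dictionaries plus an annotated image. A minimal standalone sketch, assuming the camelCase keyword variant of BlobFinder used in examples #1 through #4 and a hypothetical import path:

import cv2
from blob_finder import BlobFinder   # hypothetical import path

# Parameter names follow examples #1-#4 (threshold, minArea, maxArea)
blob_finder = BlobFinder(threshold=50, minArea=20, maxArea=None)

gray = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)   # illustrative input image
blob_list, blob_image = blob_finder.find(gray)

for blob in blob_list:
    # Each blob is a dict with centroid coordinates and area
    print(blob['centroidX'], blob['centroidY'], blob['area'])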