Example #1
    def __init__(self, trackId):
        super(BeeTrack, self).__init__()

        ##! Name that gets shown on the screen for that bee
        self._name = ""

        ## Last detected bee position
        self._last_dectect = None

        ## The tracks ID
        self.trackId = trackId

        # Initialize the Kalman filter
        self.dt = 1
        self.KF = kinematic_kf(dim=2,
                               order=2,
                               dt=self.dt,
                               dim_z=1,
                               order_by_dim=True)
        self.KF.R *= 2
        # Process noise: a covariance matrix must be symmetric, so the
        # standard piecewise white-noise form is used per axis (x block,
        # then y block)
        self.KF.Q = np.array(
            [[self.dt**4 / 4, self.dt**3 / 2, self.dt**2 / 2, 0, 0, 0],
             [self.dt**3 / 2, self.dt**2, self.dt, 0, 0, 0],
             [self.dt**2 / 2, self.dt, 1, 0, 0, 0],
             [0, 0, 0, self.dt**4 / 4, self.dt**3 / 2, self.dt**2 / 2],
             [0, 0, 0, self.dt**3 / 2, self.dt**2, self.dt],
             [0, 0, 0, self.dt**2 / 2, self.dt, 1]])

        # Keep a trace of the most recent track positions
        self.trace = deque(maxlen=get_config("MAX_BEE_TRACE_LENGTH"))

        # Number of missed detections
        self.skipped_frames = 0

        # Number of frames processed with this track
        self.processed_frames = 0

        # A set of determined track/bee characteristics
        self.tags = set()
        self.reported_tags = set()

        # The first detection which created this track
        self.first_position = None

        # Whether the track is underneath a group of bees
        self.in_group = False

        self.__tagCnts = {}
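
For reference, a minimal standalone sketch of the filter configured above, assuming filterpy's kinematic_kf: with dim=2, order=2 and order_by_dim=True the state vector is laid out as [x, vx, ax, y, vy, ay], which is why positions are read back from indices 0 and 3 elsewhere (see drawTracks in Example #12). The measurement values here are made up.

# Minimal sketch of the constant-acceleration filter used above
# (assumes filterpy is installed)
import numpy as np
from filterpy.common import kinematic_kf

kf = kinematic_kf(dim=2, order=2, dt=1, dim_z=1, order_by_dim=True)
kf.predict()                      # propagate the state one time step
kf.update(np.array([120., 80.]))  # feed a detected (x, y) position
x, y = kf.x[0, 0], kf.x[3, 0]     # filtered position lives at indices 0 and 3
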
    def initialize(self: Thread):
        """! Initializes and the LoRaWAN transceiver
        Configures european channels and perform ABP connection
        """

        # this only works with a connected serial interface
        if self._ser:
            try:
                self._ser.close()
            except Exception:
                pass

        # Put the transceiver into a defined state
        self._ser = Serial(get_config("RN2483A_USB_PORT"), 57600, timeout=60)
        self._sendCmd("sys reset")
        self._sendCmd("sys factoryRESET")
        self._sendCmd("radio set freq 868000000")

        # Get channels
        channel_config = get_config("LORAWAN_CHANNEL_CONFIG")

        # Calculate the duty-cycle limitation.
        # The 868 MHz band allows a 1% duty cycle, and the RN2483 enforces
        # the duty cycle per channel, so each channel's duty cycle has to
        # stay below 1% divided by the channel count. Instead of 99%
        # off-time, each channel gets a (100% - (1% / channel-count)) share.
        if get_config("LORAWAN_DISABLE_DUTY_CYCLE_CHECKS"):
            dty = 9
        else:
            dty = int((100 - (1 / len(channel_config))) * 10)

        # Introduce the channel setup to the mac layer
        for ch in channel_config:
            self._sendCmd("mac set ch freq %i %i" % (ch[0], ch[1]))
            self._sendCmd("mac set ch dcycle %i %i" % (ch[0], dty))
            self._sendCmd("mac set ch drrange %i %i %i" %
                          (ch[0], ch[2], ch[3]))
            self._sendCmd("mac set ch status %i on" % (ch[0], ))

        # Set connection details
        self._sendCmd("mac set devaddr %s" % (get_config("LORAWAN_DEVADDR"), ))
        self._sendCmd("mac set nwkskey %s" %
                      (get_config("LORAWAN_NET_SESSION_KEY"), ))
        self._sendCmd("mac set appskey %s" %
                      (get_config("LORAWAN_APP_SESSION_KEY"), ))
        self._sendCmd("mac set sync 34")

        # Save settings
        self._sendCmd("mac save")

        # Initiate the join process
        self._sendCmd("mac join abp")
        self._read()
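
As background for the dcycle values above: per the RN2483 command reference, the value passed to 'mac set ch dcycle' encodes the duty cycle as 100 / (value + 1) percent. A small sketch of that relationship (the channel count is illustrative):

# Sketch: RN2483 dcycle value <-> duty-cycle percentage,
# where duty cycle in % = 100 / (dcycle + 1)
def dcycle_for(duty_pct):
    return int(100 / duty_pct) - 1

channels = 8                        # illustrative channel count
per_channel_pct = 1.0 / channels    # share the band's 1% limit across channels
print(dcycle_for(per_channel_pct))  # 799 -> 0.125% per channel
print(100 / (9 + 1))                # dty = 9 as used above means 10%, i.e.
                                    # effectively unthrottled for testing
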
Example #3
    def __init__(self,
                 dist_threshold,
                 max_frame_skipped,
                 frame_size=(960, 540)):
        """! Initializes the 'BeeTracker'
        """
        super(BeeTracker, self).__init__()
        self.dist_threshold = dist_threshold
        self.max_frame_skipped = max_frame_skipped
        self.trackId = 0
        self.tracks = []
        self.names = loadWomanNames()
        self._frame_height = frame_size[1]
        self._frame_width = frame_size[0]

        # Create random track colors
        self.track_colors = []
        for i in range(get_config("TRACK_COLOR_COUNT")):
            self.track_colors.append(
                (random.randint(100, 255), random.randint(100, 255),
                 random.randint(100, 255)))
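
The palette built here is later indexed by trackId modulo its length (see drawTracks in Example #12), so every track keeps a stable color. A standalone sketch of that lookup, with an illustrative palette size standing in for TRACK_COLOR_COUNT:

import random

# Illustrative palette; the real size comes from TRACK_COLOR_COUNT
track_colors = [(random.randint(100, 255), random.randint(100, 255),
                 random.randint(100, 255)) for _ in range(16)]

def color_for(track_id):
    # Stable per-track color: the id selects a palette entry
    return track_colors[track_id % len(track_colors)]

print(color_for(42))
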
    def run(self: Thread) -> None:
        """! The main thread that runs the 'ImageConsumer'
        """
        _process_time = 0
        _process_cnt = 0
        writer = None

        # Create a Bee Tracker
        tracker = BeeTracker(50, 20)

        # Create statistics object
        statistics = getStatistics()

        if self._imageQueue is None:
            raise BaseException("No image queue provided!")

        while not self.stopped:

            _start_t = time.time()

            # When the neural network is enabled, read results from the
            # classification queue and forward them to the corresponding
            # track and the statistics
            if get_config("NN_ENABLE"):
                if _process_cnt % 100 == 0:
                    logger.debug("Process time(q): %0.3fms" %
                                 ((time.time() - _start_t) * 1000.0))

                # Populate classification results
                while not self._classifierResultQueue.empty():
                    if _process_cnt % 100 == 0:
                        logger.debug("Process time(nn): %0.3fms" %
                                     ((time.time() - _start_t) * 1000.0))

                    # Transfer results to the track
                    trackId, result, image = self._classifierResultQueue.get()
                    track = tracker.getTrackById(trackId)
                    if track is not None:
                        track.imageClassificationComplete(result)
                    else:
                        statistics.addClassificationResult(trackId, result)

            # Process every incoming image
            if not self._imageQueue.empty():
                _process_cnt += 1

                if _process_cnt % 100 == 0:
                    logger.debug("Process time(get): %0.3fms" %
                                 ((time.time() - _start_t) * 1000.0))

                # Get frame set
                fs = self._imageQueue.get()
                if get_config("NN_EXTRACT_RESOLUTION") == "EXT_RES_150x300":
                    img_1080, img_540, img_180 = fs
                elif get_config("NN_EXTRACT_RESOLUTION") == "EXT_RES_75x150":
                    img_540, img_180 = fs

                if _process_cnt % 100 == 0:
                    logger.debug("Process time(detec): %0.3fms" %
                                 ((time.time() - _start_t) * 1000.0))

                # Detect bees on smallest frame
                detected_bees, detected_bee_groups = detect_bees(img_180, 3)

                if _process_cnt % 100 == 0:
                    logger.debug("Process time(track): %0.3fms" %
                                 ((time.time() - _start_t) * 1000.0))

                # Update tracker with detected bees
                if get_config("ENABLE_TRACKING"):
                    tracker.update(detected_bees, detected_bee_groups)

                # Extract the detected bee images from the video to feed them
                # to the neural network. Scale is 2 because the positions are
                # in img_540 coordinates but cutting is done on img_1080
                if get_config("ENABLE_IMAGE_EXTRACTION"):
                    data = tracker.getLastBeePositions(
                        get_config("EXTRACT_FAME_STEP"))
                    if len(data) and self._extractQueue is not None:
                        if get_config(
                                "NN_EXTRACT_RESOLUTION") == "EXT_RES_150x300":
                            self._extractQueue.put((data, img_1080, 2))
                        elif get_config(
                                "NN_EXTRACT_RESOLUTION") == "EXT_RES_75x150":
                            self._extractQueue.put((data, img_540, 1))
                        else:
                            raise BaseException(
                                "Unknown setting for NN_EXTRACT_RESOLUTION, "
                                "expected EXT_RES_150x300 or EXT_RES_75x150")

                if _process_cnt % 100 == 0:
                    logger.debug("Process time(print): %0.3fms" %
                                 ((time.time() - _start_t) * 1000.0))

                # Draw preview if wanted
                if not get_args().noPreview:

                    draw_on = img_540.copy()
                    if get_config("DRAW_DETECTED_ELLIPSES"):
                        for item in detected_bees:
                            cv2.ellipse(draw_on, item, (0, 0, 255), 2)
                    if get_config("DRAW_DETECTED_GROUPS"):
                        for item in detected_bee_groups:
                            cv2.ellipse(draw_on, item, (255, 0, 0), 2)

                    if get_config("DRAW_TRACKING_RESULTS"):
                        tracker.drawTracks(draw_on)

                    skipKey = 1 if get_config("FRAME_AUTO_PROCESS") else 0

                    cv2.imshow("frame", draw_on)
                    if cv2.waitKey(skipKey) & 0xFF == ord('q'):
                        break

                    # Save as Video
                    if get_config("SAVE_AS_VIDEO"):
                        if writer is None:
                            h, w, c = draw_on.shape
                            writer = cv2.VideoWriter(get_config("SAVE_AS_VIDEO_PATH"), \
                                    cv2.VideoWriter_fourcc(*'MJPG'), 18, (w, h))
                        writer.write(draw_on)

                # Log the average per-frame process time every 100 frames
                _process_time += time.time() - _start_t
                if _process_cnt % 100 == 0:
                    logger.debug("Process time: %0.3fms" %
                                 (_process_time * 10.0))
                    _process_time = 0

                # Limit FPS by delaying manually
                _end_t = time.time() - _start_t
                limit_time = 1 / get_config("LIMIT_FPS_TO")
                if _end_t < limit_time:
                    time.sleep(limit_time - _end_t)

                # Update statistics
                _dh = getStatistics()
                _dh.frameProcessed()

            else:
                time.sleep(0.1)

        self._done = True
        logger.info("Image Consumer stopped")
#
# @brief This module contains the 'ImageConsumer', which processes the video frames.
#
# @section authors Author(s)
# - Created by Fabian Hickert in December 2020
#
import cv2
import time
import logging
from Statistics import getStatistics
from threading import Thread
from ImageProvider import ImageProvider
from BeeDetection import detect_bees
from BeeTracking import BeeTracker, BeeTrack
from Utils import get_config, get_args
if get_config("NN_ENABLE"):
    from BeeClassification import BeeClassification

from multiprocessing import Queue

logger = logging.getLogger(__name__)


class ImageConsumer(Thread):
    """! The 'ImageConsumer' processes the frames which are provided
    by the 'ImageProvider'. It performs the bee detection, bee tracking and
    forwards findings to the 'ImageExtractor' to feed them to the neural network.
    """
    def __init__(self):
        """! Intitilizes the 'ImageConsumer'
        """
Example #6
    def _neuralN(q_in, q_out, ready, stopped):
        """! Static method, starts a new process that runs the neural network
        """

        # Include tensorflow within the process
        import tensorflow as tf
        from tensorflow import keras
        from tensorflow.keras import layers
        from tensorflow.keras.models import Sequential
        import signal

        # Ignore interrupts
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        _process_time = 0
        _process_cnt = 0

        # Enable growth of GPU usage
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        session = tf.compat.v1.InteractiveSession(config=config)

        # Load the model
        try:
            _model = tf.keras.models.load_model(get_config("NN_MODEL_FOLDER"))
        except Exception as e:
            ready.value = True
            logger.error("Failed to load Model: %s" % (e, ))
            return

        # Detect desired image size for classification
        img_height = 300
        img_width = 150
        if get_config("NN_EXTRACT_RESOLUTION") == "EXT_RES_75x150":
            img_height = 150
            img_width = 75

        # Warm the network up by running one prediction.
        # This ensures everything is preloaded when needed.

        # Load all images from the "Images" folder and feed them to the
        # neural network. This ensures that the network is fully running
        # when we start the other processes.
        test_images = [
            "Images/" + f for f in listdir("Images")
            if isfile(join("Images", f))
        ]
        imgs = []
        for item in test_images:
            img = tf.io.read_file(item)
            img = tf.image.decode_jpeg(img, channels=3)
            img = tf.image.resize(img, [img_height, img_width])
            imgs.append(img)

        # Perform a prediction
        _model.predict_step(tf.convert_to_tensor(imgs))

        # Mark process as ready
        ready.value = True

        # Create folders to store images with positive results
        if get_config("SAVE_DETECTION_IMAGES"):
            for lbl in ["varroa", "pollen", "wasps", "cooling"]:
                s_path = get_config("SAVE_DETECTION_PATH")
                if not exists(join(s_path, lbl)):
                    makedirs(join(s_path, lbl))

        classify_thres = get_config("CLASSIFICATION_THRESHOLDS")
        while stopped.value == 0:

            # While the image classification queue is not empty, feed the
            # images to the network and push the results back into the
            # outgoing queue
            if not q_in.empty():
                _start_t = time.time()
                _process_cnt += 1

                images = []
                tracks = []

                # Load the images from the in-queue and prepare them for use
                # in the network
                while not q_in.empty() and len(
                        images) < 20 and stopped.value == 0:
                    item = q_in.get()
                    t, img = item

                    # Change color from BGR to RGB
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    if img.shape != (img_height, img_width, 3):
                        img = tf.image.resize(img, [img_height, img_width])
                    images.append(img)
                    tracks.append(t)

                # Quit process if requested
                if stopped.value != 0:
                    return

                # Feed collected images to the network
                if len(tracks):
                    results = _model.predict_on_batch(
                        tf.convert_to_tensor(images))

                    # Process the results
                    for num, track in enumerate(tracks):

                        # Collect the positive labels in a set
                        entry = set()
                        for lbl_id, lbl in enumerate(
                            ["varroa", "pollen", "wasps", "cooling"]):
                            if results[lbl_id][num][0] > classify_thres[lbl]:
                                entry.add(lbl)

                                # Save the corresponding image on disc
                                if get_config("SAVE_DETECTION_IMAGES"
                                              ) and lbl in get_config(
                                                  "SAVE_DETECTION_TYPES"):
                                    img = images[num]
                                    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                                    cv2.imwrite(get_config("SAVE_DETECTION_PATH") + "/%s/%i-%s.jpeg" % (lbl, _process_cnt, \
                                            datetime.now().strftime("%Y%m%d-%H%M%S")), img)

                        # Push results back
                        q_out.put((tracks[num], entry, images[num]))

                _end_t = time.time() - _start_t
                logger.debug(
                    "Process time: %0.3fms - Queued: %i, processed %i" %
                    (_end_t * 1000.0, q_in.qsize(), len(images)))
                _process_time += _end_t
            else:
                time.sleep(0.01)
        logger.info("Classifcation stopped")
    def __init__(self, video_source=None, video_file=None):
        """! Initializes the image provider process and queue
        """
        self.frame_config = None
        self._videoStream = None
        self._stopped = multiprocessing.Value('i', 0)
        self._started = multiprocessing.Value('i', 0)
        self._process = None

        # Validate the frame_config
        max_w = max_h = 0
        frame_config = get_frame_config()
        if not len(frame_config):
            raise BaseException(
                "At least one frame config has to be provided!")

        # Ensure that each item of the frame config has the same size or less as the previous one
        for num, item in enumerate(frame_config):
            if not isinstance(item[0], int):
                raise BaseException(
                    "Expected item 1 of frame_config %i to be an integer" %
                    (num + 1, ))
            if not isinstance(item[1], int):
                raise BaseException(
                    "Expected item 2 of frame_config %i to be an integer" %
                    (num + 1, ))
            if item[2] not in (cv2.IMREAD_COLOR, cv2.IMREAD_GRAYSCALE,
                               cv2.IMREAD_UNCHANGED):
                raise BaseException(
                    "Expected item 3 of frame_config to be one of cv2.IMREAD_COLOR, cv2.IMREAD_GRAYSCALE, cv2.IMREAD_UNCHANGED"
                )

            if max_w < item[0]:
                max_w = item[0]
            if max_h < item[1]:
                max_h = item[1]

        # Ensure that at least one source is defined
        if video_source is None and video_file is None:
            raise BaseException(
                "Either a video file or a video source id is required")

        # Prepare for reading from video file
        self.frame_config = frame_config
        if video_file is not None:
            self._queue = multiprocessing.Queue(
                maxsize=get_config("FRAME_SET_BUFFER_LENGTH_VIDEO"))
            vFile = Path(video_file)
            if not vFile.is_file():
                raise BaseException(
                    "The given file '%s' doesn't seem to be valid!" %
                    (video_file, ))
        else:
            self._queue = multiprocessing.Queue(
                maxsize=get_config("FRAME_SET_BUFFER_LENGTH_CAMERA"))

        self._process = multiprocessing.Process(
            target=self._imgProcess,
            args=(self._queue, frame_config, video_source, video_file,
                  self._stopped, self._started))
        self._process.start()
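
For illustration, a frame_config value that would pass the validation above. The values are hypothetical (the real configuration comes from get_frame_config()), with each entry read as (height, width, imread-mode) to match the resize call in _imgProcess below, where cv2.resize takes dsize as (width, height), i.e. (item[1], item[0]):

import cv2

# Hypothetical frame_config: three progressively smaller variants of a
# 1080p stream, the smallest one converted to gray-scale
frame_config = (
    (1080, 1920, cv2.IMREAD_COLOR),
    (540, 960, cv2.IMREAD_COLOR),
    (180, 320, cv2.IMREAD_GRAYSCALE),
)
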
    def _imgProcess(q_out, config, video_source, video_file, stopped, started):

        # Ignore interrupts
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Open video stream
        if video_source is None:
            logger.info("Starting from video file input: %s" % (video_file, ))
            _videoStream = cv2.VideoCapture(video_file)
        else:
            logger.info("Starting from camera input")
            _videoStream = cv2.VideoCapture(video_source)
            w, h, f = get_config("CAMERA_INPUT_RESOLUTION")
            if f is not None:
                fourcc = cv2.VideoWriter_fourcc(*f)
                _videoStream.set(cv2.CAP_PROP_FOURCC, fourcc)
            if w is not None:
                _videoStream.set(cv2.CAP_PROP_FRAME_WIDTH, int(w))
            if h is not None:
                _videoStream.set(cv2.CAP_PROP_FRAME_HEIGHT, int(h))

        _process_time = 0
        _process_cnt = 0
        _skipped_cnt = 0
        while stopped.value == 0:

            # Check if the queue is full
            if q_out.full():

                # If the queue is full, then report it
                if _skipped_cnt % 100 == 0:
                    logger.debug("Buffer reached %i" % (q_out.qsize(), ))
                time.sleep(get_config("FRAME_SET_FULL_PAUSE_TIME"))
                _skipped_cnt += 1
            else:

                # There is still space in the queue, get a frame and process it
                _start_t = time.time()
                (_ret, _frame) = _videoStream.read()

                if started.value == 0:
                    started.value = 1

                if _ret:

                    # Get the original shape
                    h, w, c = _frame.shape

                    # Convert the frame according to the given configuration.
                    # Each entry is (height, width, mode); the image is
                    # resized if necessary and converted into gray-scale
                    # if needed.
                    fs = tuple()
                    for item in config:
                        height, width = _frame.shape[0:2]
                        if height != item[0] or width != item[1]:
                            _frame = cv2.resize(_frame, (item[1], item[0]))
                        if item[2] == cv2.IMREAD_GRAYSCALE:
                            tmp = cv2.cvtColor(_frame, cv2.COLOR_BGR2GRAY)
                            fs += (tmp, )
                        else:
                            fs += (_frame, )

                    # put the result in the outgoing queue
                    q_out.put(fs)

                    # Calculate the time needed to process the frame and print it
                    _process_time += time.time() - _start_t
                    _process_cnt += 1
                    if _process_cnt % 100 == 0:
                        logger.debug(
                            'FPS: %i (%i, %i)\t\t buffer size: %i' %
                            (100 / _process_time, w, h, q_out.qsize()))
                        _process_time = 0
                else:
                    logger.error("No frame received!")
                    stopped.value = 1

        # End of process reached
        logger.info("Image provider stopped")
Example #9
def main():

    # Check input format: camera or video file
    args = get_args()
    if args.video:
        logger.info("Starting on video file '%s'" % (args.video))
        imgProvider = ImageProvider(video_file=args.video)
    else:
        logger.info("Starting on camera input")
        imgProvider = ImageProvider(video_source=0)

    while not (imgProvider.isStarted() or imgProvider.isDone()):
        time.sleep(1)

    if imgProvider.isDone():
        logger.error(
            "Aborted, ImageProvider did not start. Please see log for errors!")
        return

    # Start the bee classification process only when it is enabled
    imgClassifier = None
    if get_config("NN_ENABLE"):
        imgClassifier = BeeClassification()

    # Create processes and connect message queues between them
    lorawan = None
    if get_config("RN2483A_LORA_ENABLE"):
        lorawan = LoRaWANThread()
    imgExtractor = ImageExtractor()
    imgConsumer = ImageConsumer()
    imgConsumer.setImageQueue(imgProvider.getQueue())
    if get_config("NN_ENABLE"):
        imgExtractor.setResultQueue(imgClassifier.getQueue())
        imgConsumer.setClassifierResultQueue(imgClassifier.getResultQueue())
    imgExtractor.setInQueue(imgConsumer.getPositionQueue())

    try:

        # Start the processes
        imgConsumer.start()
        imgExtractor.start()
        if lorawan is not None:
            lorawan.start()

        # Quit program if end of video-file is reached or
        # the camera got disconnected
        while True:
            time.sleep(0.01)
            if imgConsumer.isDone() or imgProvider.isDone():
                raise SystemExit(0)

    except (KeyboardInterrupt, SystemExit):

        # Tear down all running process to ensure that we don't get any zombies
        if lorawan is not None:
            lorawan.stop()
        imgProvider.stop()
        imgExtractor.stop()
        imgConsumer.stop()
        if imgClassifier:
            imgClassifier.stop()
            imgClassifier.join()
        imgExtractor.join()
        imgProvider.join()
Example #10
def detect_bees(frame, scale):

    # Helper method to calculate the distance between the centers of two ellipses
    def near(p1,p2):
        return math.sqrt(math.pow(p1[0]-p2[0], 2) + math.pow(p1[1]-p2[1], 2))

    # Helper method to calculate an area measure of an ellipse (uses the
    # full axis lengths, i.e. four times the geometric area)
    def area(e1):
        return np.pi * e1[1][0] * e1[1][1]

    # Extract BGR and HSV channels
    b,g,r = cv2.split(frame)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    h,s,v = cv2.split(hsv)

    # Subtract the V channel from G and invert
    o = 255 - (g - v)

    # Blur Image and perform a binary thresholding
    o = cv2.GaussianBlur(o, (9,9), 9)
    _, o = cv2.threshold(o, get_config("BINARY_THRESHOLD_VALUE"), \
            get_config("BINARY_THRESHOLD_MAX"), cv2.THRESH_BINARY)

    # Invert result
    o = 255 - o

    # Detect contours
    contours, hierarchy = cv2.findContours(o, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    ellipses = []
    groups = []
    for i in range(len(contours)):

        # Only contours with at least five points can fit an ellipse
        if(len(contours[i]) >= 5):

            # Fit ellipse
            e = cv2.fitEllipse(contours[i])

            # Skip too small detections
            if e[1][0] < 8 or e[1][1] < 8:
                continue

            # Only keep ellipses within the configured size range
            ellipseArea = area(e)
            if ellipseArea > get_config("DETECT_ELLIPSE_AREA_MIN_SIZE") \
                    and ellipseArea < get_config("DETECT_ELLIPSE_AREA_MAX_SIZE"):

                # Scale ellipse to desired size
                e = ((e[0][0] * scale, e[0][1] * scale), (e[1][0] * scale, e[1][1] * scale), e[2])
                ellipses.append(e)
            elif ellipseArea > get_config("DETECT_GROUP_AREA_MIN_SIZE") and \
                    ellipseArea < get_config("DETECT_GROUP_AREA_MAX_SIZE"):

                # Scale ellipse to desired size
                e = ((e[0][0] * scale, e[0][1] * scale), (e[1][0] * scale, e[1][1] * scale), e[2])
                groups.append(e)

    # Merge nearby detections into one
    done = []
    skip = []
    solved = []
    for a in ellipses:

        # Find ellipses that are close to each other and store them as a group
        group = []
        for b in ellipses:

            # Skip self and already processed ellipse pairs
            if (a,b) in done or (b,a) in done or a == b:
                continue
            done.append((a,b))

            # Calculate distance between both ellipses
            dist = near(a[0],b[0])
            if dist < 50:

                # Put them into the group
                if a not in group:
                    group.append(a)
                if b not in group:
                    group.append(b)

                # Remember which ellipses were processed
                if a not in skip:
                    skip.append(a)
                if b not in skip:
                    skip.append(b)

        # Isolate the ellipse with the biggest area
        if len(group):
            solved.append(max(group, key=area))

    # Merge isolated ellipses with remaining ones
    rest = list(filter(lambda x: x not in skip, ellipses))
    merged = rest + solved

    return merged, groups
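
A reminder of the tuple layout used throughout: cv2.fitEllipse returns ((cx, cy), (d1, d2), angle), where d1 and d2 are the full axis lengths, so the area() helper above yields four times the geometric area pi*(d1/2)*(d2/2), and the configured thresholds must be calibrated to that scale. A short sketch of the scaling applied to detections, with made-up values:

import numpy as np

def scale_ellipse(e, scale):
    # Scale the center and axis lengths; the rotation angle stays unchanged
    (cx, cy), (d1, d2), angle = e
    return ((cx * scale, cy * scale), (d1 * scale, d2 * scale), angle)

e = ((10.0, 20.0), (8.0, 16.0), 45.0)  # made-up detection on the 180px frame
print(scale_ellipse(e, 3))             # mapped into 540px coordinates
print(np.pi * 8.0 * 16.0)              # area() as computed above
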
    def extractor(in_q, out_q, stopped, done):
        """! Static method, starts the process of the image extractor
        """

        # Ignore interrupt signals
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        _process_time = 0
        _process_cnt = 0

        # Prepare save path
        e_path = get_config("SAVE_EXTRACTED_IMAGES_PATH")
        if get_config("SAVE_EXTRACTED_IMAGES") and not exists(e_path):
            makedirs(e_path)

        while stopped.value == 0:
            if not in_q.empty():

                _start_t = time.time()
                _process_cnt += 1

                # Read one entry from the process queue
                data, image, scale = in_q.get()

                # Extract the bees from the image
                for item in data:
                    trackId, lastPosition = item

                    # Extract the bee image and sharpness value of the image
                    img, sharpness = cutEllipseFromImage(
                        lastPosition, image, 0, scale)

                    # Check the result; in some cases it may be None,
                    #  e.g. when the bee is close to the image border
                    if img is not None:

                        # Filter by minimum sharpness
                        if sharpness > get_config("EXTRACT_MIN_SHARPNESS"):

                            # Forward the image to the classification process (if it is running)
                            if get_config("NN_ENABLE"):
                                if out_q.full():
                                    logger.debug("Classifier Queue full")
                                    # Remove oldest entry to add a new one
                                    out_q.get()
                                out_q.put((trackId, img))

                            # Save the image if requested
                            if get_config("SAVE_EXTRACTED_IMAGES"):
                                cv2.imwrite(
                                    e_path + "/%i-%s.jpeg" %
                                    (_process_cnt, datetime.datetime.now().
                                     strftime("%Y%m%d-%H%M%S")), img)

                _process_time += time.time() - _start_t

                # Log the average per-frame process time every 100 frames
                if _process_cnt % 100 == 0:
                    logger.debug("Process time: %0.3fms" %
                                 (_process_time * 10.0))
                    _process_time = 0

            else:
                time.sleep(0.01)

        # The process stopped
        logger.info("Image extractor stopped")
Example #12
    def drawTracks(self, frame):
        """! Draw the current tracker status on the given frame.
        Draw tracks, names, ids, groups, ... depending on configuration
        @param  frame   The frame to draw on
        @return The resulting frame
        """

        # Draw tracks and detections
        for j in range(len(self.tracks)):

            # Only draw tracks that have more than one waypoint
            if len(self.tracks[j].trace) > 1:

                # Select a track color
                t_c = self.track_colors[self.tracks[j].trackId %
                                        len(self.track_colors)]

                # Draw marker that shows tracks underneath groups
                if get_config("DRAW_GROUP_MARKER") and self.tracks[j].in_group:
                    x = int(self.tracks[j].trace[-1][0])
                    y = int(self.tracks[j].trace[-1][1])
                    tl = (x - 30, y - 30)
                    br = (x + 30, y + 30)
                    cv2.rectangle(frame, tl, br, (0, 0, 0), 10)

                # Draw rectangle over last position
                if get_config("DRAW_RECTANGLE_OVER_LAST_POSTION"):
                    x = int(self.tracks[j].trace[-1][0])
                    y = int(self.tracks[j].trace[-1][1])
                    tl = (x - 10, y - 10)
                    br = (x + 10, y + 10)
                    cv2.rectangle(frame, tl, br, t_c, 1)

                # Draw trace
                if get_config("DRAW_TRACK_TRACE"):
                    for k in range(len(self.tracks[j].trace)):
                        x = int(self.tracks[j].trace[k][0])
                        y = int(self.tracks[j].trace[k][1])

                        if k > 0:
                            x2 = int(self.tracks[j].trace[k - 1][0])
                            y2 = int(self.tracks[j].trace[k - 1][1])
                            cv2.line(frame, (x, y), (x2, y2), t_c, 4)
                            cv2.line(frame, (x, y), (x2, y2), (0, 0, 0), 1)

                # Draw prediction
                if get_config("DRAW_TRACK_PREDICTION"):
                    x = int(self.tracks[j].last_predict[0])
                    y = int(self.tracks[j].last_predict[3])
                    cv2.circle(frame, (x, y), self.dist_threshold, (0, 0, 255),
                               1)

                # Draw velocity, acceleration
                if get_config("DRAW_ACCELERATION") or get_config(
                        "DRAW_VELOCITY"):
                    l_p = self.tracks[j].last_predict

                    l_px = int(l_p[0])
                    v_px = int(l_p[1]) * 10 + l_px
                    a_px = int(l_p[2]) * 10 + l_px
                    l_py = int(l_p[3])
                    v_py = int(l_p[4]) * 10 + l_py
                    a_py = int(l_p[5]) * 10 + l_py

                    if get_config("DRAW_VELOCITY"):
                        cv2.line(frame, (l_px, l_py), (v_px, v_py),
                                 (255, 255, 255), 4)
                        cv2.line(frame, (l_px, l_py), (v_px, v_py), t_c, 2)

                    if get_config("DRAW_ACCELERATION"):
                        cv2.line(frame, (l_px, l_py), (a_px, a_py),
                                 (255, 255, 255), 8)
                        cv2.line(frame, (l_px, l_py), (a_px, a_py), t_c, 6)

                x = int(self.tracks[j].trace[-1][0])
                y = int(self.tracks[j].trace[-1][1])
                if "varroa" in self.tracks[j].tags:
                    cv2.circle(frame, (x - 10, y - 50), 9, (0, 0, 255), -1)
                    cv2.circle(frame, (x - 10, y - 50), 10, (0, 0, 0), 2)
                if "pollen" in self.tracks[j].tags:
                    cv2.circle(frame, (x - 30, y - 50), 9, (255, 0, 0), -1)
                    cv2.circle(frame, (x - 30, y - 50), 10, (0, 0, 0), 2)
                if "cooling" in self.tracks[j].tags:
                    cv2.circle(frame, (x + 10, y - 50), 9, (0, 255, 0), -1)
                    cv2.circle(frame, (x + 10, y - 50), 10, (0, 0, 0), 2)
                if "wasps" in self.tracks[j].tags:
                    cv2.circle(frame, (x + 30, y - 50), 9, (0, 0, 0), -1)
                    cv2.circle(frame, (x + 30, y - 50), 10, (0, 0, 0), 2)

                # Add Track Id
                if get_config("DRAW_TRACK_ID"):
                    cv2.putText(frame, str(self.tracks[j].trackId) + " " + \
                            self.tracks[j]._name, (x,y-30),
                            cv2.FONT_HERSHEY_DUPLEX, 1, (255,255,255))
        # Draw count of bees
        if get_config("DRAW_IN_OUT_STATS"):
            _dh = getStatistics()
            bees_in, bees_out = _dh.getBeeCountOverall()
            cv2.putText(frame, "In: %i, Out: %i" % (bees_in, bees_out),
                        (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 5)

        return frame