Example #1
def encode_jpeg(arr, quality=85):
    if not np.issubdtype(arr.dtype, np.uint8):
        raise ValueError("Only accepts uint8 arrays. Got: " + str(arr.dtype))

    # simulate multi-channel array for single channel arrays
    while arr.ndim < 4:
        arr = arr[..., np.newaxis]  # add channels to end of x,y,z

    num_channel = arr.shape[3]
    reshaped = arr.T
    reshaped = np.moveaxis(reshaped, 0, -1)
    reshaped = reshaped.reshape(reshaped.shape[0] * reshaped.shape[1],
                                reshaped.shape[2], num_channel)
    if num_channel == 1:
        return simplejpeg.encode_jpeg(
            np.ascontiguousarray(reshaped),
            colorspace="GRAY",
            colorsubsampling="GRAY",
            quality=quality,
        )
    elif num_channel == 3:
        return simplejpeg.encode_jpeg(
            np.ascontiguousarray(reshaped),
            colorspace="RGB",
            quality=quality,
        )
    raise ValueError(
        "Number of image channels should be 1 or 3. Got: {}".format(
            arr.shape[3]))
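As a quick sanity check, here is a minimal usage sketch (not part of the original project) exercising both the grayscale and the RGB paths of this wrapper; note that the channel axis is expected in position 3, so an RGB image needs shape (x, y, 1, 3).

import numpy as np

# hypothetical inputs, assuming the encode_jpeg wrapper above is in scope
gray_volume = np.random.randint(0, 256, size=(64, 64, 4), dtype=np.uint8)     # x, y, z
rgb_image = np.random.randint(0, 256, size=(640, 480, 1, 3), dtype=np.uint8)  # x, y, z, c

gray_jpeg = encode_jpeg(gray_volume)           # padded to 4-D, z-slices stacked, encoded as GRAY
rgb_jpeg = encode_jpeg(rgb_image, quality=95)  # encoded as RGB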
Example #2
    def get_fiducial_map(self, search_pos=np.array([[0], [200], [240]]), mask=None, raw=False):
        '''
        Move to the search location, take a picture, and get the locations of all fiducials seen in the image.
        '''
        self.move(search_pos, wait=True)
        time.sleep(3)  # stabilize before taking the photo
        frame = self.capture(raw=True, imagepath="temp.jpg")
        if mask is not None:
            # keep only the (x, y, w, h) region of interest; black out the rest
            x, y, w, h = mask
            black_frame = np.zeros(frame.shape, dtype=np.uint8)
            black_frame[y:y+h, x:x+w, :] = frame[y:y+h, x:x+w, :]
            frame = black_frame
        # cv2.imwrite("temp2.jpg", frame)
        start = time.time()
        # _, img_encoded = cv2.imencode('.jpg', frame)
        img_encoded = encode_jpeg(frame)
        # base64-encode the image so it can travel as JSON
        img_encoded = base64.b64encode(img_encoded).decode('utf-8')
        print(f"Encoding time: {time.time()-start}")
        pixel2mm = float(self.position[2, 0]*self.pixel_scalar + self.pixel_intercept)
        print("getting fiducial map")
        start = time.time()
        r = requests.post(self.api_url+"/get_fiducials", json={"image": img_encoded, "pixel2mm": pixel2mm, "pose": self.position.tolist()})
        print(f"Request time: {time.time()-start}")
        print("request back")
        res = r.json()
        self.fiducial_map = res['location_dict']
        self.raw_fiducial_map = res['location_dict']
        if raw:
            # use raw (un-offset) locations for calibration
            return {int(fid): loc for fid, loc in res['location_dict'].items()}
        else:
            self.fiducial_map = {int(fid): self.offset_view_loc(loc) for fid, loc in res['location_dict'].items()}
            return self.fiducial_map
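The /get_fiducials endpoint itself is not shown here. A minimal sketch of what the receiving side might look like, assuming a Flask app; the detect_fiducials helper is hypothetical, only the endpoint name and payload keys come from the client code above.

import base64
import simplejpeg
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/get_fiducials", methods=["POST"])
def get_fiducials():
    payload = request.get_json()
    jpg_bytes = base64.b64decode(payload["image"])
    frame = simplejpeg.decode_jpeg(jpg_bytes)   # back to a numpy uint8 array
    pixel2mm = payload["pixel2mm"]
    pose = payload["pose"]
    # detect_fiducials() is a hypothetical helper returning {fid: [x, y, ...]}
    location_dict = detect_fiducials(frame, pixel2mm, pose)
    return jsonify({"location_dict": location_dict})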
Example #3
    def gen_frames(self):  # generate frame by frame from camera
        try:
            while True:

                try:
                    image_list, err = self.myrobot.get_modality("vision", True)
                except Exception:
                    continue

                if err.success:
                    # clear_img=self.fixImage(image_list[0].image)
                    # ret, buffer = cv2.imencode('.jpg', clear_img)
                    # # ret, buffer = cv2.imencode('.jpg', image_list[0].image)
                    # frame = buffer.tobytes()
                    frame = simplejpeg.encode_jpeg(image_list[0].image,
                                                   colorspace='BGR')
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' + frame +
                           b'\r\n')  # concat frame one by one and show result
                    # print("frame")
                    time.sleep(0.001)

            # frame = buffer.tobytes()

            # self.videoImgSrc=b'--frame\r\n Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
            # yield(self.videoImgSrc)
        except KeyboardInterrupt:
            # cv2.destroyAllWindows()
            self.log.info("Closing down")
            # self.myrobot.disconnect()
            # animus.close_client_interface()
            # sys.exit(-1)
        except SystemExit:
            # cv2.destroyAllWindows()
            self.log.info("Closing down")
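Generators like this are typically consumed by a Flask route that streams an MJPEG response whose boundary matches the b'--frame' marker above; a minimal sketch (the route name and app object are assumptions, not from the original code):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # robot_server is a hypothetical instance of the class that owns gen_frames()
    return Response(robot_server.gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')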
Example #4
def getRGB(data):
    global RGB
    #RGB = np.frombuffer(data.data, dtype=np.uint8).reshape(data.height, data.width, -1)
    img = np.ndarray(buffer=data.data,
                     dtype=np.uint8,
                     shape=(data.height, data.width, 3))
    RGB = simplejpeg.encode_jpeg(img)
    event.set()
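This callback presumably hangs off a ROS image subscriber; a hedged sketch of the wiring it appears to rely on (the node name, topic name, and event object are guesses, not from the original code):

import threading
import rospy
from sensor_msgs.msg import Image

RGB = None
event = threading.Event()

rospy.init_node('rgb_encoder', anonymous=True)            # node name is a guess
rospy.Subscriber('/camera/rgb/image_raw', Image, getRGB)  # topic name is a guess
event.wait()   # block until the first frame has been encoded into RGB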
Example #5
def img_to_jpeg_bytes(img: np.ndarray) -> bytes:
    if img.ndim == 2:
        colorspace = "Gray"
        img = img[..., None]
    elif img.ndim == 3:
        colorspace = "RGB"
    else:
        raise ValueError("Unsupported img shape: {}".format(img.shape))
    return simplejpeg.encode_jpeg(img,
                                  quality=_JPEG_WRITE_QUALITY,
                                  colorspace=colorspace)
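A round-trip sketch for the RGB path, assuming _JPEG_WRITE_QUALITY is an int defined elsewhere in the original module (the value below is only an illustration):

import numpy as np
import simplejpeg

_JPEG_WRITE_QUALITY = 90  # assumed value; the original module defines this elsewhere

rgb = np.zeros((120, 160, 3), dtype=np.uint8)
jpeg_bytes = img_to_jpeg_bytes(rgb)
decoded = simplejpeg.decode_jpeg(jpeg_bytes, colorspace="RGB")
assert decoded.shape == rgb.shape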
Example #6
def _encodeImageBinary(image, encoding, jpegQuality, jpegSubsampling,
                       tiffCompression):
    """
    Encode a PIL Image to a binary representation of the image (a jpeg, png, or
    tif).

    :param image: a PIL image.
    :param encoding: a valid PIL encoding (typically 'PNG' or 'JPEG').  Must
        also be in the TileOutputMimeTypes map.
    :param jpegQuality: the quality to use when encoding a JPEG.
    :param jpegSubsampling: the subsampling level to use when encoding a JPEG.
    :param tiffCompression: the compression format to use when encoding a TIFF.
    :returns: a binary image or b'' if the image is of zero size.
    """
    encoding = TileOutputPILFormat.get(encoding, encoding)
    if image.width == 0 or image.height == 0:
        return b''
    params = {}
    if encoding == 'JPEG':
        if image.mode not in ({'L', 'RGB', 'RGBA'}
                              if simplejpeg else {'L', 'RGB'}):
            image = image.convert('RGB' if image.mode != 'LA' else 'L')
        if simplejpeg:
            return ImageBytes(
                simplejpeg.encode_jpeg(
                    _imageToNumpy(image)[0],
                    quality=jpegQuality,
                    colorspace=image.mode if image.mode in {'RGB', 'RGBA'} else 'GRAY',
                    colorsubsampling={
                        -1: '444', 0: '444', 1: '422', 2: '420',
                    }.get(jpegSubsampling, str(jpegSubsampling).strip(':')),
                ),
                mimetype='image/jpeg')
        params['quality'] = jpegQuality
        params['subsampling'] = jpegSubsampling
    elif encoding in {'TIFF', 'TILED'}:
        params['compression'] = {
            'none': 'raw',
            'lzw': 'tiff_lzw',
            'deflate': 'tiff_adobe_deflate',
        }.get(tiffCompression, tiffCompression)
    elif encoding == 'PNG':
        params['compress_level'] = 2
    output = io.BytesIO()
    image.save(output, encoding, **params)
    return ImageBytes(
        output.getvalue(),
        mimetype=f'image/{encoding.lower().replace("tiled", "tiff")}')
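The integer-to-string mapping above mirrors PIL-style subsampling levels. A standalone sketch of just that translation; the sample image and values are illustrative, not from the original library.

import numpy as np
import simplejpeg
from PIL import Image

pil_img = Image.new('RGB', (64, 48), color=(200, 30, 30))
jpeg_subsampling = 1  # PIL-style: 0 -> 4:4:4, 1 -> 4:2:2, 2 -> 4:2:0
jpeg_bytes = simplejpeg.encode_jpeg(
    np.ascontiguousarray(np.asarray(pil_img)),  # H x W x 3 uint8
    quality=90,
    colorspace='RGB',
    colorsubsampling={-1: '444', 0: '444', 1: '422', 2: '420'}[jpeg_subsampling],
)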
Example #7
def encode_jpeg(arr, quality=85):
    if not np.issubdtype(arr.dtype, np.uint8):
        raise ValueError("Only accepts uint8 arrays. Got: " + str(arr.dtype))

    arr, num_channel = as2d(arr)
    arr = np.ascontiguousarray(arr)

    if num_channel == 1:
        return simplejpeg.encode_jpeg(
            arr,
            colorspace="GRAY",
            colorsubsampling="GRAY",
            quality=quality,
        )
    elif num_channel == 3:
        return simplejpeg.encode_jpeg(
            arr,
            colorspace="RGB",
            quality=quality,
        )
    raise ValueError(
        "Number of image channels should be 1 or 3. Got: {}".format(num_channel))
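The as2d helper is not shown in this snippet; based on the inline version in Example #1, it presumably flattens an (x, y, z, c) volume into a 2-D image plus a channel count, roughly as below. This is a hedged reconstruction, not the project's actual code.

import numpy as np

def as2d(arr):
    # hypothetical reconstruction of the helper used above, mirroring Example #1
    while arr.ndim < 4:
        arr = arr[..., np.newaxis]           # pad to (x, y, z, c)
    num_channel = arr.shape[3]
    reshaped = np.moveaxis(arr.T, 0, -1)     # (c, z, y, x) -> (z, y, x, c)
    reshaped = reshaped.reshape(
        reshaped.shape[0] * reshaped.shape[1], reshaped.shape[2], num_channel)
    return reshaped, num_channel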
Example #8
def gen_slam():
    while True:
        if 'fram' in globals():
            event.wait()
            event.clear()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + fram + b'\r\n')
        else:
            img = np.zeros((500, 500, 3), dtype=np.uint8)
            cv2.putText(img, 'no slam img', (50, 300), cv2.FONT_HERSHEY_PLAIN,
                        4.5, (int(time.time() * 100) % 255, 109, 201), 5,
                        cv2.LINE_AA)
            framx = simplejpeg.encode_jpeg(img)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + framx + b'\r\n')
Example #9
def gen():
    global t
    while True:
        if 'fram' in globals():
            event.wait()
            event.clear()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + fram + b'\r\n')
        else:
            img = np.zeros((500, 500, 3), dtype=np.uint8)
            cv2.putText(img, 'no img QQ', (50, 300), cv2.FONT_HERSHEY_PLAIN, 5,
                        (0, 255, 255), 5, cv2.LINE_AA)
            framx = simplejpeg.encode_jpeg(img)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + framx + b'\r\n')
Example #10
def on_image(data):
    global fram
    if show_img == 'slam':
        img = np.ndarray(buffer=data.data,
                         dtype=np.uint8,
                         shape=(data.height, data.width, 3))
        fram = simplejpeg.encode_jpeg(img)
        #t1 = time.time()
        #array = list(data.data)
        #img = np.array(array,np.float32)
        #img = img.reshape(data.height,data.width,3)
        #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        #rospy.loginfo(np.shape(img))
        #fram = cv2.imencode(".jpg",img)[1].tobytes()
        #print(time.time() - t1)
        event.set()
Example #11
    def gen_frames(self):  # generate frame by frame from camera
        try:
            frame = simplejpeg.encode_jpeg(self.capFrame,
                                           colorspace='BGR',
                                           quality=90)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
                   )  # concat frame one by one and show result
            # yield(self.videoImgSrc)
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            self.log.info("Closing down")
            self.vidThread.join()
            self.myrobot.disconnect()
            animus.close_client_interface()
            sys.exit(-1)
        except SystemExit:
            cv2.destroyAllWindows()
            self.log.info("Closing down")
            self.vidThread.join()
            self.myrobot.disconnect()
            animus.close_client_interface()
            sys.exit(-1)
Example #12
def take_picture(queue_signal):
    while True:
        # if receive a signal then start the camera
        if (not queue_signal.empty()):
            print("[*] Ignore sensor")
            print("[+] start camera")
            camera = PiCamera()
            camera.resolution = (1280, 720)
            camera.start_preview()

            sleep(2)

            output = empty((720, 1280, 3), dtype=uint8)
            camera.capture(output, 'rgb')
            data = simplejpeg.encode_jpeg(output, 90, "RGB",
                                          "444")  # return bytes

            device_xbee.write(data)

            camera.stop_preview()
            camera.close()
            print("[+] AMBIL GAMBAR")

            queue_signal.get()  # empty the queue
Example #13
import socket
import sys
import time
import traceback
import cv2
import imagezmq
import simplejpeg
from imutils.video import VideoStream

rpi_name = socket.gethostname()  # send RPi hostname with each image
picam = VideoStream(usePiCamera=True).start()
time.sleep(2.0)  # allow camera sensor to warm up
jpeg_quality = 95  # 0 to 100, higher is better quality

try:
    with imagezmq.ImageSender(connect_to='tcp://192.168.86.34:5555') as sender:
        while True:  # send images as a stream until Ctrl-C
            image = picam.read()
            jpg_buffer = simplejpeg.encode_jpeg(image,
                                                quality=jpeg_quality,
                                                colorspace='BGR')
            reply_from_mac = sender.send_jpg(rpi_name, jpg_buffer)

except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
finally:
    picam.stop()  # stop the camera thread
    sys.exit()
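On the receiving end (the host at 192.168.86.34 above), the usual imagezmq counterpart is an ImageHub loop that decodes each JPEG and acknowledges it; a minimal sketch of that side:

import cv2
import imagezmq
import simplejpeg

image_hub = imagezmq.ImageHub()  # binds tcp://*:5555 by default
while True:
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    image = simplejpeg.decode_jpeg(jpg_buffer, colorspace='BGR')
    cv2.imshow(rpi_name, image)
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')  # REQ/REP: every recv needs a reply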
Example #14
sender = imagezmq.ImageSender(connect_to="tcp://*:5555", REQ_REP=False)

context = zmq.Context()
rx = context.socket(zmq.SUB)
rx.connect(f"tcp://localhost:3000")
# subscribe to all events
rx.setsockopt_string(zmq.SUBSCRIBE, '')

print("Waiting for camera init")

vs = VideoStream(usePiCamera=True,
                 resolution=(640, 480),
                 exposure_mode="sports").start()
time.sleep(2.0)

print("Started")

while True:
    data = rx.recv_json()

    if data["type"] != "robotinfo":
        continue

    robot_info = data["data"]

    print(f"rx location {robot_info}")

    frame = vs.read()
    jpg_buffer = simplejpeg.encode_jpeg(frame, quality=100, colorspace='BGR')
    sender.send_jpg(json.dumps(robot_info), jpg_buffer)
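Because this sender is created with REQ_REP=False, the matching receiver is an ImageHub in PUB/SUB mode connecting to the sender's address; a hedged sketch with a placeholder host, since the real address is not given here:

import imagezmq
import simplejpeg

# replace <sender-host> with the machine running the publisher above
image_hub = imagezmq.ImageHub(open_port='tcp://<sender-host>:5555', REQ_REP=False)
while True:
    robot_info_json, jpg_buffer = image_hub.recv_jpg()
    frame = simplejpeg.decode_jpeg(jpg_buffer, colorspace='BGR')
    # robot_info_json is the json.dumps(robot_info) string sent with each frame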
Example #15
    def social_move(self, move, start_pos=None, speed=None):
        print("social move")
        if start_pos is None:
            start_pos = np.array([[0],[200],[240]])
        if speed is None:
            speed = self.speed
        if move is None:
            print("no move")
            return  
        
        elif move == 'wake':
            print("waking up")
            locs = np.array(list(self.fiducial_map.values()))[:,:2]
            if len(locs) > 1:
                # quick and dirty cluster the list and get the centroids
                # hover over the centroids
                clustering = DBSCAN(eps=100, min_samples=2).fit(locs)
                n_clusters = max(clustering.labels_) + 1
                print("I am here")
                centroids = [np.mean(locs[clustering.labels_==i], axis=0) for i in range(n_clusters)]
                for c in centroids:
                    self.move(np.array([[c[0]],[c[1]],[150]]), wait=True)
                    time.sleep(0.5)

            else:
                self.move(np.array([[-30],[200],[150]]))
                self.move(np.array([[-250],[150],[150]]),wrist_mode=1, wrist_angle=45)
                self.move(np.array([[-250],[150],[150]]),wrist_mode=1, wrist_angle=0)
                self.move(np.array([[-250],[150],[150]]),wrist_mode=1, wrist_angle=45)
                self.move(np.array([[30],[200],[150]]),wrist_mode=1, wrist_angle=0)
                self.move(np.array([[0],[250],[250]]))
            self.move(np.array([[0],[200],[240]]))
        elif move == 'follow':
            print("following")
            self.move(start_pos,wait=True)
            frame = self.capture(raw=True, imagepath="tempfollow.jpg")
            # frame = cv2.imread("/home/pi/hiro/hand_detection/data/1.jpg")
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) #convert color format b/c backend uses PIL
            img_encoded = encode_jpeg(frame)
            img_encoded = base64.b64encode(img_encoded).decode('utf-8')
            resp = requests.post(self.api_url+"/gethand", json={"image":img_encoded}).json()
            side = resp["side"]
            output = resp["output"]
            print(f"{output},{side}")
            if side == 1: #right
                self.move(np.array([[-100],[200],[200]]),wait=True,speed=4)
                cv2.imwrite(f"temp_right_{time.time()}.jpg",frame)
                time.sleep(1)
            elif side == 2:
                self.move(np.array([[100],[200],[200]]),wait=True, speed=4)
                cv2.imwrite(f"temp_left_{time.time()}.jpg",frame)
                time.sleep(1)
            else:
                return False
            return True
        elif move == 'breathe':
            print("breathing")
            x1 = 0 #np.random.random()*30-15
            x2 = np.random.random()*30-15
            z1 = (np.random.random()*20-10)+215
            z2 = (np.random.random()*20-10)+190
            self.move(np.array([[x1],[170],[z1]]),speed=2)
            time.sleep(2)
            if np.random.random() < 0.3:
                if np.random.random() < 0.5:
                    self.move(np.array([[x2-20],[170],[z2]]),speed=2)
                else:
                    self.move(np.array([[x2+20],[170],[z2]]),speed=2)
                time.sleep(1)
            # self.move(np.array([[x2],[170],[z2]]),speed=2)
            self.move(np.array([[0],[200],[240]]),speed=2)
            
            
            followed = self.social_move('follow', start_pos = np.array([[0],[200],[240]]), speed=2)
            if followed:
                time.sleep(2)
            else:
                if np.random.random() < 0.3:
                    time.sleep(2)
                    if np.random.random() < 0.5:
                        self.move(np.array([[x2-20],[170],[z2]]),speed=2)
                    else:
                        self.move(np.array([[x2+20],[170],[z2]]),speed=2)
            self.move(start_pos, speed=2)
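The 'wake' branch relies on DBSCAN to group nearby fiducials and hover over each cluster centroid; a standalone sketch of that clustering step with made-up coordinates:

import numpy as np
from sklearn.cluster import DBSCAN

# made-up fiducial x/y locations in millimetres
locs = np.array([[10, 200], [15, 210], [12, 205], [-180, 150], [-175, 155]])

clustering = DBSCAN(eps=100, min_samples=2).fit(locs)
n_clusters = max(clustering.labels_) + 1   # noise points are labelled -1
centroids = [locs[clustering.labels_ == i].mean(axis=0) for i in range(n_clusters)]
print(centroids)   # one (x, y) centroid per cluster to hover over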
Example #16
    def object_tracker(self, camera, image, send_q):
        """ Called as an imagnode Detector for each image in the pipeline.
        Using this code will deploy the SpyGlass for view and event managment.
        
        The SpyGlass provides for motion detection, followed by object detection,
        along with object tracking, in a cascading technique. Parallelization is
        implemented through multiprocessing with a data exchange in shared memory. 

        Parameters:
            camera (Camera object): current camera
            image (OpenCV image): current image
            send_q (Deque): where (text, image) tuples can be passed
                            to the imagehub for Librarian processing

        """
        if self.publish_cam:
            ns = time.time_ns()
            if self.encoder[0] == 'c':
                # Try not to over-publish, estimate throttling based on expected frame rate
                if (ns - self._lastPublished) > (1000000000 //
                                                 camera.framerate):
                    buffer = simplejpeg.encode_jpeg(
                        image, quality=camera.jpeg_quality, colorspace='BGR')
                    # TODO: Question: does the threshold above need to allow for image
                    # compression overhead? Asked for 32 FPS, getting about 24-25 per second
                    # for a (640,480) image size, within a pipeline velocity of 40-50 ticks/second.
                    # Inexplicably, publishing rates seem to increase slightly as subscribers connect.
                else:
                    buffer = None
            elif self.encoder[0] == 'o':
                encFrameMsg = self.jpegQ.tryGet()
                if encFrameMsg is not None:
                    buffer = bytearray(encFrameMsg.getData())
                else:
                    buffer = None
            else:
                # TODO add support for uncompressed, and video formats
                buffer = None
                self.publish_cam = False
                logging.error(
                    f"JPEG only. Unsupported compression for image publishing '{self.encoder}', function disabled."
                )

            if buffer:
                Outpost.publisher.send_jpg(camera.text, buffer)
                self._lastPublished = ns
                # Heartbeat message with current pipeline frame rates over the logger.
                # TODO: Make this a configurable setting. Currently every 5 minutes.
                mm = self._rate.update().minute
                if mm % 5 == 0 and mm != self._heartbeat[1]:
                    tickrate = (self._tick - self._heartbeat[0]) / (5 * 60)
                    logging.info(
                        f"fps({self._tick}, {self._looks}, {self._evts}, {tickrate:.2f}, {self._rate.fps():.2f})"
                    )
                    self._heartbeat = (self._tick, mm)

        rects = []  # fresh start here, no determinations made
        targets = self.sg.get_count()  # number of objects tracked by the SpyGlass
        interestingTargetFound = False  # only begin event capture when interested

        # Always apply the motion detector. It's fast and the information
        # is generally useful. Apply background subtraction model within
        # region of interest only.
        x1, y1 = self.detector.top_left
        x2, y2 = self.detector.bottom_right
        ROI = image[y1:y2, x1:x2]
        # convert to grayscale and smoothen using gaussian kernel
        gray = cv2.cvtColor(ROI, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        # Motion detection. This returns an aggregate
        # rectangle of the estimated area of motion.
        lens = Outpost.Lens_MOTION
        motionRect = self.sg.detect_motion(gray)
        if motionRect:
            self._noMotion = 0
            # motion-only mode provides for the capture and logging
            # of only the aggregate area of motion, without applying
            # any specialized lenses to the SpyGlass
            if self.motion_only:
                self.nextLens = Outpost.Lens_MOTION
                rects = [motionRect]
                labels = ["motion: 0"]
            else:
                if self.status != Outpost.Status_ACTIVE:
                    self._looks += 1
                    if self.nextLens not in [
                            Outpost.Lens_REDETECT, Outpost.Lens_RESET
                    ]:
                        self.nextLens = Outpost.Lens_DETECT
        else:
            self._noMotion += 1

        if self.depthAI:
            # When running DepthAI on an OAK camera, pull down any neural net results now.
            # SpyGlass can provide for optional supplemental analysis, append any such results afterwards.
            cnt = 0
            imageSeqThreshold = camera.cam.getImgFrame().getSequenceNum()
            normVals = np.full(4, self.dimensions[1])
            normVals[::2] = self.dimensions[0]
            for net in self.nnQs:
                # multiple neural nets may be used in parallel
                for nnMsg in net.tryGetAll():
                    # Each message relates to a single image, and can contain multiple results. For this
                    # initial shakedown, no assumptions regarding synchronization to the current frame are
                    # implemented. This should be OK, as long as the content of each queue is closely aligned.
                    nnSeq = nnMsg.getSequenceNum()
                    if nnSeq > imageSeqThreshold:
                        logging.debug(
                            f"ImgDetection sequence {nnSeq}, ImgFrame sequence {imageSeqThreshold}"
                        )
                    rects, labels = [], []
                    for nnDet in nnMsg.detections:
                        text = Outpost.MobileNetSSD_labels[nnDet.label]
                        logging.debug(
                            f"nnDet[{cnt}] image {nnSeq}, {text}, look {self._looks}"
                        )
                        # normalize detection result bounding boxes to frame dimensions
                        bbox = np.array(
                            [nnDet.xmin, nnDet.ymin, nnDet.xmax, nnDet.ymax])
                        rects.append(
                            (np.clip(bbox, 0, 1) * normVals).astype(int))
                        labels.append("{}: {:.4f}".format(
                            text, nnDet.confidence))
                        cnt += 1
                    if len(rects) > 0:
                        interested = self.sg.reviseTargetList(
                            Outpost.Lens_DETECT, rects, labels)
                        if interested:
                            interestingTargetFound = True
            if cnt:
                # TODO: might want to apply a special lens to the SpyGlass?
                self.nextLens = Outpost.Lens_MOTION

        if self.spyGlassOnly and (targets > 0 or motionRect):
            # ----------------------------------------------------------------------------------------
            #                     SpyGlass-only draft design pattern
            # ----------------------------------------------------------------------------------------
            # Motion was detected or already have targets in view
            # Alternating between detection and tracking
            # - object detection first, begin looking for characteristics on success
            # - initiate and run with tracking after objects detected
            # - re-deploy detection periodically, more frequently for missing expected attributes
            #   - such as persons without faces (after applying a specialized lens)
            #   - these supplementary results would not have individual trackers applied to them
            #   - otherwise re-detect as configured, building a new list of trackers as an outcome
            # ----------------------------------------------------------------------------------------
            #  This is a lot to ask of the imagenode module on a Raspberry Pi 4B. Minus some tricked-out
            #  hardware provisioning, such as a USB-driven accelerator, most of this should be in
            #  batch jobs on the Sentinel instead of out here on the edge.
            # ----------------------------------------------------------------------------------------

            if self.sg.has_result():

                # SpyGlass has results available, retrieve them now
                (lens, rects, labels) = self.sg.get_data()
                logging.debug(
                    f"LensTasking lenstype {lens} result: {len(rects)} objects, motionRect {motionRect}, tick {self._tick}"
                )

                if self.nextLens == Outpost.Lens_RESET:
                    # This is effectively a NOOP for the SpyGlass, clearing current results.
                    # A lens command = 0 insures that the next result set will be empty.
                    self.nextLens = Outpost.Lens_MOTION
                    rects = []
                elif self.nextLens == Outpost.Lens_REDETECT:
                    # If requested, clear results and apply a new lens.
                    self.nextLens = Outpost.Lens_DETECT
                    rects = []

                # Based on the Outpost <-> SpyGlass protocol, any result set
                # could be old, now very stale, and only just received, in which
                # case it's meaningless. So try to keep decision making in context.

                if len(rects) > 0:
                    # Have a non-empty result set back from the
                    # SpyGlass, note the time and default to tracking.
                    self.sg.lastUpdate = datetime.utcnow()
                    if self.object_tracking:
                        self.nextLens = Outpost.Lens_TRACK

                if self.nextLens != lens:
                    logging.debug(
                        f"Changing lens from {lens} to {self.nextLens}, tick {self._tick}"
                    )

                # Send current frame on out to the SpyGlass for processing.
                # This handshake is based on a REQ/REP socket pair over ZMQ.
                # More simply, for every send() there must always be a recv()
                #
                # Always. Every time. Why the emphasis?
                # -------------------------------------
                # The design of this protocol requires that whenever results are received,
                # a request must follow. This is usually what's desired. Since it always takes
                # a while to get an answer back, just send the SpyGlass request now without delay.
                #
                # What does this mean for the Outpost?  Results are always evaluated only after
                # another request has already gone out. Thus that pending result set may linger
                # quite a while before the next recv() if the Outpost determines there is nothing
                # left to look at and loses interest.

                logging.debug(
                    f"Sending '{self.nextLens}' to LensTasking, tick {self._tick}, look {self._looks}"
                )
                self.sg.apply_lens(self.nextLens, image)

                # With current frame sent to the SpyGlass for analysis, there is now
                # time to work through the result set from the prior request, if any.
                if self.spyGlassOnly and len(rects) > 0:
                    interestingTargetFound = self.sg.reviseTargetList(
                        lens, rects, labels)

                # To do this correctly, we need more CPU power. Ideally, detection and tracking
                # should run in parallel to provide for a more responsive feedback loop to
                # tune the tracker. This technique would run the detector more often, but only on
                # selected regions of interest within the image, where we already think the object
                # should be. This is how a country boy might try to build something that attempts to
                # masquerade as a HydraNet.

            else:
                # SpyGlass is busy. Skip this cycle and keep going.
                pass

        if len(rects) > 0:
            # If tracking and any Target of interest has not moved, re-detect immediately
            # and take note of this. Maybe the tracker got lost, or perhaps the Target
            # is just standing still. (TODO)

            targets = self.sg.get_count()
            logging.debug(f"Now tracking {targets} objects, tick {self._tick}")
            if targets == 0:
                # Finished processing results, and came up empty. Detection should run
                # again by default. Note the forced change in state for the next pass.
                self.nextLens = Outpost.Lens_REDETECT
            else:
                if self.status == Outpost.Status_INACTIVE:
                    if motionRect and (interestingTargetFound
                                       or self.motion_only):
                        # This is a new event, begin logging the tracking data
                        self.status = Outpost.Status_ACTIVE
                        ote = self.sg.new_event()
                        ote['fps'] = self._rate.fps()
                        logging.info(f"ote{json.dumps(ote)}")
                        self.event_start = self.sg.event_start
                        self._evts += 1

                if self.status == Outpost.Status_ACTIVE:
                    # event in progress
                    ote = self.sg.trackingLog('trk')
                    ote['lens'] = lens  # just curious, can see when camwatcher logging=DEBUG
                    ote['looks'] = self._looks  # just curious, can see when camwatcher logging=DEBUG
                    for target in self.sg.get_targets():
                        if target.upd == self.sg.lastUpdate:
                            ote.update(target.toTrk())
                            logging.info(f"ote{json.dumps(ote)}")

        # outpost tick count
        self._tick += 1
        if self._tick % self.skip_factor == 0:
            # Tracking threshold encountered? Run detection again. Should perhaps measure this
            # based on both the number of successful tracking calls, as well as an elapsed time
            # threshold. It might make sense to formulate based on the tick count if there is
            # an efficient way to gather metrics in-flight (something for the TODO list).
            # As implemented, this is often out of phase from the surrounding logic, i.e. detection
            # may have just completed. Formulating a one-size-fits-all solution is not simple. Much
            # depends on the field and depth of view. Cameras in close proximity to the subject ROI
            # need to be able to respond quickly to large changes in the images, such as when the
            # subject is moving towards the camera. Correlation tracking may not even be effective
            # in these scenarios.
            if lens == Outpost.Lens_TRACK:
                logging.debug(
                    f"tracking threshold reached, tick {self._tick}, look {self._looks}"
                )
                self.nextLens = Outpost.Lens_REDETECT

        if self.status == Outpost.Status_ACTIVE:
            # If an event is in progress, is it time to end it?
            stayalive = True  # Assume there is still something going on
            if targets == 0:
                stayalive = False
                logging.debug(
                    f"Event {self.sg.eventID} is tracking no targets")
            elif self._noMotion > 5:
                # This is more bandaid than correct. Mostly because the
                # CentroidTracker is currently hanging on to objects longer
                # than necessary
                self.status = Outpost.Status_QUIET
            else:
                # Fail safe kill switch, forced shutdown after 15 seconds.
                # TODO: Design flexibility for this via ruleset in configuration?
                #  ----------------------------------------------------------
                event_elapsed = datetime.utcnow() - self.event_start
                if event_elapsed.seconds > 15:
                    self.status = Outpost.Status_QUIET

            if self.status == Outpost.Status_QUIET:
                # TODO: Placeholder for something more clever.
                # For now, just call it quits and go back to motion detection
                logging.debug(
                    f"Status is quiet, ending event {self.sg.eventID}, targets {targets} noMotion {self._noMotion} tick {self._tick}"
                )
                stayalive = False

            if not stayalive:
                logging.info(f"ote{json.dumps(self.sg.trackingLog('end'))}")
                # Ultimately, need more smarts around this. For now though,
                # this is quick, easy, and painless. Just erase the SpyGlass
                # memory and reset for a fresh start.
                self.sg.resetTargetList()
                self.nextLens = Outpost.Lens_RESET
                self.status = Outpost.Status_INACTIVE
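The publishing throttle near the top of this detector simply compares elapsed nanoseconds against one frame period before encoding with simplejpeg and calling send_jpg(). Isolated, the idea looks like this; the names and the target frame rate are illustrative, not from the original module.

import time

FRAME_PERIOD_NS = 1_000_000_000 // 32   # target 32 frames per second (illustrative)
_last_published = 0

def ready_to_publish():
    # True only if at least one frame period has elapsed since the last send
    global _last_published
    now_ns = time.time_ns()
    if (now_ns - _last_published) > FRAME_PERIOD_NS:
        _last_published = now_ns
        return True
    return False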