Example 1
run.

2. Run the image sending program in a different terminal window:
python test_1_pub.py

A cv2.imshow() window will appear showing the transmitted image. The sending
program sends images with an incrementing counter so you can see what is sent
and what is received.

If you terminate the receiving script, note that the sending script will
continue to increment and send images.

If you restart the receiving script, it will resume picking up images from the
current position.

To end the programs, press Ctrl-C in the terminal window of the sending program
first. Then press Ctrl-C in the terminal window of the receiving program. You
may have to press Ctrl-C in the display window as well.
"""

import sys
import cv2
sys.path.insert(0, '../imagezmq')  # imagezmq.py is in ../imagezmq
import imagezmq

image_hub = imagezmq.ImageHub(open_port='tcp://127.0.0.1:5555', REQ_REP=False)
while True:  # press Ctrl-C to stop image display program
    image_name, image = image_hub.recv_image()
    cv2.imshow(image_name, image)
    cv2.waitKey(1)  # brief wait so the window refreshes; does not block for a key
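
The sending program referenced above (test_1_pub.py) is not included in this
excerpt. A minimal PUB-side sketch that publishes an incrementing counter (the
bind address, image size, and frame rate are assumptions, not the actual test
program) could look like this:

import time

import cv2
import numpy as np
import imagezmq

# The publisher binds; the subscriber above connects to tcp://127.0.0.1:5555.
sender = imagezmq.ImageSender(connect_to='tcp://*:5555', REQ_REP=False)
counter = 0
while True:
    image = np.zeros((400, 400, 3), dtype='uint8')
    cv2.putText(image, str(counter), (100, 220),
                cv2.FONT_HERSHEY_SIMPLEX, 3.0, (0, 255, 0), 4)
    sender.send_image('counter image', image)
    counter += 1
    time.sleep(0.25)  # roughly four images per second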
Example 2
def initReciver(in_port=5050):
    # initialize the ImageHub object
    imageHub = imagezmq.ImageHub(open_port='tcp://127.0.0.1:%d' % in_port)

    return imageHub
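
A caller could use this helper roughly as follows (illustrative only; the
send_reply() call is needed because the hub defaults to REQ/REP messaging):

import cv2

image_hub = initReciver(5050)
while True:
    sender_name, frame = image_hub.recv_image()
    cv2.imshow(sender_name, frame)
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')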
Example 3
1. Run this program in its own terminal window on the Mac:
python test_3_mac_receive_jpg.py

This "receive and display images" program must be running before starting the
RPi sending program.

2. Run the jpg sending program on the RPi:
python test_3_rpi_send_jpg.py

A cv2.imshow() window will appear on the Mac showing the transmitted images as
a video stream. You can repeat Step 2 and start test_3_rpi_send_jpg.py on
multiple RPis; each one will cause a new cv2.imshow() window to open.

To end the programs, press Ctrl-C in the terminal window of the RPi first.
Then press Ctrl-C in the terminal window of the receiving program. You may
have to press Ctrl-C in the display window as well.
"""
# import imagezmq from parent directory
import numpy as np
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()
while True:  # show streamed images until Ctrl-C
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
    # see opencv docs for info on -1 parameter
    cv2.imshow(rpi_name, image)  # 1 window for each RPi
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
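
The RPi side (test_3_rpi_send_jpg.py) is not shown here. A minimal jpg-sending
sketch that uses cv2.VideoCapture instead of picamera (the Mac address, camera
source, and JPEG quality are assumptions) could look like this:

import socket
import time

import cv2
import imagezmq

sender = imagezmq.ImageSender(connect_to='tcp://mac-ip-address:5555')
rpi_name = socket.gethostname()  # used as the cv2.imshow() window title on the Mac
cap = cv2.VideoCapture(0)
time.sleep(2.0)  # let the camera warm up

while True:
    ok, frame = cap.read()
    if not ok:
        continue
    _, jpg_buffer = cv2.imencode('.jpg', frame,
                                 [int(cv2.IMWRITE_JPEG_QUALITY), 95])
    sender.send_jpg(rpi_name, jpg_buffer)  # blocks until the receiver replies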
additional image sending programs using step 2, then a new cv2.imshow() window
will open for each sending computer; this happens because each sending computer
is using its hostname as a unique label for its images.

If you terminate the receiving script, the image sending program will continue
to send images (but they will not be displayed).

If you restart the receiving script, it will resume displaying images from the
current image being sent by the image sending program.

To end the programs, press Ctrl-C in the terminal window of each program. It is
normal to get error messages when pressing Ctrl-C. There is no error trapping in
this simple example program.
"""

import cv2
import imagezmq

# Instantiate and provide the first publisher address
image_hub = imagezmq.ImageHub(open_port='tcp://192.168.86.39:5555',
                              REQ_REP=False)
image_hub.connect('tcp://192.168.86.38:5555')  # second publisher address
# image_hub.connect('tcp://192.168.0.102:5555')  # third publisher address
# image_hub.connect('tcp://192.168.0.103:5555')  # must specify address for every sender
# repeat hub.connect() statements as needed for all senders.

while True:  # show received images
    rpi_name, image = image_hub.recv_image()
    cv2.imshow(rpi_name, image)  # 1 window for each unique RPi name
    cv2.waitKey(1)
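
Each sending computer in this setup runs its own publisher and labels frames
with its hostname, which is why the receiver opens one window per sender. A
sketch of one such publisher (camera source and port are assumptions) might be:

import socket

import cv2
import imagezmq

# The publisher binds on its own machine; the subscriber above connects to it.
sender = imagezmq.ImageSender(connect_to='tcp://*:5555', REQ_REP=False)
sender_name = socket.gethostname()  # unique label -> unique cv2.imshow() window
cap = cv2.VideoCapture(0)

while True:
    ok, frame = cap.read()
    if ok:
        sender.send_image(sender_name, frame)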
Example 5
# python zmq_consumer.py --port 5555

import os
import cv2
import redis
import argparse
import numpy as np
import imagezmq

server_port = os.environ.get('SERVER_PORT', '5555')

open_port = 'tcp://*:{}'.format(server_port)
image_hub = imagezmq.ImageHub(open_port=open_port)

print('Open Port is {}'.format(open_port))

r = redis.Redis(host='localhost', port=6379, db=0)

print('Receiving frames...')

hosts = {}
# show streamed images
while True:
    try:
        # type(jpg_buffer) is <class 'zmq.sugar.frame.Frame'>
        host_name, jpg_buffer = image_hub.recv_jpg()
        
        # jpg_buffer is a 1-d numpy.ndarray; decode it into a 3-d image array
    def _run(self):
        receiver = imagezmq.ImageHub("tcp://{}:{}".format(self.hostname, self.port), REQ_REP=False)
        while not self._stop:
            self._data = receiver.recv_jpg()
            self._data_ready.set()
        receiver.close()
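
Only the _run method is shown above. A sketch of the surrounding helper class,
which runs the receive loop in a daemon thread and hands the most recent frame
to callers (the class name and the attributes not used in _run are assumptions),
might look like this:

import threading

import imagezmq


class VideoStreamSubscriber:
    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port
        self._stop = False
        self._data = None
        self._data_ready = threading.Event()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def receive(self, timeout=15.0):
        # wait for the background thread to publish a new (name, jpg_buffer) pair
        if not self._data_ready.wait(timeout=timeout):
            raise TimeoutError("Timed out reading from the publisher")
        self._data_ready.clear()
        return self._data

    def close(self):
        self._stop = True

    def _run(self):
        receiver = imagezmq.ImageHub("tcp://{}:{}".format(self.hostname, self.port),
                                     REQ_REP=False)
        while not self._stop:
            self._data = receiver.recv_jpg()
            self._data_ready.set()
        receiver.close()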
Example 7
logging.basicConfig(format="[%(levelname)s][%(asctime)s][hub]: %(message)s", level=logging.INFO)
if not args["dynamic"]:
    logging.info("Resulting stream will use {} row(s) and {} column(s)".format(args["rows"], args["cols"]))
else:
    logging.info("Resulting stream will be resized dynamically")

# frames from all clients
frames = {}

# time when clients were last time active
last_active_time = {}
last_active_check = datetime.now()

# hub for connection with client
client = imagezmq.ImageHub()
# non-blocking server connection
server = imagezmq.ImageSender(connect_to = 'tcp://*:5566', REQ_REP = False)

logging.info("Hub started")
while True:
    # receive image from client
    client_id, frame = client.recv_image()
    client.send_reply(b'OK')

    if client_id not in last_active_time.keys():
        logging.info("New client with id {} connected".format(client_id))
    
    last_active_time[client_id] = datetime.now()

    clients_count = len(last_active_time.keys())
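
A downstream consumer of the non-blocking PUB socket created above would
connect to it as a subscriber; a minimal sketch (the hub address is an
assumption) is shown below, and Example 13 uses the same port the same way:

import cv2
import imagezmq

# Subscribe to the hub's PUB socket (bound to tcp://*:5566 above).
subscriber = imagezmq.ImageHub(open_port='tcp://127.0.0.1:5566', REQ_REP=False)
while True:
    client_id, frame = subscriber.recv_image()
    cv2.imshow(client_id, frame)
    cv2.waitKey(1)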
# run this program to receive and display frames
# stream frames received from the camera to the browser
import cv2
import imagezmq

# Create the ImageHub (REQ/REP) that receives frames from the sender
image_hub = imagezmq.ImageHub(open_port='tcp://127.0.0.1:5555')
while True:  # show streamed images
    rpi_name, image = image_hub.recv_image()
    cv2.imshow(rpi_name, image)  # 1 window for each RPi
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
Example 9
@app.route('/')
def index():
    """Home page."""
    return render_template('index.html')


def gen_video_feed():
    while True:
        img_hub = app.config['IMAGE_HUB']
        text, image = img_hub.recv_image()

        # Encode image as jpeg
        image = cv2.imencode('.jpg', image,
                             [int(cv2.IMWRITE_JPEG_QUALITY), 75])[1].tobytes()
        img_hub.send_reply(b'ok')
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')


@app.route('/video')
def video():
    return Response(gen_video_feed(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == "__main__":
    print('[INFO] Starting server at http://localhost:5002')
    app.config['IMAGE_HUB'] = imagezmq.ImageHub()
    app.run(host='0.0.0.0', port=5002)
Example 10
import pyrealsense2 as rs
import numpy as np
import cv2
import zmq, sys
import imagezmq

image_hub = imagezmq.ImageHub(open_port='tcp://119.192.209.246:5555',
                              REQ_REP=False)

while True:
    name, image = image_hub.recv_image()
    cv2.imshow("image", image)
    cv2.waitKey(1)
Example 11
# https://github.com/jeffbass/imagezmq
import cv2
import imagezmq
image_hub = imagezmq.ImageHub(open_port='tcp://*:8500')
print('ready')

while True:  # show streamed images until Ctrl-C
    rpi_name, image = image_hub.recv_image()
    print(f'recv ok {rpi_name}')
    cv2.imshow(rpi_name, image)  # 1 window for each RPi
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
Example 12
def initiateZMQ():
    imageHub = imagezmq.ImageHub()
    return imageHub
Example 13
def sendImagesToWeb():
    receiver = imagezmq.ImageHub(open_port='tcp://127.0.0.1:5566', REQ_REP = False)
    while True:
        camName, frame = receiver.recv_image()
        jpg = cv2.imencode('.jpg', frame)[1]
        yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n'
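
This generator is normally wired to a Flask route; a minimal sketch of that
wiring (the app setup, route name, and port are assumptions) is:

from flask import Flask, Response

app = Flask(__name__)


@app.route('/video_feed')
def video_feed():
    # stream the multipart JPEG frames produced by sendImagesToWeb()
    return Response(sendImagesToWeb(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)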
Example 14
# https://pypi.org/project/imagezmq/

import cv2
import imagezmq

from config.config import SOCKET_ROBOT_ADDRESS, STREAM_PI_FEED_PORT

image_hub = imagezmq.ImageHub(open_port=SOCKET_ROBOT_ADDRESS +
                              STREAM_PI_FEED_PORT)

if __name__ == "__main__":
    while True:
        rpi_name, image = image_hub.recv_image()
        cv2.imshow(rpi_name, image)
        cv2.waitKey(1)
        image_hub.send_reply(b"OK")
Example 15
def detect_camera():
    # initialize the ImageHub object
    global outputFrame, lock
    imageHub = imagezmq.ImageHub()

    # initialize the list of class labels MobileNet SSD was trained to
    # detect, then generate a set of bounding box colors for each class
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the consider set (class labels we care about and want
    # to count), the object count dictionary, and the frame dictionary
    CONSIDER = set(["motorbike", "bus", "car", "train", "person"])
    objCount = {obj: 0 for obj in CONSIDER}
    frameDict = {}

    # initialize the dictionary which will contain information regarding
    # when a device was last active, then store the time the last check
    # was made (i.e., now)
    lastActive = {}
    lastActiveCheck = datetime.now()

    # store the estimated number of Pis and the active-check period, then
    # calculate the number of seconds to wait before checking whether a
    # device has gone inactive
    ESTIMATED_NUM_PIS = 4
    ACTIVE_CHECK_PERIOD = 10
    ACTIVE_CHECK_SECONDS = ESTIMATED_NUM_PIS * ACTIVE_CHECK_PERIOD

    # assign montage width and height so we can view all incoming frames
    # in a single "dashboard"
    mW = args["montageW"]
    mH = args["montageH"]
    print("[INFO] detecting: {}...".format(", ".join(obj for obj in CONSIDER)))

    # start looping over all the frames
    while True:
        # receive RPi name and frame from the RPi and acknowledge
        # the receipt
        (rpiName, frame) = imageHub.recv_image()
        imageHub.send_reply(b'OK')

        # if a device is not in the last active dictionary then it means
        # that it's a newly connected device
        if rpiName not in lastActive.keys():
            print("[INFO] receiving data from {}...".format(rpiName))

        # record the last active time for the device from which we just
        # received a frame
        lastActive[rpiName] = datetime.now()

        # resize the frame to have a maximum width of 400 pixels, then
        # grab the frame dimensions and construct a blob
        frame = imutils.resize(frame, width=400)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and
        # predictions
        net.setInput(blob)
        detections = net.forward()

        # reset the object count for each object in the CONSIDER set
        objCount = {obj: 0 for obj in CONSIDER}

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # detections
                idx = int(detections[0, 0, i, 1])

                # check to see if the predicted class is in the set of
                # classes that need to be considered
                if CLASSES[idx] in CONSIDER:
                    # increment the count of the particular object
                    # detected in the frame
                    objCount[CLASSES[idx]] += 1

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")

                    # draw the bounding box around the detected object on
                    # the frame
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (255, 0, 0), 2)

        # draw the sending device name on the frame
        cv2.putText(frame, rpiName, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 0, 255), 2)

        # draw the object count on the frame
        label = ", ".join("{}: {}".format(obj, count)
                          for (obj, count) in objCount.items())
        cv2.putText(frame, label, (10, h - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)

        # update the new frame in the frame dictionary
        frameDict[rpiName] = frame

        # build a montage using images in the frame dictionary
        montages = build_montages(frameDict.values(), (w, h), (mW, mH))

        # display the montage(s) on the screen
        for (i, montage) in enumerate(montages):
            with lock:
                outputFrame = montage.copy()
            # cv2.imshow("Home pet location monitor ({})".format(i),
            # 	montage)

        # detect any keypresses
        key = cv2.waitKey(1) & 0xFF

        # if current time *minus* last time when the active device check
        # was made is greater than the threshold set then do a check
        if (datetime.now() - lastActiveCheck).seconds > ACTIVE_CHECK_SECONDS:
            # loop over all previously active devices
            for (rpiName, ts) in list(lastActive.items()):
                # remove the RPi from the last active and frame
                # dictionaries if the device hasn't been active recently
                if (datetime.now() - ts).seconds > ACTIVE_CHECK_SECONDS:
                    print("[INFO] lost connection to {}".format(rpiName))
                    lastActive.pop(rpiName)
                    frameDict.pop(rpiName)

            # set the last active check time as current time
            lastActiveCheck = datetime.now()

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
Example 16
    type=int,
    help=
    "type of messaging (default is 0: REQ/REP; 1 is PUB/SUB, 2 is REQ/REP + PUB/SUB)"
)
ap.add_argument("-bl",
                "--bl_li",
                default=1,
                type=int,
                help="run blur and lighting checks on server")
ap.add_argument("-ex", "--exp_num", required=True, help="for naming files")

args = vars(ap.parse_args())
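
# Example invocation (the script name and the truncated messaging flag above are
# assumed; only --bl_li and --exp_num are visible in this excerpt):
#   python receive_frames.py --messaging 1 --exp_num 1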

# initialize the ImageHub object
if args["messaging"] == 0:
    imageHub = imagezmq.ImageHub()
elif args["messaging"] == 1:
    # imageHub = imagezmq.ImageHub(open_port='tcp://localhost:5556', REQ_REP = False)
    imageHub = imagezmq.ImageHub(open_port='tcp://192.168.0.145:5588',
                                 REQ_REP=False)  #Ryan's laptop
elif args["messaging"] == 2:
    imageHub_rr = imagezmq.ImageHub()
else:
    raise ValueError("messaging input value must be 0, 1, or 2")

count = 0
time_data = [''] * 1000

# start looping over all the frames
while True:
    # receive client name and frame from the client and acknowledge
Example 17
@app.route('/')
def index():
    """Home page."""
    return render_template('index.html')


def gen_video_feed():
    while True:
        img_hub = app.config['IMAGE_HUB']
        text, image = img_hub.recv_image()

        # Encode image as jpeg
        image = cv2.imencode('.jpg', image,
                             [int(cv2.IMWRITE_JPEG_QUALITY), 75])[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')


@app.route('/video')
def video():
    return Response(gen_video_feed(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == "__main__":
    print('[INFO] Starting server at http://localhost:5000')
    app.config['IMAGE_HUB'] = imagezmq.ImageHub(
        open_port='tcp://127.0.0.1:5001', REQ_REP=False)
    app.run(host='0.0.0.0', port=5000)
def main():
    """
    main function interface
    :return: nothing
    """
    # statistics info
    moving_average_points = 50

    # initialize model
    model = Model()
    model.load_model('models_edgetpu/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
    model.load_labels('labels_edgetpu/coco_labels.txt')
    model.set_confidence_level(0.3)

    # initialize receiver
    image_hub = imagezmq.ImageHub()
    print('RPi Stream -> Receiver Initialized')
    time.sleep(1.0)

    # initialize render
    render = Render()
    print('RPi Stream -> Render Ready')

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    moving_average_model_load_image_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)
    image_count = 0

    # streaming
    print('RPi Stream -> Receiver Streaming')
    while True:
        start_time = time.monotonic()

        # receive image
        name, compressed = image_hub.recv_jpg()
        received_time = time.monotonic()

        # decompress image
        image = cv2.imdecode(np.frombuffer(compressed, dtype='uint8'), -1)
        decompressed_time = time.monotonic()

        # load image into model (cv2 or pil backend)
        model.load_image_cv2_backend(image)
        model_loaded_image_time = time.monotonic()

        # do model inference
        class_ids, scores, boxes = model.inference()
        model_inferenced_time = time.monotonic()

        # send reply
        image_hub.send_reply(b'OK')
        replied_time = time.monotonic()

        # render image
        render.set_image(image)
        render.render_detection(model.labels, class_ids, boxes, image.shape[1], image.shape[0], (45, 227, 227), 3)
        render.render_fps(moving_average_fps.get_moving_average())

        # show image
        cv2.imshow(name, image)
        image_showed_time = time.monotonic()
        if cv2.waitKey(1) == ord('q'):
            break

        # statistics
        instant_fps = 1 / (image_showed_time - start_time)
        moving_average_fps.add(instant_fps)
        receive_time = received_time - start_time
        moving_average_receive_time.add(receive_time)
        decompress_time = decompressed_time - received_time
        moving_average_decompress_time.add(decompress_time)
        model_load_image_time = model_loaded_image_time - decompressed_time
        moving_average_model_load_image_time.add(model_load_image_time)
        model_inference_time = model_inferenced_time - model_loaded_image_time
        moving_average_model_inference_time.add(model_inference_time)
        reply_time = replied_time - model_inferenced_time
        moving_average_reply_time.add(reply_time)
        image_show_time = image_showed_time - replied_time
        moving_average_image_show_time.add(image_show_time)
        total_time = moving_average_receive_time.get_moving_average() \
                     + moving_average_decompress_time.get_moving_average() \
                     + moving_average_model_load_image_time.get_moving_average() \
                     + moving_average_model_inference_time.get_moving_average() \
                     + moving_average_reply_time.get_moving_average() \
                     + moving_average_image_show_time.get_moving_average()

        # terminal prints
        if image_count % 10 == 0:
            print(" receiver's fps: %4.1f"
                  " receiver's time components: "
                  "receiving %4.1f%% "
                  "decompressing %4.1f%% "
                  "model load image %4.1f%% "
                  "model inference %4.1f%% "
                  "replying %4.1f%% "
                  "image show %4.1f%%"
                  % (moving_average_fps.get_moving_average(),
                     moving_average_receive_time.get_moving_average() / total_time * 100,
                     moving_average_decompress_time.get_moving_average() / total_time * 100,
                     moving_average_model_load_image_time.get_moving_average() / total_time * 100,
                     moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     moving_average_reply_time.get_moving_average() / total_time * 100,
                     moving_average_image_show_time.get_moving_average() / total_time * 100), end='\r')

        # counter
        image_count += 1
        if image_count == 10000000:
            image_count = 0
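
The MovingAverage helper used above is not defined in this excerpt. A minimal
sketch consistent with the two calls made on it, add() and get_moving_average()
(the implementation itself is assumed), could be:

from collections import deque


class MovingAverage:
    """Moving average over the most recent `points` samples (assumed helper)."""

    def __init__(self, points):
        self._samples = deque(maxlen=points)

    def add(self, value):
        self._samples.append(value)

    def get_moving_average(self):
        if not self._samples:
            return 0.0
        return sum(self._samples) / len(self._samples)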
Example 19
    def __init__(self):
        self.image_hub = imagezmq.ImageHub()
        time.sleep(5)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect(("127.0.0.1", 1234))
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
from mpl_toolkits.mplot3d import Axes3D
import argparse
import cv2
import operator
import pickle
#import imageio
from PIL import Image

#Camera Stream Libraries
# initialize the ImageHub object
import imagezmq
from datetime import datetime
imageHub = imagezmq.ImageHub()

#MQTT for control of blinds
import paho.mqtt.publish as publish
Broker = "192.168.0.185" #Ip address for blind control
pub_topic = "blinds/position"

from nets.ColorHandPose3DNetwork import ColorHandPose3DNetwork
from utils.general import detect_keypoints, trafo_coords, plot_hand, plot_hand_2d, plot_hand_3d
from pose.DeterminePositions import create_known_finger_poses, determine_position, get_position_name_with_pose_id
from pose.utils.FingerPoseEstimate import FingerPoseEstimate

#Define video source
cap = cv2.VideoCapture(0)
#RPI Source
imageHub = imagezmq.ImageHub()
Example 21
as a video stream. You can repeat Step 2 and start the with_ImageSender.py
program on multiple RPis; each one will cause a new cv2.imshow() window to open.

To end the programs, press Ctrl-C in the terminal window of each program.
"""

import sys
import time
import traceback
import numpy as np
import cv2
from imutils.video import FPS
import imagezmq

try:
    with imagezmq.ImageHub() as image_hub:
        while True:  # receive images until Ctrl-C is pressed
            sent_from, jpg_buffer = image_hub.recv_jpg()
            image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
            # see opencv docs for info on -1 parameter
            cv2.imshow(sent_from,
                       image)  # display images 1 window per sent_from
            cv2.waitKey(1)
            image_hub.send_reply(b'OK')  # REP reply
except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program; FPS stats computed below
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
finally:
Example 22
def c_to_f(celc):
    return ((celc * 1.8) + 32)


# Load our serialized face detector model from disk
prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

# Load the face mask detector model from disk
print("[INFO] loading model...")
maskNet = load_model("mask_detector.model")

print("[INFO] creating connection...")
imageHub = imagezmq.ImageHub(open_port="tcp://*:50007")
print("[INFO] connection established...")


def detect_and_predict_mask(frame, faceNet, maskNet):
    # Get the dimensions of the frame and create a blob object from it
    (height, width) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))

    # Pass the blob object through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()
    print(detections.shape)

    # Initialize the list of faces and their locations in the frame
    # Initialize the list of predictions for each of the faces from the mask network
Example 23
    def run_hub(self):
        if self.debug:
            logging.debug('setting imagehub...')
        self.image_hub = imagezmq.ImageHub()
        if self.debug:
            logging.debug('Started imagehub server.')
Example 24
def main(_argv):
    vk = b'Y\xf8D\xe6o\xf9MZZh\x9e\xcb\xe0b\xb7h\xdb\\\xd7\x80\xd2S\xf5\x81\x92\xe8\x109r*U\xebT\x95\x0c\xf2\xf4(\x13%\x83\xb8\xfa;\xf04\xd3\xfb'
    vk = VerifyingKey.from_string(vk)


    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    #STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    input_size = 416
    iou = 0.45
    score = 0.5

    model = 'yolov4'
    framework = ''
    tiny = True
    weights = './checkpoints/yolov4-tiny-416'

    count = False
    dont_show = False
    info = True
    crop = False


    #images = FLAGS.images
    #images = []
    #images.append("C:/Users/Kitzbi/Documents/tensorflow yolo/yolov4-custom-functions/data/images/dog.jpg")

    # load model
    if framework == 'tflite':
        interpreter = tf.lite.Interpreter(model_path=weights)
    else:
        saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])

    
    
    # statistics info
    moving_average_points = 50
    # initialize receiver
    image_hub = imagezmq.ImageHub()
    print('RPi Stream -> Receiver Initialized')
    time.sleep(1.0)

    # initialize render
    render = Render()
    print('RPi Stream -> Render Ready')

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    moving_average_model_load_image_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)
    image_count = 0

    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)

     # streaming
    print('RPi Stream -> Receiver Streaming')

    while True:
        start_time = time.monotonic()

        # receive image
        name, compressed = image_hub.recv_jpg()
        received_time = time.monotonic()

        # decompress image
        decompressedImage = cv2.imdecode(np.frombuffer(compressed, dtype='uint8'), -1)
        decompressed_time = time.monotonic()

        #frame = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
        #image = Image.fromarray(frame)

        original_image = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)

        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
        
        # get image name by using split method
        #image_name = image_path.split('/')[-1]
        #image_name = image_name.split('.')[0]

        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)

        with concurrent.futures.ThreadPoolExecutor() as executor:
            f1 = executor.submit(verify, vk, name, compressed)
            f2 = executor.submit(inference, framework, images_data, model, tiny, saved_model_loaded, iou, score)
            success = f1.result()
            boxes, scores, classes, valid_detections = f2.result()

        # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
        original_h, original_w, _ = original_image.shape
        bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)
        
        # hold all detection data in one variable
        pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0], valid_detections.numpy()[0]]

        

        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())
        
        # custom allowed classes (uncomment line below to allow detections for only people)
        #allowed_classes = ['person']

        

        # if crop flag is enabled, crop each detection and save it as new image
        if crop:
            crop_path = os.path.join(os.getcwd(), 'detections', 'crop', image_name)
            try:
                os.mkdir(crop_path)
            except FileExistsError:
                pass
            crop_objects(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB), pred_bbox, crop_path, allowed_classes)

        if count:
            # count objects found
            counted_classes = count_objects(pred_bbox, by_class = False, allowed_classes=allowed_classes)
            # loop through dict and print
            for key, value in counted_classes.items():
                print("Number of {}s: {}".format(key, value))
            boxtext, image = utils.draw_bbox(original_image, pred_bbox, image_count, info, counted_classes, allowed_classes=allowed_classes)
        else:
            boxtext, image = utils.draw_bbox(original_image, pred_bbox, image_count, info, allowed_classes=allowed_classes)
        
        image = Image.fromarray(image.astype(np.uint8))


        

        #print(boxtext)
        # send reply
        if info:
            image_hub.send_reply(boxtext)
        else:
            image_hub.send_reply(b'Ok')
        #stra = str(pred_bbox).encode()
        #image_hub.send_reply(stra)
        #print(stra)
        #image_hub.send_reply(str(pred_bbox).encode())
        #image_hub.send_reply(bytearray(pred_bbox))


        if not dont_show:
            #image.show()
            
            image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
            cv2.imshow('raspberrypi', image)
        image_showed_time = time.monotonic()
        if cv2.waitKey(1) == ord('q'):
            break

        # statistics
        instant_fps = 1 / (image_showed_time - start_time)
        moving_average_fps.add(instant_fps)
        receive_time = received_time - start_time
        moving_average_receive_time.add(receive_time)
        decompress_time = decompressed_time - received_time
        moving_average_decompress_time.add(decompress_time)
        #model_load_image_time = model_loaded_image_time - decompressed_time
        #moving_average_model_load_image_time.add(model_load_image_time)
        #model_inference_time = model_inferenced_time - model_loaded_image_time
        #moving_average_model_inference_time.add(model_inference_time)
        #reply_time = replied_time - model_inferenced_time
        #moving_average_reply_time.add(reply_time)
        #image_show_time = image_showed_time - replied_time
        #moving_average_image_show_time.add(image_show_time)
        #total_time = moving_average_receive_time.get_moving_average() \
        #             + moving_average_decompress_time.get_moving_average() \
        #             + moving_average_model_load_image_time.get_moving_average() \
        #             + moving_average_model_inference_time.get_moving_average() \
        #             + moving_average_reply_time.get_moving_average() \
        #             + moving_average_image_show_time.get_moving_average()

         #terminal prints
        #if image_count % 10 == 0:
            #print(moving_average_fps)
            #print(decompress_time)
            #print(" receiver's fps: %4.1f"
                  #" receiver's time components: "
                  #"receiving %4.1f%% "
                  #"decompressing %4.1f%% "
                  #"model load image %4.1f%% "
                  #"model inference %4.1f%% "
                  #"replying %4.1f%% "
                  #"image show %4.1f%%"
                  #% (moving_average_fps.get_moving_average()), end='\r')
                      #moving_average_fps.get_moving_average(),
                     #moving_average_receive_time.get_moving_average() / total_time * 100,
                     #moving_average_decompress_time.get_moving_average() / total_time * 100,
                     #moving_average_model_load_image_time.get_moving_average() / total_time * 100,
                     #moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     #moving_average_reply_time.get_moving_average() / total_time * 100,
                     #moving_average_image_show_time.get_moving_average() / total_time * 100), end='\r')

                    #artifically added
                     

        # counter
        image_count += 1
Example 25
# This is the template for the operator running a Feature Level Task
import json
import traceback
import numpy as np
import cv2
import imagezmq
from imutils import resize

jpeg_quality = 90
image_hub = imagezmq.ImageHub(open_port='tcp://*:5565')

try:
    while True:
        jsonstr, jpg_buffer = image_hub.recv_jpg()
        jsondata = json.loads(jsonstr)
        jsondata['jpeg_quality'] = jpeg_quality
        # print("Received image from {}".format(node_name))
        image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
        image = resize(image, width=400)
        _, jpg_buffer = cv2.imencode(
            ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
        jsonstr = json.dumps(jsondata)
        image_hub.send_reply(jsonstr.encode('utf-8'))

except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program

except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
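
The upstream client for this operator sends a JSON string as the message name
and a JPEG buffer as the payload, then reads the operator's updated JSON back
from the REQ/REP reply. A minimal client sketch (the operator address, camera
source, and JSON fields are assumptions) is:

import json
import socket

import cv2
import imagezmq

sender = imagezmq.ImageSender(connect_to='tcp://operator-host:5565')
node_name = socket.gethostname()
cap = cv2.VideoCapture(0)

while True:
    ok, frame = cap.read()
    if not ok:
        continue
    _, jpg_buffer = cv2.imencode('.jpg', frame,
                                 [int(cv2.IMWRITE_JPEG_QUALITY), 90])
    jsonstr = json.dumps({'node_name': node_name})
    reply = sender.send_jpg(jsonstr, jpg_buffer)  # REQ/REP: reply is the operator's JSON
    print(json.loads(reply.decode('utf-8')))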
Example 26
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 2.0
    nn_budget = None
    nms_max_overlap = 3.0

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True  # draw a blue box around each detected object
    writeVideo_flag = True  # record video output

    defaultSkipFrames = 1  # skipped frames between detections

    # set up collection of door
    H1 = 245
    W1 = 370
    H2 = 280
    W2 = 480
    H = None
    W = None

    R = 80  # min R is 56

    def solve_quadratic_equation(a, b, c):
        """ax2 + bx + c = 0"""
        delta = b**2 - 4 * a * c
        if delta < 0:
            print("The equation has no solution!")
        elif delta == 0:
            return -b / (2 * a)
        else:
            print("The equation has two distinct solutions!")
            if float((-b - sqrt(delta)) / (2 * a)) > float(
                (-b + sqrt(delta)) / (2 * a)):
                return float((-b - sqrt(delta)) / (2 * a))
            else:
                return float((-b + sqrt(delta)) / (2 * a))

    def setup_door(H1, W1, H2, W2, R):
        # step 1: find the midpoint of (W1, H1) and (W2, H2)
        I1 = (W1 + W2) / 2
        I2 = (H1 + H2) / 2

        # find the vector AB
        u1 = W2 - W1
        u2 = H2 - H1

        # AB is the normal vector of the bisector line d,
        # so the perpendicular bisector of AB has the equation
        # y = -(u1 / u2) * x - c / u2
        c = -u1 * I1 - u2 * I2  # find c

        # step 2: find the center O of the circle
        al = c / u2 + I2
        # compute D: the distance between I and O
        fi = acos(sqrt((I1 - W1)**2 + (I2 - H1)**2) / R)
        D = sqrt((I1 - W1)**2 + (I2 - H1)**2) * tan(fi)

        O1 = solve_quadratic_equation((1 + u1**2 / u2**2),
                                      2 * (-I1 + u1 / u2 * al),
                                      al**2 - D**2 + I1**2)
        O2 = -u1 / u2 * O1 - c / u2
        # the quadratic has two roots; keep only the upper one

        # step 3: find the points on the circle
        door_dict = dict()
        for w in range(W1, W2):
            h = O2 + sqrt(R**2 - (w - O1)**2)
            door_dict[w] = round(h)
        return door_dict

    door_dict = setup_door(H1, W1, H2, W2, R)

    totalFrames = 0
    totalIn = 0

    # create an empty list of centroids to count traffic
    pts = [deque(maxlen=30) for _ in range(9999)]
    ''' ----------------------------- Change video to image_hub --------------------------------------'''
    image_hub = imagezmq.ImageHub()

    fps_imutils = imutils.video.FPS().start()
    t1 = time.time()

    while True:
        cam_id, frame = image_hub.recv_image()  # frame shape 640*480*3

        frame = cv2.resize(frame, (736, 480))
        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # calculate video time
        videotime = time.time() - t1

        # # Draw a door line
        # for w in range (W1, W2):
        #     cv2.circle (frame, (w, door_dict[w]), 1, (0, 255, 255), -1)
        # cv2.circle (frame, (W1, H1), 4, (0, 0, 255), -1)
        # cv2.circle (frame, (W2, H2), 4, (0, 0, 255), -1)

        if totalFrames % defaultSkipFrames > -10:
            boxes, confidence, classes = yolo.detect_image(
                image)  # average time: 1.2s

            features = encoder(frame, boxes)
            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.cls for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            for det in detections:
                bbox = det.to_tlbr()
                if show_detections and len(classes) > 0:
                    det_cls = det.cls
                    score = "%.2f" % (det.confidence * 100) + "%"
                    cv2.putText(frame,
                                str(det_cls) + " " + score,
                                (int(bbox[0]), int(bbox[3]) - 10), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 1)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 1)
            '''
            # Call the tracker
            tracker.predict()
            tracker.update (detections)
            for track in tracker.tracks:
                if not track.is_confirmed ():
                    continue
                bbox = track.to_tlbr ()

                if not_count_staff (frame, int (bbox[0]), int (bbox[1]), int (bbox[2]), int (bbox[3])):
                    # adc = "%.2f" % (track.adc * 100) + "%"  # Average detection confidence
                    cv2.rectangle (frame, (int (bbox[0]), int (bbox[1])), (int (bbox[2]), int (bbox[3])), (0, 255, 255),
                                   2)
                    cv2.putText (frame, "STAFF", (int (bbox[0]), int (bbox[1]) - 10), 0,
                                 1e-3 * frame.shape[0], (0, 0, 255), 1)
                    continue
                else:
                    # adc = "%.2f" % (track.adc * 100) + "%"  # Average detection confidence
                    cv2.rectangle (frame, (int (bbox[0]), int (bbox[1])), (int (bbox[2]), int (bbox[3])),
                                   (255, 255, 255), 2)
                    cv2.putText (frame, "ID: " + str (track.track_id), (int (bbox[0]), int (bbox[1])), 0,
                                 1e-3 * frame.shape[0], (0, 255, 0), 1)

                x = [c[0] for c in pts[track.track_id]]
                y = [c[1] for c in pts[track.track_id]]

                centroid_x = int (((bbox[0]) + (bbox[2])) / 2)
                centroid_y = int (((bbox[1]) + (bbox[3])) / 2)

                if not track.Counted and centroid_x in range (W1, W2):
                    if centroid_y < np.mean (y) and door_dict[centroid_x] > centroid_y and np.max (x) - np.min (x) > 20:
                        totalIn += 1
                        track.Counted = True
                        print (track.track_id, track.Counted)

                cv2.circle (frame, (centroid_x, centroid_y), 4, (0, 255, 0), -1)
                pts[track.track_id].append((centroid_x, centroid_y))
                '''
            info = [("Time", "{:.4f}".format(videotime)), ("In", totalIn)]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (W - 150, ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            if show_detections:
                cv2.imshow(cam_id, frame)
                # Press Q to stop!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            image_hub.send_reply(b'OK')

        fps_imutils.update()

        totalFrames += 1

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    cv2.destroyAllWindows()
    def __init__(self):
        self.imageHub = imagezmq.ImageHub()
        self.started = False
        VideoServer.instance = self
Example 28
    def Init(self):
        self.imageHub = imagezmq.ImageHub()
Example 29
def face_detector(args):

    print(args)
    image_hub = imagezmq.ImageHub()

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
    weightsPath = os.path.sep.join(
        [args["face"], "res10_300x300_ssd_iter_140000.caffemodel"])
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    #maskNet = load_model(args["model"])

    device = torch.device('cpu')
    if torch.cuda.is_available():
        device = torch.device('cuda')

    maskNet = torch.load(args["model"], map_location=device)
    #model = torch.load("/Users/peterbonnesoeur/myWorkspace/face-mask-detector_pytorch/models_mask/model_softmax.pkl")
    #maskNet.load_state_dict(model)

    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")

    vs = []
    for device in args["devices"]:
        v = VideoStream(src=device)
        v.start()
        vs.append([v, device])
        time.sleep(2.0)  #Warm up time for the camera

    print(len(vs))
    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frames = []
        window_names = []

        for v in vs:
            print(v)
            frame = v[0].read()
            if frame is not None:
                frames.append(imutils.resize(frame, width=args["size"]))
                window_names.append("Camera_" + str(v[1]))

        for camera in range(args["number_cam"]):
            cam_id, frame = image_hub.recv_image()
            if frame is not None:
                frames.append(imutils.resize(frame, width=args["size"]))
                window_names.append(str(cam_id))

        for frame, window_name in zip(frames, window_names):
            # detect faces in the frame and determine if they are wearing a
            # face mask or not
            (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet,
                                                    device, args)
            # loop over the detected face locations and their corresponding
            # locations

            cv2.namedWindow(window_name)
            for (box, pred) in zip(locs, preds):
                print(pred)
                # unpack the bounding box and predictions
                (startX, startY, endX, endY) = box
                (mask, withoutMask) = pred

                # determine the class label and color we'll use to draw
                # the bounding box and text
                lim = 0.55
                if max(mask, withoutMask) > lim:
                    label = "Mask" if mask > withoutMask else "No Mask"
                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                else:
                    color = (255, 0, 0)
                    label = "Not recognized"
                print(label)

                # include the probability in the label
                label = "{}, {:.2f} ".format(label, max(mask, withoutMask))
                if args["anonymity"]:
                    face = frame[startY:endY, startX:endX]
                    face = anonymity(face)
                    frame[startY:endY, startX:endX] = face
                # display the label and bounding box rectangle on the output
                # frame
                cv2.putText(frame, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            cv2.imshow(window_name, frame)

        # show the output frame
        #cv2.imshow("Frame", final_frame)
        key = cv2.waitKey(1) & 0xFF
        for camera in range(args["number_cam"]):
            image_hub.send_reply(b'OK')

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    for v in vs:
        v[0].stop()
Example 30
# IP Address Receiver
if len(sys.argv) == 2:  # If there is no ip address input
    ipAddress = 'tcp://*:6666'
    print("Receiving on = " + ipAddress)
elif len(sys.argv) == 3:  # If there is ip address input
    ipAddress = 'tcp://' + sys.argv[2] + ':6666'
    print("Receiving on = " + ipAddress)
else:
    print("Error on ip address input")
    sys.exit()

# instantiate image_hub
# Specify the messaging type and the receiver IP address
# 1 for REQ/REP messaging
# 2 for PUB/SUB messaging
image_hub = imagezmq.ImageHub()
if (int(sys.argv[1])) == 1:
    image_hub = imagezmq.ImageHub(open_port=ipAddress)
elif (int(sys.argv[1])) == 2:
    image_hub = imagezmq.ImageHub(open_port=ipAddress, REQ_REP=False)
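
# Example invocations (the script name is illustrative):
#   python receiver.py 1                 -> REQ/REP hub bound to tcp://*:6666
#   python receiver.py 2 192.168.1.10    -> PUB/SUB hub connected to tcp://192.168.1.10:6666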

image_count = 0  # All images received counts
sender_image_counts = defaultdict(int)  # Image counts per sender
sender_image_start_time = defaultdict(datetime)  # Sender start time
sender_image_elasped_time = defaultdict()  # Sender elapsed time
sender_image_fps = defaultdict()  # Sender fps
first_image = True  # Flag for first image received

try:
    while True:  # receive images until Ctrl-C is pressed
        # Receives Image