Example #1
    parser.add_argument('--writetopic1', default='topic1', help='topic to write to')
    parser.add_argument('--writetopic2', default='all', help='topic to write to')
    parser.add_argument('--readtopic', default='topic1', help='topic to read from')
    args = parser.parse_args()

    ctx = mx.gpu(args.gpuid)
    _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)
    arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
    sym = resnet_50(num_class=2)
    model = face_embedding.FaceModel(args.gpuid)

    c = Consumer({'group.id': args.groupid,
                  'default.topic.config': {'auto.offset.reset': 'latest', 'enable.auto.commit': 'false'}})
    c.subscribe([args.readstream + ':' + args.readtopic])
    running = True
    p = Producer({'streams.producer.default.stream': args.writestream2})
    p_orig = Producer({'streams.producer.default.stream': args.writestream1})

    while running:
        msg = c.poll(timeout=0)
        if msg is None: continue
        if not msg.error():
            nparr = np.frombuffer(msg.value(), np.uint8)  # np.fromstring is deprecated for binary data
            img_orig = cv2.imdecode(nparr, 1)
            img, scale = resize(img_orig.copy(), 600, 1000)
            im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32)  # (h, w, scale)
            img = np.swapaxes(img, 0, 2)
            img = np.swapaxes(img, 1, 2)  # change to (c, h, w) order
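            # (the two swapaxes calls are equivalent to img = np.transpose(img, (2, 0, 1)))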
            img = img[np.newaxis, :]  # extend to (n, c, h, w)

            arg_params["data"] = mx.nd.array(img, ctx)
"""
This producer will send a bunch of messages to topic "fast-messages". Every so often,
it will send a message to "slow-messages". This shows how messages can be sent to
multiple topics. On the receiving end, we will see both kinds of messages but will
also see how the two topics aren't really synchronized.
"""

TOPIC_FAST_MESSAGES = "fast-messages"
TOPIC_SUMMARY_MARKERS = "summary-markers"
STREAM_SAMPLE = "/sample-stream"
NUMBER_OF_MESSAGES = 10000
FREQUENCY_DIFF_TOPICS = 10

try:
    print("Producing messages...")
    p = Producer({'streams.producer.default.stream': STREAM_SAMPLE})
    count = 0
    total_messages = 0
    for i in range(NUMBER_OF_MESSAGES):
        # Note that the Java version of this program uses System.nanoTime(), which has nanosecond
        # resolution. Here, time.time() is used instead, which returns seconds as a floating-point
        # number (Python 3.7+ also offers time.time_ns() for integer nanoseconds).
        # See https://docs.python.org/3/library/time.html#time.time for details.

        data = {'type': 'test', 't': float("%.3f" % time.time()), 'k': i}
        p.produce(TOPIC_FAST_MESSAGES, json.dumps(data).encode('utf-8'))
        count += 1
        total_messages += 1

        if i % FREQUENCY_DIFF_TOPICS == 0:
            data_fast = {
                'type': 'marker',
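As a counterpart to the two-topic producer above, here is a minimal consumer sketch,
assuming the same '/sample-stream' path and the confluent-kafka-style API used
throughout these examples; the group.id value is an illustrative choice:

from mapr_streams_python import Consumer
import json

c = Consumer({'group.id': 'two-topic-reader',
              'default.topic.config': {'auto.offset.reset': 'earliest'}})
# Subscribe to both topics on the sample stream.
c.subscribe(['/sample-stream:fast-messages', '/sample-stream:summary-markers'])
try:
    while True:
        msg = c.poll(timeout=1.0)
        if msg is None:
            continue
        if not msg.error():
            record = json.loads(msg.value().decode('utf-8'))
            # 'test' records come from fast-messages, 'marker' records from
            # summary-markers; their interleaving shows the two topics
            # aren't really synchronized.
            print(record['type'], record.get('k'))
finally:
    c.close()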
Example #3

from mapr_streams_python import Producer
import numpy as np
import cv2, time

p = Producer(
    {'streams.producer.default.stream': '/mapr/DLcluster/tmp/rawvideostream'})
cap = cv2.VideoCapture('streets_360.mp4')

while cap.isOpened():  # isOpened is a method; without the call it is always truthy
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:  # end of file or read error
        break
    # Our operations on the frame come here
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    ret, jpeg = cv2.imencode('.png', image)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    p.produce('topic1', jpeg.tobytes())  # tostring() is deprecated in NumPy

p.flush()
cap.release()
cv2.destroyAllWindows()
Example #4

from mapr_streams_python import Producer
import numpy as np
import cv2, time

p = Producer({'streams.producer.default.stream': '/user/dmeng/nextgenDLapp/rawvideostream'})
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)

while cap.isOpened():  # isOpened is a method; without the call it is always truthy
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:  # camera read failed
        break
    # Our operations on the frame come here
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    ret, jpeg = cv2.imencode('.png', image)
    # Display the resulting frame
    cv2.imshow('frame', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    p.produce('topic1', jpeg.tobytes())
    time.sleep(0.2)

p.flush()
cap.release()
cv2.destroyAllWindows()
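The producers above publish PNG-encoded frames to topic1 on a raw-video stream. A
minimal consumer sketch for reading and displaying them, assuming the stream path
from the example above; the group.id here is an illustrative choice:

from mapr_streams_python import Consumer
import numpy as np
import cv2

c = Consumer({'group.id': 'frame-viewer',
              'default.topic.config': {'auto.offset.reset': 'earliest'}})
c.subscribe(['/user/dmeng/nextgenDLapp/rawvideostream:topic1'])
while True:
    msg = c.poll(timeout=1.0)
    if msg is None:
        continue
    if not msg.error():
        # Decode the PNG bytes back into an image array.
        nparr = np.frombuffer(msg.value(), np.uint8)
        img = cv2.imdecode(nparr, 1)
        # The producer converted frames to RGB before encoding, so convert
        # back to BGR for display (Example #6 below does the same before drawing).
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imshow('consumed frame', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
c.close()
cv2.destroyAllWindows()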
Example #5

from mapr_streams_python import Producer
import numpy as np
import cv2, time
import sys

p = Producer({
    'streams.producer.default.stream':
    '/mapr/gcloud.cluster.com/tmp/rawvideostream'
})
if len(sys.argv) > 1:
    video_file = str(sys.argv[1])
else:
    print("USAGE: Video file must be specified as a command line argument")
    exit(1)
cap = cv2.VideoCapture(video_file)

while cap.isOpened():  # isOpened is a method; without the call it is always truthy
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:  # end of file or read error
        break
    # Our operations on the frame come here
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    ret, jpeg = cv2.imencode('.png', image)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # The message key carries the playback position in milliseconds.
    p.produce('topic1', jpeg.tobytes(), str(cap.get(cv2.CAP_PROP_POS_MSEC)))
    print("video position: " + str(cap.get(cv2.CAP_PROP_POS_MSEC)) + "ms")

p.flush()
cap.release()
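Because this producer passes the playback position as the message key, a consumer
can read it back. A minimal sketch, assuming the mapr_streams_python message object
exposes the confluent-kafka-style msg.key() accessor, with an illustrative group.id:

from mapr_streams_python import Consumer

c = Consumer({'group.id': 'position-reader',
              'default.topic.config': {'auto.offset.reset': 'earliest'}})
c.subscribe(['/mapr/gcloud.cluster.com/tmp/rawvideostream:topic1'])
try:
    while True:
        msg = c.poll(timeout=1.0)
        if msg is None:
            continue
        if not msg.error():
            # msg.key() should return the position string set by the producer
            # above (delivered as bytes, hence the decode).
            print("frame at " + msg.key().decode('utf-8') + " ms, " +
                  str(len(msg.value())) + " bytes")
finally:
    c.close()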
Example #6
def kafkastream():
    if args.gpuid >= 0:
        ctx = mx.gpu(args.gpuid)
    else:
        ctx = mx.cpu()
    _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)
    arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
    sym = resnet_50(num_class=2)
    model = face_embedding.FaceModel(args.gpuid)

    f1T = get_face_embedding(args.filename, arg_params, aux_params, sym, model,
                             ctx)

    c = Consumer({
        'group.id': args.groupid,
        'default.topic.config': {
            'auto.offset.reset': 'earliest',
            'enable.auto.commit': 'false'
        }
    })
    c.subscribe([args.readstream + ':' + args.readtopic])
    running = True
    p = Producer({'streams.producer.default.stream': args.writestream})

    while running:
        msg = c.poll(timeout=0)
        if msg is None: continue
        if not msg.error():
            # Each message is a pickled (frame bytes, bbox list, embedding list) tuple.
            pickle_vector = pickle.loads(msg.value())
            nparr = np.frombuffer(pickle_vector[0], np.uint8)
            img_orig = cv2.imdecode(nparr, 1)

            bbox_vector = pickle_vector[1]
            print(len(bbox_vector))
            embedding_vector = pickle_vector[2]
            if len(embedding_vector) > 0:
                # Dot-product similarity of each face embedding against the
                # reference embedding f1T; keep the best match.
                sim_vector = [np.dot(f, f1T) for f in embedding_vector]
                idx = sim_vector.index(max(sim_vector))
                bbox = bbox_vector[idx]
                sim = sim_vector[idx]
                if sim > args.threshold:
                    img = cv2.cvtColor(img_orig, cv2.COLOR_RGB2BGR)
                    cv2.rectangle(img,
                                  (int(round(bbox[0])), int(round(bbox[1]))),
                                  (int(round(bbox[2])), int(round(bbox[3]))),
                                  (0, 255, 0), 2)
                    ret, jpeg = cv2.imencode('.png', img)
                    bytecode = jpeg.tobytes()
                    time.sleep(args.timeout)
                    yield (b'--frame\r\n'
                           b'Content-Type: image/png\r\n\r\n' + bytecode +
                           b'\r\n\r\n')
                    if args.writetostream:
                        p.produce(args.writetopic, bytecode)  # reuse the bytes encoded above
                        print(args.writetopic)
        elif msg.error().code() != KafkaError._PARTITION_EOF:
            print(msg.error())
            running = False

    c.close()
    p.flush()
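kafkastream() yields multipart image parts separated by the '--frame' boundary. A
minimal sketch of serving such a generator over HTTP with Flask; the app object and
route name are illustrative assumptions, not part of the original:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # Each chunk from the generator is one '--frame' part; the browser replaces
    # the image as new parts arrive (MJPEG-style streaming).
    return Response(kafkastream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)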
Example #7
if __name__ == '__main__':
    ctx = mx.gpu(0)
    _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)
    arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
    sym = resnet_50(num_class=2)
    model = face_embedding.FaceModel()
    
    f1T = get_face_embedding('sam_.jpg', arg_params, aux_params, sym, model)
    f2T = get_face_embedding('frances.jpg', arg_params, aux_params, sym, model)

    c = Consumer({'group.id': 'consumer02',
                  'default.topic.config': {'auto.offset.reset': 'earliest', 'enable.auto.commit': 'false'}})
    # c.subscribe(['/user/mapr/nextgenDLapp/rawvideostream:topic1'])
    c.subscribe(['/tmp/rawvideostream:topic1'])
    running = True
    p = Producer({'streams.producer.default.stream': '/mapr/DLcluster/tmp/personalstream'})
    p_orig = Producer({'streams.producer.default.stream': '/tmp/rawvideostream'})

    while running:
        msg = c.poll(timeout=0)
        if msg is None: continue
        if not msg.error():
            nparr = np.frombuffer(msg.value(), np.uint8)  # np.fromstring is deprecated for binary data
            img_orig = cv2.imdecode(nparr, 1)
            img, scale = resize(img_orig.copy(), 600, 1000)
            im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32)  # (h, w, scale)
            img = np.swapaxes(img, 0, 2)
            img = np.swapaxes(img, 1, 2)  # change to (c, h, w) order
            img = img[np.newaxis, :]  # extend to (n, c, h, w)

            arg_params["data"] = mx.nd.array(img, ctx)
Example #8
from mapr_streams_python import Producer
import numpy as np
import cv2
import sys


def resize(im, target_size, max_size):
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im,
                    None,
                    None,
                    fx=im_scale,
                    fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
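
# Usage sketch (this matches the calls in Examples #1 and #7):
#   img, scale = resize(img_orig.copy(), 600, 1000)
# The short side is scaled toward target_size (600) unless that would push the
# long side past max_size (1000), in which case the long side caps the scale.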


p = Producer({
    'streams.producer.default.stream':
    '/mapr/gcloud.cluster.com/tmp/rawvideostream'
})
if len(sys.argv) > 1:
    fps = float(sys.argv[1])
else:
    print(
        "USAGE: Frames-per-second must be specified as a command line argument"
    )
    exit(1)

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

frame_counter = 0
while cap.isOpened():  # isOpened is a method; without the call it is always truthy