def kafkastream():
    """Consume encoded frames from the configured MapR/Kafka stream and yield
    them as multipart MJPEG-style chunks (PNG payload) for an HTTP response.

    Reads stream/topic, group id and inter-frame delay from the module-level
    ``args`` namespace. Runs until a non-EOF consumer error occurs.
    """
    c = Consumer({
        'group.id': args.groupid,
        'default.topic.config': {
            'auto.offset.reset': 'earliest',
            'enable.auto.commit': 'false',
        },
    })
    c.subscribe([args.stream + ':' + args.topic])
    running = True
    while running:
        msg = c.poll(timeout=0.2)
        if msg is None:
            continue
        if not msg.error():
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
            nparr = np.frombuffer(msg.value(), np.uint8)
            image = cv2.imdecode(nparr, 1)
            if image is None:
                # Corrupt/undecodable payload — skip rather than crash imencode.
                continue
            ret, jpeg = cv2.imencode('.png', image)
            bytecode = jpeg.tobytes()
            # Throttle the outgoing frame rate.
            time.sleep(args.timeout)
            yield (b'--frame\r\n'
                   b'Content-Type: image/png\r\n\r\n' + bytecode + b'\r\n\r\n')
        elif msg.error().code() != KafkaError._PARTITION_EOF:
            # Any error other than reaching the partition end stops the loop.
            print(msg.error())
            running = False
    c.close()
def kafkastream():
    """Consume encoded frames from '/tmp/rawvideostream:topic1', swap the
    RGB/BGR channel order, and yield multipart MJPEG-style chunks (PNG payload).

    Runs until a non-EOF consumer error occurs.
    """
    c = Consumer({
        'group.id': 'consumer1',
        'default.topic.config': {
            'auto.offset.reset': 'earliest',
            'enable.auto.commit': 'false',
        },
    })
    c.subscribe(['/tmp/rawvideostream:topic1'])
    running = True
    while running:
        msg = c.poll(timeout=1.0)
        if msg is None:
            continue
        if not msg.error():
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
            nparr = np.frombuffer(msg.value(), np.uint8)
            image = cv2.imdecode(nparr, 1)
            if image is None:
                # Corrupt/undecodable payload — skip rather than crash cvtColor.
                continue
            frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            ret, jpeg = cv2.imencode('.png', frame)
            bytecode = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/png\r\n\r\n' + bytecode + b'\r\n\r\n')
        elif msg.error().code() != KafkaError._PARTITION_EOF:
            # Any error other than reaching the partition end stops the loop.
            print(msg.error())
            running = False
    c.close()
def kafkastream():
    """Consume raw frame bytes from '/tmp/rawvideostream:topic1', run them
    through the module-level ``detect`` helper, and yield the resulting
    encoded image as multipart MJPEG-style chunks (PNG payload).

    Runs until a non-EOF consumer error occurs.
    """
    c = Consumer({
        'group.id': 'consumer2',
        'default.topic.config': {
            'auto.offset.reset': 'earliest',
            'enable.auto.commit': 'false',
        },
    })
    c.subscribe(['/tmp/rawvideostream:topic1'])
    running = True
    while running:
        msg = c.poll(timeout=0.2)
        if msg is None:
            continue
        if not msg.error():
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
            nparr = np.frombuffer(msg.value(), np.uint8)
            # detect() is expected to return ready-to-send encoded image bytes
            # (defined elsewhere in this file — TODO confirm its contract).
            bytecode = detect(nparr)
            yield (b'--frame\r\n'
                   b'Content-Type: image/png\r\n\r\n' + bytecode + b'\r\n\r\n')
        elif msg.error().code() != KafkaError._PARTITION_EOF:
            # Any error other than reaching the partition end stops the loop.
            print(msg.error())
            running = False
    c.close()
parser.add_argument('--gpuid', default='0', type=int, help='') parser.add_argument('--readstream', default='/tmp/rawvideostream', help='') parser.add_argument('--writestream1', default='/tmp/processedvideostream', help='') parser.add_argument('--writestream2', default='/tmp/identifiedstream', help='') parser.add_argument('--writetopic1', default='topic1', help='topic to write to') parser.add_argument('--writetopic2', default='all', help='topic to write to') parser.add_argument('--readtopic', default='topic1', help='topic to write to') args = parser.parse_args() ctx = mx.gpu(args.gpuid) _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0) arg_params, aux_params = ch_dev(arg_params, aux_params, ctx) sym = resnet_50(num_class=2) model = face_embedding.FaceModel(args.gpuid) c = Consumer({'group.id': args.groupid, 'default.topic.config': {'auto.offset.reset': 'latest', 'enable.auto.commit': 'false'}}) c.subscribe([args.readstream+':'+args.readtopic]) running = True p = Producer({'streams.producer.default.stream': args.writestream2}) p_orig = Producer({'streams.producer.default.stream': args.writestream1}) while running: msg = c.poll(timeout=0) if msg is None: continue if not msg.error(): nparr = np.fromstring(msg.value(), np.uint8) img_orig = cv2.imdecode(nparr, 1) img, scale = resize(img_orig.copy(), 600, 1000) im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32) # (h, w, scale) img = np.swapaxes(img, 0, 2) img = np.swapaxes(img, 1, 2) # change to (c, h, w) order
def kafkastream():
    """Consume pickled (frame, bboxes, embeddings) tuples, find the face most
    similar to the reference embedding of ``args.filename``, and — when the
    similarity exceeds ``args.threshold`` — yield the frame with the matching
    box drawn, as multipart MJPEG-style chunks (PNG payload).

    Optionally republishes the annotated frame to ``args.writetopic`` when
    ``args.writetostream`` is set. Runs until a non-EOF consumer error occurs.
    """
    if args.gpuid >= 0:
        ctx = mx.gpu(args.gpuid)
    else:
        ctx = mx.cpu()
    _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)
    arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
    sym = resnet_50(num_class=2)
    model = face_embedding.FaceModel(args.gpuid)
    # Reference embedding (transposed) of the person we are searching for.
    f1T = get_face_embedding(args.filename, arg_params, aux_params, sym, model, ctx)
    c = Consumer({
        'group.id': args.groupid,
        'default.topic.config': {
            'auto.offset.reset': 'earliest',
            'enable.auto.commit': 'false',
        },
    })
    c.subscribe([args.readstream + ':' + args.readtopic])
    running = True
    p = Producer({'streams.producer.default.stream': args.writestream})
    while running:
        msg = c.poll(timeout=0)
        if msg is None:
            continue
        if not msg.error():
            # SECURITY: pickle.loads on stream data executes arbitrary code if
            # a producer is untrusted — only safe on a trusted internal stream.
            pickle_vector = pickle.loads(msg.value())
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
            nparr = np.frombuffer(pickle_vector[0], np.uint8)
            img_orig = cv2.imdecode(nparr, 1)
            bbox_vector = pickle_vector[1]
            print(len(bbox_vector))
            embedding_vector = pickle_vector[2]
            if len(embedding_vector) > 0 and img_orig is not None:
                # Cosine-style similarity of each detected face to the target.
                sim_vector = [np.dot(f, f1T) for f in embedding_vector]
                idx = sim_vector.index(max(sim_vector))
                bbox = bbox_vector[idx]
                sim = sim_vector[idx]
                if sim > args.threshold:
                    img = cv2.cvtColor(img_orig, cv2.COLOR_RGB2BGR)
                    cv2.rectangle(img,
                                  (int(round(bbox[0])), int(round(bbox[1]))),
                                  (int(round(bbox[2])), int(round(bbox[3]))),
                                  (0, 255, 0), 2)
                    ret, jpeg = cv2.imencode('.png', img)
                    bytecode = jpeg.tobytes()
                    # Throttle the outgoing frame rate.
                    time.sleep(args.timeout)
                    yield (b'--frame\r\n'
                           b'Content-Type: image/png\r\n\r\n' + bytecode + b'\r\n\r\n')
                    if args.writetostream:
                        # .tobytes() replaces the deprecated ndarray.tostring().
                        p.produce(args.writetopic, jpeg.tobytes())
                        print(args.writetopic)
        elif msg.error().code() != KafkaError._PARTITION_EOF:
            # Any error other than reaching the partition end stops the loop.
            print(msg.error())
            running = False
    c.close()
    p.flush()
f_vector, jpeg = model.get_feature(img_orig, bbox, None) fT = f_vector.T return fT if __name__ == '__main__': ctx = mx.gpu(0) _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0) arg_params, aux_params = ch_dev(arg_params, aux_params, ctx) sym = resnet_50(num_class=2) model = face_embedding.FaceModel() f1T = get_face_embedding('sam_.jpg', arg_params, aux_params, sym, model) f2T = get_face_embedding('frances.jpg', arg_params, aux_params, sym, model) c = Consumer({'group.id': 'consumer02', 'default.topic.config': {'auto.offset.reset': 'earliest', 'enable.auto.commit': 'false'}}) # c.subscribe(['/user/mapr/nextgenDLapp/rawvideostream:topic1']) c.subscribe(['/tmp/rawvideostream:topic1']) running = True p = Producer({'streams.producer.default.stream': '/mapr/DLcluster/tmp/personalstream'}) p_orig = Producer({'streams.producer.default.stream': '/tmp/rawvideostream'}) while running: msg = c.poll(timeout=0) if msg is None: continue if not msg.error(): nparr = np.fromstring(msg.value(), np.uint8) img_orig = cv2.imdecode(nparr, 1) img, scale = resize(img_orig.copy(), 600, 1000) im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32) # (h, w, scale) img = np.swapaxes(img, 0, 2)
#!/usr/bin/env python3 from mapr_streams_python import Consumer, KafkaError import json import time from hdrh.histogram import HdrHistogram # Variables identfiying name of streams and topics TOPIC_FAST_MESSAGES = "/sample-stream:fast-messages" TOPIC_SUMMARY_MARKERS = "/sample-stream:summary-markers" print("Consuming messages...") c = Consumer({ 'group.id': 'mygroup', 'default.topic.config': { 'auto.offset.reset': 'earliest' }, 'auto.commit.interval.ms': 500 }) c.subscribe([TOPIC_FAST_MESSAGES, TOPIC_SUMMARY_MARKERS]) # Note that: # - The 3 is for significant figures but only applies when using HdrHistogram's output_percentile_distribution # function # - HdrHistogram for Python only works with integers (not decimals) stats_periodic = HdrHistogram(1, 10000000, 3) stats_all = HdrHistogram(1, 10000000, 3) timeouts = 0 records = 0 running = True try: