Example #1
# Imports assumed from the is-wire / is-msgs APIs used below;
# RPCSkeletonsDetector and load_options are project-local:
from functools import partial
from is_wire.core import Channel, Logger
from is_wire.core import ZipkinExporter, BackgroundThreadTransport
from is_wire.rpc import ServiceProvider, LogInterceptor, TracingInterceptor
from is_msgs.image_pb2 import Image, ObjectAnnotations


def main():
    service_name = 'SkeletonsDetector.Detect'

    op = load_options()
    sd = RPCSkeletonsDetector(op)

    log = Logger(name=service_name)
    channel = Channel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)
    provider = ServiceProvider(channel)
    provider.add_interceptor(LogInterceptor())

    max_batch_size = max(100, op.zipkin_batch_size)
    exporter = ZipkinExporter(
        service_name=service_name,
        host_name=op.zipkin_host,
        port=op.zipkin_port,
        transport=BackgroundThreadTransport(max_batch_size=max_batch_size),
    )
    tracing = TracingInterceptor(exporter=exporter)
    # Register the interceptor so every RPC is traced through Zipkin.
    provider.add_interceptor(tracing)

    provider.delegate(topic='SkeletonsDetector.Detect',
                      function=partial(RPCSkeletonsDetector.detect, sd),
                      request_type=Image,
                      reply_type=ObjectAnnotations)

    provider.run()
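Every example on this page calls a project-local load_options() helper that is not shown. A minimal sketch of what it could look like, inferred only from the fields the examples read from it (the file name, defaults, and JSON format are all assumptions):

import json
from types import SimpleNamespace

def load_options(path='options.json'):
    # Defaults mirror values used elsewhere in these examples; all assumed.
    defaults = {
        'broker_uri': 'amqp://localhost:5672',
        'zipkin_host': 'localhost',
        'zipkin_port': 9411,
        'zipkin_batch_size': 100,
        'period_ms': 100,
    }
    try:
        with open(path) as f:
            defaults.update(json.load(f))
    except FileNotFoundError:
        pass  # no options file: keep the defaults
    return SimpleNamespace(**defaults)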
Example #2
# Imports assumed; StreamChannel, SkeletonsDetector, the image helpers,
# draw_skeletons, span_duration_ms and load_options are project-local:
import re
from is_wire.core import Logger, Message, Subscription, Tracer
from is_wire.core import ZipkinExporter, BackgroundThreadTransport
from is_msgs.image_pb2 import Image


def main():
    service_name = 'SkeletonsDetector.Detection'
    re_topic = re.compile(r'CameraGateway.(\w+).Frame')

    op = load_options()
    sd = SkeletonsDetector(op)

    log = Logger(name=service_name)
    channel = StreamChannel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    max_batch_size = max(100, op.zipkin_batch_size)
    exporter = ZipkinExporter(
        service_name=service_name,
        host_name=op.zipkin_host,
        port=op.zipkin_port,
        transport=BackgroundThreadTransport(max_batch_size=max_batch_size),
    )

    subscription = Subscription(channel=channel, name=service_name)
    subscription.subscribe('CameraGateway.*.Frame')

    while True:
        msg, dropped = channel.consume(return_dropped=True)

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_render')
        detection_span = None

        with tracer.span(name='unpack'):
            im = msg.unpack(Image)
            im_np = get_np_image(im)
        with tracer.span(name='detection') as _span:
            skeletons = sd.detect(im_np)
            detection_span = _span
        with tracer.span(name='pack_and_publish_detections'):
            sks_msg = Message()
            sks_msg.topic = re_topic.sub(r'SkeletonsDetector.\1.Detection',
                                         msg.topic)
            sks_msg.inject_tracing(span)
            sks_msg.pack(skeletons)
            channel.publish(sks_msg)
        with tracer.span(name='render_pack_publish'):
            im_rendered = draw_skeletons(im_np, skeletons)
            rendered_msg = Message()
            rendered_msg.topic = re_topic.sub(r'SkeletonsDetector.\1.Rendered',
                                              msg.topic)
            rendered_msg.pack(get_pb_image(im_rendered))
            channel.publish(rendered_msg)

        span.add_attribute('Detections', len(skeletons.objects))
        tracer.end_span()
        log.info('detections = {:2d}, dropped_messages = {:2d}',
                 len(skeletons.objects), dropped)
        log.info('took_ms = {{ detection: {:5.2f}, service: {:5.2f}}}',
                 span_duration_ms(detection_span), span_duration_ms(span))
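The get_np_image / get_pb_image converters used above are project-local. A sketch following the JPEG-encoding pattern the publisher examples below use (the quality setting and color handling are assumptions):

import cv2
import numpy as np
from is_msgs.image_pb2 import Image

def get_np_image(image):
    # Decode the bytes carried by an is-msgs Image into a BGR numpy array.
    buf = np.frombuffer(image.data, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)

def get_pb_image(array):
    # Encode a numpy array as JPEG and wrap it in an is-msgs Image.
    ok, encoded = cv2.imencode('.jpeg', array, [cv2.IMWRITE_JPEG_QUALITY, 80])
    return Image(data=encoded.tobytes())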
Example #3
# Imports assumed; SkeletonsHeatmap and load_options are project-local:
import socket
from builtins import super

from is_wire.core import Channel, Subscription, Logger, now
from is_wire.core import ZipkinExporter, BackgroundThreadTransport

class MyChannel(Channel):
    def consume_until(self, deadline):
        # Block at most until the deadline: clamp the timeout at zero so a
        # deadline already in the past still returns promptly.
        timeout = max(deadline - now(), 0.0)
        return super().consume(timeout=timeout)

service_name = 'Skeletons.Heatmap'
log = Logger(name=service_name)
ops = load_options()

channel = MyChannel(ops.broker_uri)
subscription = Subscription(channel)
exporter = ZipkinExporter(
    service_name=service_name,
    host_name=ops.zipkin_host,
    port=ops.zipkin_port,
    transport=BackgroundThreadTransport(max_batch_size=20),
)

subscription.subscribe('Skeletons.Localization')

sks_hm = SkeletonsHeatmap(ops)

period = ops.period_ms / 1000.0
deadline = now()
while True:
    deadline += period
    msgs = []
    while True:
        try:
            msgs.append(channel.consume_until(deadline=deadline))
        except socket.timeout:
            # Deadline reached; stop collecting and process the batch.
            break
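The excerpt stops mid-loop; a hypothetical continuation is sketched below only to show where the collected batch would go. The SkeletonsHeatmap method names and the output topic are invented for illustration, and Message / ObjectAnnotations imports are assumed as in the other examples:

    # Hypothetical continuation; the sks_hm method names are assumptions.
    for msg in msgs:
        sks_hm.update(msg.unpack(ObjectAnnotations))  # assumed method
    out = Message()
    out.topic = 'Skeletons.Heatmap.Rendered'  # assumed topic
    out.pack(sks_hm.get_pb_image())  # assumed method
    channel.publish(out)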
Example #4
import sys

import cv2
from is_wire.core import Channel, Subscription, Message, Logger
from is_wire.core import Tracer, ZipkinExporter, BackgroundThreadTransport
from is_msgs.image_pb2 import Image

log = Logger(name='Publisher')

broker_uri = 'amqp://localhost:5672'
if len(sys.argv) > 2:
    log.critical('Invalid arguments. Try: python requester.py <BROKER_URI>')
if len(sys.argv) > 1:
    broker_uri = sys.argv[1]

channel = Channel(broker_uri)
subscription = Subscription(channel)
exporter = ZipkinExporter(
    service_name='SkeletonsDetectorRequester',
    host_name='localhost',
    port=9411,
    transport=BackgroundThreadTransport(max_batch_size=100),
)

image = cv2.imread('../image.png')

tracer = Tracer(exporter)
with tracer.span(name='image') as span:
    cimage = cv2.imencode(ext='.jpeg',
                          img=image,
                          params=[cv2.IMWRITE_JPEG_QUALITY, 80])
    data = cimage[1].tobytes()
    im = Image(data=data)
    msg = Message(content=im, reply_to=subscription)
    msg.inject_tracing(span)
    channel.publish(message=msg, topic='SkeletonsDetector.Detect')

    cid = msg.correlation_id
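The excerpt ends after storing the correlation id. A minimal sketch of consuming the RPC reply with it, reusing the consume/unpack calls shown elsewhere in these examples (the timeout value is an assumption):

from is_msgs.image_pb2 import ObjectAnnotations

# Wait for the reply whose correlation id matches our request.
while True:
    reply = channel.consume(timeout=5.0)
    if reply.correlation_id == cid:
        annotations = reply.unpack(ObjectAnnotations)
        log.info('Received {} skeletons', len(annotations.objects))
        break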
Example #5
# Imports assumed from the names used below; RequestManager,
# MultipleObjectAnnotations and the annotation helpers (is_valid_model,
# data_frame_to_object_annotations, object_annotations_to_np, make_df_columns,
# RESOLUTION) are project-local:
import json
from os import walk, makedirs
from os.path import join, exists, basename, dirname, splitext
from shutil import rmtree
from urllib.parse import urlparse

import numpy as np
import pandas as pd
from is_wire.core import Channel, Logger
from is_wire.core import ZipkinExporter, BackgroundThreadTransport
from is_msgs.image_pb2 import ObjectAnnotations

log = Logger(name='RequestSkeletonsLocalization')


def main(sequence_folder, info_folder, output_folder, pose_model, cameras,
         broker_uri, zipkin_uri, min_requests, max_requests, timeout_ms):

    info_file_path = join(
        info_folder if info_folder is not None else sequence_folder,
        'info.json')
    if not exists(info_file_path):
        log.critical("'{}' file doesn't exist.", info_file_path)

    with open(info_file_path, 'r') as f:
        sequence_info = json.load(f)

    try:
        is_valid_model(pose_model)
    except Exception as ex:
        log.critical(str(ex))

    annotations_folder_path = join(sequence_folder, '2d_annotations',
                                   pose_model)
    _, _, annotations_files_available = next(walk(annotations_folder_path))

    # File names are '<camera_id>.csv'; the stem is the camera id.
    available_cameras = list(
        map(lambda x: int(splitext(x)[0]), annotations_files_available))
    not_available_cameras = set(cameras).difference(available_cameras)
    if len(not_available_cameras) > 0:
        nav_cam_str = ', '.join(map(str, sorted(not_available_cameras)))
        av_cam_str = ', '.join(map(str, sorted(available_cameras)))
        log.critical(
            "For sequence {}, model {}, camera(s) {} are not available. Only {} are present. Exiting.",
            sequence_folder, pose_model, nav_cam_str, av_cam_str)

    annotations_data = {}
    for camera in cameras:
        annotation_file_path = join(annotations_folder_path,
                                    '{}.csv'.format(camera))
        annotations_data[camera] = pd.read_csv(annotation_file_path)

    def make_request(sample_id):
        m_obj_annotations = MultipleObjectAnnotations()
        for camera in cameras:
            annotations = annotations_data[camera]
            sample_annotations = annotations[annotations['sample_id'] ==
                                             sample_id]
            obj_annotations = data_frame_to_object_annotations(
                annotations=sample_annotations,
                model=pose_model,
                frame_id=camera,
                resolution=RESOLUTION)
            m_obj_annotations.list.add().CopyFrom(obj_annotations)

        return m_obj_annotations

    sample_ids = list(range(sequence_info['begin'], sequence_info['end'] + 1))

    channel = Channel(broker_uri)
    zipkin_exporter = None

    if zipkin_uri is not None:
        zipkin_uri = urlparse(zipkin_uri)
        zipkin_exporter = ZipkinExporter(
            service_name="RequestSkeletonsLocalization",
            host_name=zipkin_uri.hostname,
            port=zipkin_uri.port,
            transport=BackgroundThreadTransport(max_batch_size=100),
        )

    request_manager = RequestManager(channel=channel,
                                     zipkin_exporter=zipkin_exporter,
                                     max_requests=max_requests,
                                     min_requests=min_requests)

    sequence_name = basename(dirname(sequence_folder + '/'))
    experiment_name = basename(dirname(output_folder + '/'))
    received_data = []
    while True:

        while request_manager.can_request() and len(sample_ids) > 0:
            sample_id = sample_ids.pop(0)
            request = make_request(sample_id)
            metadata = {
                "sample_id": sample_id,
                "experiment": experiment_name,
                "sequence": sequence_name,
            }
            request_manager.request(content=request,
                                    topic="SkeletonsGrouper.Localize",
                                    timeout_ms=timeout_ms,
                                    metadata=metadata)
            log.info("[{}] [{:>3s}] {}", sequence_name, ">>", sample_id)

        received_msgs = request_manager.consume_ready(timeout=1.0)

        for msg, received_metadata in received_msgs:
            localizations = msg.unpack(ObjectAnnotations)
            received_sample_id = received_metadata['sample_id']

            localizations_array = object_annotations_to_np(
                annotations_pb=localizations,
                model=pose_model,
                has_z=True,
                add_person_id=True,
                sample_id=received_sample_id)
            received_data.append(localizations_array)

            log.info("[{}] [{:<3s}] {}", sequence_name, "<<",
                     received_sample_id)

        if request_manager.all_received() and len(sample_ids) == 0:
            log.info("All received.")
            received_data = np.vstack(received_data)
            df = pd.DataFrame(data=received_data,
                              columns=make_df_columns(pose_model))
            df.sort_values(by=['sample_id', 'person_id'],
                           axis='rows',
                           inplace=True)

            output_folder_path = join(output_folder, sequence_name, pose_model)
            if exists(output_folder_path):
                rmtree(output_folder_path)
            makedirs(output_folder_path)
            output_file_path = join(output_folder_path, 'data.csv')

            log.info("Saving results on {}", output_file_path)
            df.to_csv(path_or_buf=output_file_path, header=True, index=False)

            break
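A sketch of wiring this main to a command line (the flag names mirror the parameter names; every default below is an assumption):

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--sequence-folder', required=True)
    parser.add_argument('--info-folder', default=None)
    parser.add_argument('--output-folder', required=True)
    parser.add_argument('--pose-model', required=True)
    parser.add_argument('--cameras', type=int, nargs='+', default=[0, 1, 2, 3])
    parser.add_argument('--broker-uri', default='amqp://localhost:5672')
    parser.add_argument('--zipkin-uri', default=None)
    parser.add_argument('--min-requests', type=int, default=5)
    parser.add_argument('--max-requests', type=int, default=10)
    parser.add_argument('--timeout-ms', type=int, default=1000)
    args = parser.parse_args()
    main(**vars(args))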
Example #6
import sys

import cv2
from is_wire.core import Channel, Message, Logger, Tracer
from is_wire.core import ZipkinExporter, BackgroundThreadTransport
from is_msgs.image_pb2 import Image

log = Logger(name='Publisher')

topic_id = 0
broker_uri = 'amqp://localhost:5672'
if len(sys.argv) != 3 and len(sys.argv) != 1:
    log.critical(
        'Invalid arguments. Try: python publisher.py <BROKER_URI> <TOPIC_ID>')
if len(sys.argv) > 1:
    broker_uri = sys.argv[1]
    topic_id = sys.argv[2]

channel = Channel(broker_uri)
exporter = ZipkinExporter(
    service_name='CameraGateway.{}'.format(topic_id),
    host_name='localhost',
    port=9411,
    transport=BackgroundThreadTransport(max_batch_size=100),
)

image = cv2.imread('../image.png')

for k in range(10):
    tracer = Tracer(exporter)
    with tracer.span(name='image') as span:
        cimage = cv2.imencode(ext='.jpeg',
                              img=image,
                              params=[cv2.IMWRITE_JPEG_QUALITY, 80])
        data = cimage[1].tobytes()
        im = Image(data=data)
        msg = Message()
        msg.topic = 'CameraGateway.{}.Frame'.format(topic_id)
        # Completion assumed, mirroring the publish pattern of Example #2:
        msg.inject_tracing(span)
        msg.pack(im)
        channel.publish(msg)
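Each frame is published on CameraGateway.<TOPIC_ID>.Frame, which is exactly what the wildcard subscription CameraGateway.*.Frame in Example #2 consumes, so the two examples can run together against the same broker.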