def request(self, content, topic, timeout_ms, metadata=None):

        if not self.can_request():
            raise Exception(
                "Can't have more than {} pending requests. Use the "
                "'RequestManager.can_request' method to check whether a new "
                "request can be made.".format(self._max_requests))

        tracer = Tracer(
            exporter=self._zipkin_exporter) if self._do_tracing else None
        span = tracer.start_span(name='request') if self._do_tracing else None

        msg = Message(content=content)
        msg.topic = topic
        msg.reply_to = self._subscription
        msg.timeout = timeout_ms / 1000.0

        self._log.debug("[Sending] metadata={}, cid={}", metadata,
                        msg.correlation_id)

        if self._do_tracing:
            for key, value in (metadata or {}).items():
                span.add_attribute(key, value)
            tracer.end_span()
            msg.inject_tracing(span)

        self._publish(msg, metadata)

        if len(self._requests) >= self._max_requests:
            self._can_request = False
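A reply to a request published this way arrives on the reply_to subscription and is matched by correlation id. As an illustration only (names assumed, mirroring the requester snippet further down this page), the caller side of that round trip might look like:

# Illustrative sketch: match the reply to the request by correlation_id.
# 'channel' and 'msg' are assumed to be the caller's Channel and the Message above.
cid = msg.correlation_id
while True:
    reply = channel.consume()
    if reply.correlation_id == cid:
        break  # this is the reply to our request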
Example #2
def test_injection():
    tracer = Tracer()
    with tracer.span(name="span_name") as span:
        message = Message()
        message.inject_tracing(span)

        assert message.metadata['x-b3-sampled'] == '1'
        assert message.metadata['x-b3-traceid'] == str(
            tracer.tracer.span_context.trace_id)
        assert message.metadata['x-b3-flags'] == '0'
        assert message.metadata['x-b3-spanid'] == str(span.span_id)
        assert message.metadata['x-b3-parentspanid'] == '0000000000000000'
Example #3
def main():
    service_name = 'SkeletonsDetector.Detection'
    re_topic = re.compile(r'CameraGateway.(\w+).Frame')

    op = load_options()
    sd = SkeletonsDetector(op)

    log = Logger(name=service_name)
    channel = StreamChannel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    max_batch_size = max(100, op.zipkin_batch_size)
    exporter = ZipkinExporter(
        service_name=service_name,
        host_name=op.zipkin_host,
        port=op.zipkin_port,
        transport=BackgroundThreadTransport(max_batch_size=max_batch_size),
    )

    subscription = Subscription(channel=channel, name=service_name)
    subscription.subscribe('CameraGateway.*.Frame')

    while True:
        msg, dropped = channel.consume(return_dropped=True)

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_render')
        detection_span = None

        with tracer.span(name='unpack'):
            im = msg.unpack(Image)
            im_np = get_np_image(im)
        with tracer.span(name='detection') as _span:
            skeletons = sd.detect(im_np)
            detection_span = _span
        with tracer.span(name='pack_and_publish_detections'):
            sks_msg = Message()
            sks_msg.topic = re_topic.sub(r'SkeletonsDetector.\1.Detection',
                                         msg.topic)
            sks_msg.inject_tracing(span)
            sks_msg.pack(skeletons)
            channel.publish(sks_msg)
        with tracer.span(name='render_pack_publish'):
            im_rendered = draw_skeletons(im_np, skeletons)
            rendered_msg = Message()
            rendered_msg.topic = re_topic.sub(r'SkeletonsDetector.\1.Rendered',
                                              msg.topic)
            rendered_msg.pack(get_pb_image(im_rendered))
            channel.publish(rendered_msg)

        span.add_attribute('Detections', len(skeletons.objects))
        tracer.end_span()
        log.info('detections = {:2d}, dropped_messages = {:2d}',
                 len(skeletons.objects), dropped)
        log.info('took_ms = {{ detection: {:5.2f}, service: {:5.2f}}}',
                 span_duration_ms(detection_span), span_duration_ms(span))
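Examples #3, #4, #10 and #11 log durations with a span_duration_ms helper that is not shown on this page. A minimal sketch, assuming the OpenCensus spans used here expose start_time/end_time as ISO 8601 timestamp strings:

from dateutil.parser import parse

def span_duration_ms(span):
    # Hypothetical helper: span duration in milliseconds, assuming
    # span.start_time and span.end_time are ISO 8601 timestamp strings.
    dt = parse(span.end_time) - parse(span.start_time)
    return dt.total_seconds() * 1000.0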
Example #4
def main():
    service_name = "FaceDetector.Detection"
    log = Logger(name=service_name)
    op = load_options()
    face_detector = FaceDetector(op.model)

    channel = StreamChannel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    exporter = create_exporter(service_name=service_name, uri=op.zipkin_uri)

    subscription = Subscription(channel=channel, name=service_name)
    subscription.subscribe(topic='CameraGateway.*.Frame')

    while True:
        msg, dropped = channel.consume_last(return_dropped=True)

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_render')
        detection_span = None

        with tracer.span(name='unpack'):
            im = msg.unpack(Image)
            im_np = to_np(im)

        with tracer.span(name='detection') as _span:
            camera_id = get_topic_id(msg.topic)
            faces = face_detector.detect(im_np)
            detection_span = _span

        with tracer.span(name='pack_and_publish_detections'):
            faces_msg = Message()
            faces_msg.topic = 'FaceDetector.{}.Detection'.format(camera_id)
            faces_msg.inject_tracing(span)
            faces_msg.pack(faces)
            channel.publish(faces_msg)

        with tracer.span(name='render_pack_publish'):
            img_rendered = draw_detection(im_np, faces)
            rendered_msg = Message()
            rendered_msg.topic = 'FaceDetector.{}.Rendered'.format(camera_id)
            rendered_msg.pack(to_image(img_rendered))
            channel.publish(rendered_msg)

        span.add_attribute('Detections', len(faces.objects))
        tracer.end_span()

        info = {
            'detections': len(faces.objects),
            'dropped_messages': dropped,
            'took_ms': {
                'detection': round(span_duration_ms(detection_span), 2),
                'service': round(span_duration_ms(span), 2)
            }
        }
        log.info('{}', str(info).replace("'", '"'))
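get_topic_id is not defined in this snippet; a minimal sketch, assuming it merely extracts the camera id from a 'CameraGateway.<id>.Frame' topic (create_exporter, also undefined here, presumably wraps the ZipkinExporter construction shown verbatim in Example #3):

def get_topic_id(topic):
    # Hypothetical helper: return '<id>' from a topic such as 'CameraGateway.<id>.Frame'.
    return topic.split('.')[1]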
Example #5
def test_propagation():
    topic = "span_test"
    channel = Channel(uri=URI, exchange=EXCHANGE)
    subscription = Subscription(channel)
    subscription.subscribe(topic)

    tracer = Tracer()
    with tracer.span(name="span_name") as span:
        span_id = span.span_id
        message = Message()
        message.body = "body".encode('latin1')
        message.inject_tracing(span)
        channel.publish(topic=topic, message=message)

    message2 = channel.consume(timeout=1.0)
    span_context = message2.extract_tracing()

    assert span_context.span_id == span_id
    assert span_context.trace_id == tracer.tracer.span_context.trace_id
Example #6 (truncated; begins mid-snippet)
    port=ops.zipkin_port,
    transport=BackgroundThreadTransport(max_batch_size=20),
)

subscription.subscribe('Skeletons.Localization')

sks_hm = SkeletonsHeatmap(ops)

period = ops.period_ms / 1000.0
deadline = now()
while True:
    deadline += period
    msgs = []
    while True:
        try:
            msgs.append(channel.consume_until(deadline=deadline))
        except socket.timeout:  # assumes consume_until signals its deadline with socket.timeout
            break
    
    span_context = msgs[-1].extract_tracing() if len(msgs) > 0 else None
    tracer = Tracer(exporter=exporter, span_context=span_context)
    with tracer.span(name='Render') as span:
        sks_list = list(map(lambda x: x.unpack(ObjectAnnotations), msgs))
        sks_hm.update_heatmap(sks_list)
        im_pb = sks_hm.get_pb_image()
        msg = Message()
        msg.topic = service_name
        msg.pack(im_pb)
        msg.inject_tracing(span)
        channel.publish(msg)
Example #7
def main():

    service_name = "CameraGateway"
    log = Logger(service_name)
    options = load_options()
    camera = CameraGateway(fps=options["fps"])

    publish_channel = Channel(options['broker_uri'])
    rpc_channel = Channel(options['broker_uri'])
    server = ServiceProvider(rpc_channel)
    logging = LogInterceptor()
    server.add_interceptor(logging)

    server.delegate(topic=service_name + ".*.GetConfig",
                    request_type=FieldSelector,
                    reply_type=CameraConfig,
                    function=camera.get_config)

    server.delegate(topic=service_name + ".*.SetConfig",
                    request_type=CameraConfig,
                    reply_type=Empty,
                    function=camera.set_config)

    exporter = create_exporter(service_name=service_name,
                               uri=options["zipkin_uri"])

    while True:

        # iterate through the listed videos
        for video in options['videos']:

            # person and gesture ids that identify this video sequence
            person_id = video['person_id']
            gesture_id = video['gesture_id']

            # build the video file path for each camera
            video_files = {
                cam_id: os.path.join(
                    options['folder'],
                    'p{:03d}g{:02d}c{:02d}.mp4'.format(person_id, gesture_id,
                                                       cam_id))
                for cam_id in options["cameras_id"]
            }

            for iteration in range(video['iterations']):

                info = {
                    "person": person_id,
                    "gesture": gesture_id,
                    "iteration": iteration
                }
                log.info('{}', str(info).replace("'", '"'))

                # object that reads frames from multiple video files
                video_loader = FramesLoader(video_files)

                # iterate through all samples in the video
                while True:

                    time_initial = time.time()

                    # check the RPC channel for configuration requests
                    try:
                        message = rpc_channel.consume(timeout=0)
                        if server.should_serve(message):
                            server.serve(message)
                    except socket.timeout:
                        pass

                    frame_id, frames = video_loader.read()

                    for cam in sorted(frames.keys()):
                        tracer = Tracer(exporter)
                        span = tracer.start_span(name='frame')
                        pb_image = to_pb_image(frames[cam])
                        msg = Message(content=pb_image)
                        msg.inject_tracing(span)
                        topic = 'CameraGateway.{}.Frame'.format(cam)
                        publish_channel.publish(msg, topic=topic)
                        tracer.end_span()

                    took_ms = (time.time() - time_initial) * 1000

                    dt = (1 / camera.fps) - (took_ms / 1000)
                    if dt > 0:
                        time.sleep(dt)
                        info = {
                            "sample": frame_id,
                            "took_ms": took_ms,
                            "wait_ms": dt * 1000
                        }
                        log.info('{}', str(info).replace("'", '"'))

                    if frame_id >= (video_loader.num_samples - 1):
                        video_loader.release()
                        del video_loader
                        gc.collect()
                        break

        if options['loop'] is False:
            break
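Example #7 publishes frames with a to_pb_image helper that is not shown. A minimal sketch, assuming it JPEG-encodes a BGR numpy array into an is_msgs Image, mirroring the cv2.imencode pattern used in the requester snippet later on this page:

import cv2
from is_msgs.image_pb2 import Image

def to_pb_image(frame, quality=80):
    # Hypothetical helper: JPEG-encode a BGR numpy array into an is_msgs Image.
    ok, buffer = cv2.imencode(ext='.jpeg', img=frame,
                              params=[cv2.IMWRITE_JPEG_QUALITY, quality])
    return Image(data=buffer.tobytes())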
Example #8
def navigate(goalX, goalY, robotArUco, worldFrame, mapFile, step, robot_size,
             N_KNN, MAX_EDGE_LEN, show_path):

    exporter = ZipkinExporter(
        service_name="Robot.Controller",
        host_name="10.10.2.13",
        port="30200",
        transport=AsyncTransport,
    )

    # Create a channel to connect to the broker
    channel = Channel("amqp://10.10.2.13:30000")
    # Create a subscription
    subscription = Subscription(channel)

    # Subscribe to the following topics:
    # - To get the robot's current position relative to where it was turned on, i.e., dead-reckoning odometry
    # - To get the position of the ArUco marker attached to the robot's back

    #robotArUco = 8  # ArUco marker ID attached to the robot's back
    #worldFrame = 1000    # Number of the world reference frame in the HPN Intelligent Space

    awarenessOff(channel)
    time.sleep(6)  # wait some time to be sure the awareness is off

    topicGetRobotOdometry = "FrameTransformation.2000.2001"
    topicGetArUcoRobotBack = "FrameTransformation." + str(
        robotArUco + 100) + "." + str(worldFrame)

    subscription.subscribe(topicGetRobotOdometry)
    subscription.subscribe(topicGetArUcoRobotBack)

    # Initialise transformation matrices used for storing odometry and correction
    pepperPose = np.identity(4)
    robotOriginToWorld = np.identity(4)
    lastOdometry = np.identity(4)

    # Load the frame transformation between the ArUco marker on the robot's back and the robot's base frame.
    fileName = "frameArUcoRobot.dat"
    arUcoToRobot = read_frameTransformation(fileName)
    robotToArUco = inv(arUcoToRobot)

    print("Goal: ", goalX, "  ", goalY)

    # Localize the robot before start path planning

    robotLocalized = False
    notSeen = 0
    count = 0

    while not robotLocalized:
        # Source must be the current position of the robot in the world frame
        message = channel.consume()
        if (message.topic == topicGetRobotOdometry):
            print("reading odometry")
            # get the transformation matrix corresponding to the current rotation and position of the robot
            lastOdometry = unpackFrameTransformation(message)
            notSeen = notSeen + 1
            print(notSeen)

        if (message.topic == topicGetArUcoRobotBack):
            print("correcting odometry")
            # get the frame transformation between the ArUco marker on the robot's back and the world
            arUcoToWorld = unpackFrameTransformation(message)
            # calculates the robot pose in the world frame
            pepperPose = arUcoToWorld * robotToArUco
            # calculate the frame transformation needed to correct robot's odometry while the ArUco marker is not seen by the intelligent space
            robotOriginToWorld = pepperPose * inv(lastOdometry)

            sourceX = pepperPose[0, 3]
            sourceY = pepperPose[1, 3]
            print("x= ", sourceX, " y= ", sourceY)
            robotLocalized = True
            notSeen = 0

        if notSeen > 30:
            notSeen = 0
            count = count + 1
            # unsubscribe so that messages do not accumulate
            subscription.unsubscribe(topicGetRobotOdometry)
            subscription.unsubscribe(topicGetArUcoRobotBack)

            if count > 4:
                print("I can't get localized by the Intelligent Space.")
                print(
                    "Please take me to a spot where the marker on my back can be seen by one of the cameras."
                )
                sys.exit(0)

            makeTurn(0, 0.3, channel)
            subscription.subscribe(topicGetRobotOdometry)
            subscription.subscribe(topicGetArUcoRobotBack)

    # unsubscribe so that messages do not accumulate
    subscription.unsubscribe(topicGetRobotOdometry)
    subscription.unsubscribe(topicGetArUcoRobotBack)

    #sys.exit(0)

    # Create obstacles
    ox, oy = create_virtualObstacles()

    # Call path planning
    # rx, ry contains the positions in the path
    rx, ry = PRM_planning(mapFile, sourceX, sourceY, goalX, goalY, ox, oy,
                          robot_size, step, N_KNN, MAX_EDGE_LEN)

    # Check if a path was found
    if len(rx) == 0:
        print('Cannot find path')
        raise SystemError

    #sys.exit(0)

    # Plot map points and obstacles
    if show_path:
        map_x, map_y = read_map(mapFile, step)
        plt.plot(ox, oy, ".k")
        plt.plot(sourceX, sourceY, "^r")
        plt.plot(goalX, goalY, "^c")
        plt.plot(map_x, map_y, ".b")
        plt.grid(True)
        plt.axis("equal")
        plt.plot(rx, ry, "-r")
        plt.plot(rx, ry, "og")
        plt.show()

    # Reverse the order of the path (list) returned by the path-planning algorithm
    # The original list contains the goal at the beginning and the source at the end. We need the reverse
    rx = rx[::-1]
    ry = ry[::-1]
    print(rx)
    print(ry)

    #sys.exit(0)

    # Subscribe to the previous topics
    subscription.subscribe(topicGetRobotOdometry)
    subscription.subscribe(topicGetArUcoRobotBack)

    i = 0
    k = 0
    stuck = 0
    dist = 100.0
    threshold = 0.2

    try:
        while True:

            try:

                # listen to the channel
                message = channel.consume(timeout=0.5)

                spanr = message.extract_tracing()
                tracer = Tracer(exporter, spanr)
                tracer.start_span(name="Navigate")
                # Check if the message received is the robot's odometry - FrameTransformation type
                if (message.topic == topicGetRobotOdometry):
                    # get the transformation matrix corresponding to the current rotation and position of the robot
                    lastOdometry = unpackFrameTransformation(message)
                    pepperPose = robotOriginToWorld * lastOdometry

                    #print(pepperPose)

                elif (message.topic == topicGetArUcoRobotBack):

                    print("Odometry Corrected")
                    # get the transformation matrix corresponding to the current pose of the robot corrected when
                    # it sees an ArUco marker
                    arUcoToWorld = unpackFrameTransformation(message)
                    pepperPose = arUcoToWorld * robotToArUco
                    robotOriginToWorld = pepperPose * inv(lastOdometry)
            except socket.timeout:

                print("Time out")

            # invert the robot pose to map world coordinates into the robot frame
            toPepperFrame = inv(pepperPose)
            # transform the goal from the world frame to the robot frame
            posX, posY, posZ = changeRefFrame(rx[i], ry[i], 0, toPepperFrame)

            distPrevious = dist
            dist = math.sqrt(posX**2 + posY**2)
            #print(dist)
            # If distance to current goal is less than the threshold, pick the next point in the path to be the next goal
            if dist < threshold:
                i = i + 1
                stuck = 0
                print(dist)
                print("Path index: ", i, "  of ", len(rx))

                if i == (len(rx) - 1):
                    threshold = 0.5

                # If that was the last point in the path, finish navigation. Goal achieved.
                if i >= len(rx):

                    print("Goal achieved")
                    break
                # If not the last point in the path, get the next point and convert it to the robot frame
                print("Next point in the path")
                posX, posY, posZ = changeRefFrame(rx[i], ry[i], 0,
                                                  toPepperFrame)
                # Command robot to move
                commandMoveTo(posX, posY, channel)
                #commandNavigateTo(posX,posY,channel)

            # If the distance to the current goal is greater than the threshold, check whether the robot is stopped
            # by comparing the current distance with the previous one
            elif abs(dist - distPrevious) < 0.005:  # if the difference is less than 0.5 cm
                k = k + 1  # accumulate consecutive readings with almost no progress
                if k == 20:  # the situation has persisted for 20 consecutive readings
                    # Robot is stuck. Let's send a move command again
                    print(dist)
                    k = 0
                    print(
                        "Ooops.... I got stuck.... I will try to move. Stuck = ",
                        stuck)
                    posX, posY, posZ = changeRefFrame(rx[i], ry[i], 0,
                                                      toPepperFrame)

                    stuck = stuck + 1
                    if stuck == 4:

                        goBack(-10, 0, channel)
                        stuck = 0
                        posX, posY, posZ = changeRefFrame(
                            rx[i], ry[i], 0, toPepperFrame)
                        # Command robot to move
                        commandMoveTo(posX, posY, channel)
                    else:
                        commandMoveTo(posX, posY, channel)
            tracer.end_span()

    except KeyboardInterrupt:

        commandMoveTo(0, 0, channel)

    #awarenessOff(channel)

    awarenessOn(channel)
    time.sleep(6)
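navigate relies on several helpers that are not shown here (unpackFrameTransformation, changeRefFrame, commandMoveTo, awarenessOn/awarenessOff, among others). As one illustration, a minimal sketch of changeRefFrame, assuming the same 4x4 homogeneous-matrix convention used for pepperPose above:

import numpy as np

def changeRefFrame(x, y, z, transform):
    # Hypothetical helper: apply a 4x4 homogeneous transformation to the
    # point (x, y, z) and return the transformed coordinates.
    point = np.array([[x], [y], [z], [1.0]])
    result = np.dot(transform, point)
    return result[0, 0], result[1, 0], result[2, 0]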
Example #9 (truncated; begins mid-snippet)
    log.critical('Invalid arguments. Try: python requester.py <BROKER_URI>')
if len(sys.argv) > 1:
    broker_uri = sys.argv[1]

channel = Channel(broker_uri)
subscription = Subscription(channel)
exporter = ZipkinExporter(
    service_name='SkeletonsDetectorRequester',
    host_name='localhost',
    port=9411,
    transport=BackgroundThreadTransport(max_batch_size=100),
)

image = cv2.imread('../image.png')

tracer = Tracer(exporter)
with tracer.span(name='image') as span:
    cimage = cv2.imencode(ext='.jpeg', img=image, params=[cv2.IMWRITE_JPEG_QUALITY, 80])
    data = cimage[1].tobytes()
    im = Image(data=data)
    msg = Message(content=im, reply_to=subscription)
    msg.inject_tracing(span)
    channel.publish(message=msg, topic='SkeletonsDetector.Detect')

    cid = msg.correlation_id
    while True:
        msg = channel.consume()
        if msg.correlation_id == cid:
            skeletons = msg.unpack(ObjectAnnotations)
            print(skeletons)
            sys.exit(0)
Example #10
def main():

    service_name = 'GestureRecognizer.Recognition'
    log = Logger(name=service_name)

    op = load_options()

    channel = Channel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    exporter = create_exporter(service_name=service_name, uri=op.zipkin_uri)

    subscription = Subscription(channel=channel, name=service_name)
    for group_id in list(op.group_ids):
        subscription.subscribe(
            'SkeletonsGrouper.{}.Localization'.format(group_id))

    model = GestureRecognizer("model_gesture1_72.00.pth")
    log.info('Initialize the model')

    unc = Gauge('uncertainty_total', "Uncertainty about predict")
    unc.set(0.0)
    start_http_server(8000)

    buffer = list()
    predict_flag = False

    mean = lambda x: (sum(x) / len(x))

    while True:

        msg = channel.consume()

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_info')

        annotations = msg.unpack(ObjectAnnotations)
        skeleton = select_skeletons(annotations=annotations,
                                    min_keypoints=op.skeletons.min_keypoints,
                                    x_range=op.skeletons.x_range,
                                    y_range=op.skeletons.y_range)

        if skeleton is None:
            tracer.end_span()
            continue

        skl = Skeleton(skeleton)
        skl_normalized = skl.normalize()
        pred, prob, uncertainty = model.predict(skl_normalized)

        if pred == 0 and predict_flag is False:
            pass

        elif pred != 0 and predict_flag is False:
            initial_time = time.time()
            predict_flag = True
            buffer.append(uncertainty)

        elif pred != 0 and predict_flag is True:
            buffer.append(uncertainty)

        elif pred == 0 and predict_flag is True:
            predict_flag = False
            exec_time = time.time() - initial_time
            if exec_time >= op.exec_time:
                unc.set(mean(buffer))
                log.info("execution_ms: {}, buffer_mean: {}",
                         (exec_time * 1000), mean(buffer))
            buffer = []

        tracer.end_span()

        info = {
            'prediction': pred,
            'probability': prob,
            'uncertainty': uncertainty,
            'took_ms': {
                'service': round(span_duration_ms(span), 2)
            }
        }
        log.info('{}', str(info).replace("'", '"'))
Example #11
sks_hm = SkeletonsHeatmap(ops)

period = ops.period_ms / 1000.0
deadline = now()
while True:
    deadline += period
    msgs = []
    while True:
        try:
            msgs.append(channel.consume_until(deadline=deadline))
        except socket.timeout:  # assumes consume_until signals its deadline with socket.timeout
            break

    span_context = msgs[-1].extract_tracing() if len(msgs) > 0 else None
    tracer = Tracer(exporter=exporter, span_context=span_context)
    update_span, service_span = None, None
    with tracer.span(name='Render') as span_0:
        sks_list = list(map(lambda x: x.unpack(ObjectAnnotations), msgs))
        with tracer.span(name='Update_Heatmap') as span_1:
            sks_hm.update_heatmap(sks_list)
            update_span = span_1
        im_pb = sks_hm.get_pb_image()
        msg = Message(content=im_pb)
        msg.inject_tracing(span_0)
        channel.publish(message=msg, topic='{}.Rendered'.format(service_name))
        service_span = span_0

    log.info('event=Render messages={}', len(msgs))
    log.info('took_ms= {{ update_heatmap={:4.2f}, service={:4.2f} }}',
             span_duration_ms(update_span), span_duration_ms(service_span))
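The pacing loop above depends on a now() helper that is not defined in the snippet. A minimal stand-in, assuming now() simply returns wall-clock time in seconds (the same unit as period):

import time

def now():
    # Hypothetical stand-in for the now() helper used above: wall-clock seconds.
    return time.time()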
Example #12
max_batch_size = max(100, op.zipkin_batch_size)
exporter = ZipkinExporter(
    service_name=service_name,
    host_name=op.zipkin_host,
    port=op.zipkin_port,
    transport=BackgroundThreadTransport(max_batch_size=max_batch_size),
)

subscription = Subscription(channel=channel, name=service_name)
subscription.subscribe('CameraGateway.*.Frame')

while True:
    msg, dropped = channel.consume(return_dropped=True)

    tracer = Tracer(exporter, span_context=msg.extract_tracing())
    span = tracer.start_span(name='detection_and_render')

    with tracer.span(name='unpack'):
        im = msg.unpack(Image)
        im_np = get_np_image(im)
    with tracer.span(name='detect'):
        skeletons = sd.detect(im_np)
    with tracer.span(name='pack_and_publish_detections'):
        sks_msg = Message()
        sks_msg.topic = re_topic.sub(r'Skeletons.\1.Detection', msg.topic)
        sks_msg.inject_tracing(span)
        sks_msg.pack(skeletons)
        channel.publish(sks_msg)
    with tracer.span(name='render_pack_publish'):
        im_rendered = draw_skeletons(im_np, skeletons)