Example #1
def main():
    service_name = 'SkeletonsDetector.Detect'

    op = load_options()
    sd = RPCSkeletonsDetector(op)

    log = Logger(name=service_name)
    channel = Channel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)
    provider = ServiceProvider(channel)
    provider.add_interceptor(LogInterceptor())

    max_batch_size = max(100, op.zipkin_batch_size)
    exporter = ZipkinExporter(
        service_name=service_name,
        host_name=op.zipkin_host,
        port=op.zipkin_port,
        transport=BackgroundThreadTransport(max_batch_size=max_batch_size),
    )
    tracing = TracingInterceptor(exporter=exporter)
    provider.add_interceptor(tracing)

    provider.delegate(topic='SkeletonsDetector.Detect',
                      function=partial(RPCSkeletonsDetector.detect, sd),
                      request_type=Image,
                      reply_type=ObjectAnnotations)

    provider.run()
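A minimal client for this RPC, for reference, could follow the same request/reply pattern used by the other examples in this listing (Channel, Subscription, reply_to, unpack); the broker URI and input file below are placeholders:

from is_wire.core import Channel, Message, Subscription
from is_msgs.image_pb2 import Image, ObjectAnnotations

channel = Channel('amqp://localhost:5672')  # placeholder broker URI
subscription = Subscription(channel)

image = Image()
with open('frame.jpeg', 'rb') as f:  # hypothetical input image
    image.data = f.read()

request = Message(content=image, reply_to=subscription)
channel.publish(request, topic='SkeletonsDetector.Detect')

reply = channel.consume(timeout=5.0)  # raises socket.timeout if no reply arrives
annotations = reply.unpack(ObjectAnnotations)
print(annotations)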
Example #2
def load_options():
    log = Logger(name='LoadOptions')
    op_file = sys.argv[1] if len(sys.argv) > 1 else 'options.json'
    try:
        with open(op_file, 'r') as f:
            try:
                op = Parse(f.read(), SkeletonsHeatmapOptions())
                log.info('Options: \n{}', op)
            except Exception as ex:
                log.critical('Unable to load options from \'{}\'. \n{}',
                             op_file, ex)
                sys.exit(-1)
            except:
                log.critical('Unable to load options from \'{}\'', op_file)
                sys.exit(-1)
    except Exception as ex:
        log.critical('Unable to open file \'{}\'', op_file)
        sys.exit(-1)

    message = op.DESCRIPTOR.full_name
    # validation
    if op.period_ms < 200:
        message += " 'period_ms' field must be equal or greater than 200. "
        message += "Given {}".format(op.period_ms)
        raise Exception(message)
    if op.period_ms > 1000:
        message += " 'period_ms' field must be equal or less than 1000. "
        message += "Given {}".format(op.period_ms)
        raise Exception(message)

    return op
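The options.json consumed here is not shown; given the validation above, a minimal file only needs a 'period_ms' between 200 and 1000 (the remaining SkeletonsHeatmapOptions fields are omitted):

{
  "period_ms": 500
}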
Example #3
class BrokerEvents(object):
    def __init__(self, broker_uri, management_uri, log_level):
        self.log = Logger(name="BrokerEvents", level=log_level)

        self.log.debug("broker_uri='{}'", broker_uri)
        self.channel = Channel(broker_uri)
        self.subscription = Subscription(self.channel)
        self.subscription.subscribe(topic="binding.*")

        self.log.debug("management_uri='{}'", management_uri)
        self.consumers = self.query_consumers_http(management_uri)

    def run(self):
        while True:
            msg = self.channel.consume()
            self.log.debug("topic='{}' metadata={}", msg.topic, msg.metadata)

            if msg.metadata["destination_kind"] != "queue" or \
               msg.metadata["source_name"] != "is":
                continue

            event = msg.topic.split('.')[-1]
            topic = msg.metadata["routing_key"]
            queue = msg.metadata["destination_name"]

            if event == "created":
                self.consumers.info[topic].consumers.append(queue)
            elif event == "deleted":
                self.consumers.info[topic].consumers.remove(queue)
                if len(self.consumers.info[topic].consumers) == 0:
                    del self.consumers.info[topic]

            self.log.info("event='{}' topic='{}' queue='{}'", event, topic,
                          queue)

            self.channel.publish(
                Message(content=self.consumers),
                topic="BrokerEvents.Consumers",
            )

    @staticmethod
    def query_consumers_http(management_uri):
        reply = requests.get(management_uri + "/api/bindings")
        if reply.status_code != 200:
            why = "Failed to query management API, code={}".format(
                reply.status_code)
            raise RuntimeError(why)

        bindings = reply.json()
        bindings = [
            b for b in bindings
            if b["destination_type"] == "queue" and b["source"] == "is"
        ]

        consumers = ConsumerList()
        for b in bindings:
            consumers.info[b["routing_key"]].consumers.append(b["destination"])
        return consumers
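For context, each element returned by the management API endpoint /api/bindings is a JSON object; the fields used above look roughly like this (values illustrative, other fields omitted):

{
  "source": "is",
  "destination": "SkeletonsDetector.Detection",
  "destination_type": "queue",
  "routing_key": "CameraGateway.*.Frame"
}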
Example #4
def main():
    service_name = "FaceDetector.Detection"
    log = Logger(name=service_name)
    op = load_options()
    face_detector = FaceDetector(op.model)

    channel = StreamChannel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    exporter = create_exporter(service_name=service_name, uri=op.zipkin_uri)

    subscription = Subscription(channel=channel, name=service_name)
    subscription.subscribe(topic='CameraGateway.*.Frame')

    while True:
        msg, dropped = channel.consume_last(return_dropped=True)

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_render')
        detection_span = None

        with tracer.span(name='unpack'):
            im = msg.unpack(Image)
            im_np = to_np(im)

        with tracer.span(name='detection') as _span:
            camera_id = get_topic_id(msg.topic)
            faces = face_detector.detect(im_np)
            detection_span = _span

        with tracer.span(name='pack_and_publish_detections'):
            faces_msg = Message()
            faces_msg.topic = 'FaceDetector.{}.Detection'.format(camera_id)
            faces_msg.inject_tracing(span)
            faces_msg.pack(faces)
            channel.publish(faces_msg)

        with tracer.span(name='render_pack_publish'):
            img_rendered = draw_detection(im_np, faces)
            rendered_msg = Message()
            rendered_msg.topic = 'FaceDetector.{}.Rendered'.format(camera_id)
            rendered_msg.pack(to_image(img_rendered))
            channel.publish(rendered_msg)

        span.add_attribute('Detections', len(faces.objects))
        tracer.end_span()

        info = {
            'detections': len(faces.objects),
            'dropped_messages': dropped,
            'took_ms': {
                'detection': round(span_duration_ms(detection_span), 2),
                'service': round(span_duration_ms(span), 2)
            }
        }
        log.info('{}', str(info).replace("'", '"'))
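The helpers to_np, to_image, get_topic_id and span_duration_ms are defined elsewhere in the project; a plausible sketch of each, assuming OpenCensus spans expose ISO-8601 start/end time strings and that topics follow the CameraGateway.<id>.Frame pattern:

import re
import cv2
import numpy as np
from dateutil import parser as dt_parser
from is_msgs.image_pb2 import Image


def to_np(image):
    # decode the JPEG/PNG bytes carried by the Image protobuf into a numpy array
    buffer = np.frombuffer(image.data, dtype=np.uint8)
    return cv2.imdecode(buffer, cv2.IMREAD_COLOR)


def to_image(array, encode_format='.jpeg'):
    # encode a numpy array back into an Image protobuf
    ret, buffer = cv2.imencode(encode_format, array)
    return Image(data=buffer.tobytes())


def get_topic_id(topic):
    # 'CameraGateway.7.Frame' -> '7'
    return re.match(r'CameraGateway\.(\w+)\.Frame', topic).group(1)


def span_duration_ms(span):
    # OpenCensus Python stores span start/end times as ISO-8601 strings
    start = dt_parser.parse(span.start_time)
    end = dt_parser.parse(span.end_time)
    return (end - start).total_seconds() * 1000.0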
Example #5
def load_options():
    log = Logger(name='LoadingOptions')
    op_file = sys.argv[1] if len(sys.argv) > 1 else 'options.json'
    try:
        with open(op_file, 'r') as f:
            try:
                op = Parse(f.read(), GestureRecognizierOptions())
                log.info('GestureRecognizierOptions: \n{}', op)
                return op
            except Exception as ex:
                log.critical('Unable to load options from \'{}\'. \n{}', op_file, ex)
    except Exception as ex:
        log.critical('Unable to open file \'{}\'', op_file)
Example #6
def load_options():
    log = Logger(name='LoadOptions')
    op_file = sys.argv[1] if len(sys.argv) > 1 else 'options.json'
    try:
        with open(op_file, 'r') as f:
            try:
                op = Parse(f.read(), ObjectAnnotationsTransformerOptions())
                log.info('Options: \n{}', op)
            except Exception as ex:
                log.critical('Unable to load options from \'{}\'. \n{}',
                             op_file, ex)
            except:
                log.critical('Unable to load options from \'{}\'', op_file)
    except Exception as ex:
        log.critical('Unable to open file \'{}\'', op_file)

    return op
Example #7
def load_options():
    log = Logger(name='LoadingOptions')
    op_file = sys.argv[1] if len(sys.argv) > 1 else 'options.json'
    try:
        with open(op_file, 'r') as f:
            try:
                op = Parse(f.read(), Options())
                mem_frac = op.per_process_gpu_memory_fraction 
                if mem_frac < 0.0 or mem_frac > 1.0:
                    log.critical("Invalid value for 'per_process_gpu_memory_fraction': {}. Must be in [0.0, 1.0]", mem_frac)
                log.info('Options: \n{}', op)
                return op
            except Exception as ex:
                log.critical('Unable to load options from \'{}\'. \n{}', op_file, ex)
            except:
                log.critical('Unable to load options from \'{}\'', op_file)
    except Exception as ex:
        log.critical('Unable to open file \'{}\'', op_file)
Example #8
def main():
    log = Logger(name="Parser")
    parser = argparse.ArgumentParser(description='Broker events service')
    parser.add_argument('--config',
                        dest='config',
                        type=str,
                        default="config.json",
                        help='path to configuration file')

    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
        log.info("{}", config)

    service = BrokerEvents(
        broker_uri=config["broker_uri"],
        management_uri=config["management_uri"],
        log_level=logging.getLevelName(config["log_level"]),
    )

    service.run()
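A config.json compatible with the keys read above could look like this (values are placeholders; the management URI must point at the RabbitMQ management API):

{
  "broker_uri": "amqp://guest:guest@localhost:5672",
  "management_uri": "http://guest:guest@localhost:15672",
  "log_level": "INFO"
}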
Example #9
def main():
    service_name = 'FaceDetector.Detect'
    log = Logger(name=service_name)

    op = load_options()
    detector = RPCFaceDetector(op.model)

    channel = Channel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    provider = ServiceProvider(channel)
    provider.add_interceptor(LogInterceptor())

    exporter = create_exporter(service_name=service_name, uri=op.zipkin_uri)
    tracing = TracingInterceptor(exporter=exporter)
    provider.add_interceptor(tracing)

    provider.delegate(topic='FaceDetector.Detect',
                      function=detector.detect,
                      request_type=Image,
                      reply_type=ObjectAnnotations)

    provider.run()
Example #10
def main():
    service_name = 'SkeletonsDetector.Detection'
    re_topic = re.compile(r'CameraGateway.(\w+).Frame')

    op = load_options()
    sd = SkeletonsDetector(op)

    log = Logger(name=service_name)
    channel = StreamChannel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    max_batch_size = max(100, op.zipkin_batch_size)
    exporter = ZipkinExporter(
        service_name=service_name,
        host_name=op.zipkin_host,
        port=op.zipkin_port,
        transport=BackgroundThreadTransport(max_batch_size=max_batch_size),
    )

    subscription = Subscription(channel=channel, name=service_name)
    subscription.subscribe('CameraGateway.*.Frame')

    while True:
        msg, dropped = channel.consume(return_dropped=True)

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_render')
        detection_span = None

        with tracer.span(name='unpack'):
            im = msg.unpack(Image)
            im_np = get_np_image(im)
        with tracer.span(name='detection') as _span:
            skeletons = sd.detect(im_np)
            detection_span = _span
        with tracer.span(name='pack_and_publish_detections'):
            sks_msg = Message()
            sks_msg.topic = re_topic.sub(r'SkeletonsDetector.\1.Detection',
                                         msg.topic)
            sks_msg.inject_tracing(span)
            sks_msg.pack(skeletons)
            channel.publish(sks_msg)
        with tracer.span(name='render_pack_publish'):
            im_rendered = draw_skeletons(im_np, skeletons)
            rendered_msg = Message()
            rendered_msg.topic = re_topic.sub(r'SkeletonsDetector.\1.Rendered',
                                              msg.topic)
            rendered_msg.pack(get_pb_image(im_rendered))
            channel.publish(rendered_msg)

        span.add_attribute('Detections', len(skeletons.objects))
        tracer.end_span()
        log.info('detections = {:2d}, dropped_messages = {:2d}',
                 len(skeletons.objects), dropped)
        log.info('took_ms = {{ detection: {:5.2f}, service: {:5.2f}}}',
                 span_duration_ms(detection_span), span_duration_ms(span))
Example #11
def load_options():
    log = Logger(name='LoadOptions')
    op_file = sys.argv[1] if len(sys.argv) > 1 else 'options.json'
    try:
        with open(op_file, 'r') as f:
            try:
                op = Parse(f.read(), options_pb2.RobotGatewayOptions())
                log.info('Options: \n{}', op)
            except Exception as ex:
                log.critical('Unable to load options from \'{}\'. \n{}',
                             op_file, ex)
            except:
                log.critical('Unable to load options from \'{}\'', op_file)
    except Exception as ex:
        log.critical('Unable to open file \'{}\'', op_file)

    message = op.DESCRIPTOR.full_name
    # validation
    if op.robot_parameters.sampling_rate < 0.1:
        message += " 'sampling_rate' field must be equal or greater than 0.1. "
        message += "Given {}".format(op.robot_parameters.sampling_rate)
        raise Exception(message)

    return op
Example #12
parser = argparse.ArgumentParser(
    description='Utility to capture a sequence of images from multiple cameras')
parser.add_argument('--person', '-p', type=int, required=True, help='ID to identify person')
parser.add_argument('--gesture', '-g', type=int, required=True, help='ID to identify gesture')
args = parser.parse_args()

person_id = args.person
gesture_id = args.gesture
if str(gesture_id) not in gestures:
    log.critical("Invalid GESTURE_ID: {}. \nAvailable gestures: {}", gesture_id,
                 json.dumps(gestures, indent=2))

if person_id < 1 or person_id > 999:
    log.critical("Invalid PERSON_ID: {}. Must be between 1 and 999.", person_id)

log.info("PERSON_ID: {} GESTURE_ID: {}", person_id, gesture_id)

cameras = [int(cam_config.id) for cam_config in options.cameras]
video_files = {
    cam_id: os.path.join(options.folder, 'p{:03d}g{:02d}c{:02d}.mp4'.format(
        person_id, gesture_id, cam_id))
    for cam_id in cameras
}
json_files = {
    cam_id: os.path.join(options.folder, 'p{:03d}g{:02d}c{:02d}_2d.json'.format(
        person_id, gesture_id, cam_id))
    for cam_id in cameras
}
json_locaizations_file = os.path.join(options.folder, 'p{:03d}g{:02d}_3d.json'.format(
    person_id, gesture_id))
Example #13
                    type=int,
                    required=True,
                    help='ID to identify gesture')
args = parser.parse_args()

person_id = args.person
gesture_id = args.gesture
if str(gesture_id) not in gestures:
    log.critical("Invalid GESTURE_ID: {}. \nAvailable gestures: {}",
                 gesture_id, json.dumps(gestures, indent=2))

if person_id < 1 or person_id > 999:
    log.critical("Invalid PERSON_ID: {}. Must be between 1 and 999.",
                 person_id)

log.info("PERSON_ID: {} GESTURE_ID: {}", person_id, gesture_id)

cameras = [int(cam_config.id) for cam_config in options.cameras]
video_files = {
    cam_id: os.path.join(
        options.folder,
        'p{:03d}g{:02d}c{:02d}.mp4'.format(person_id, gesture_id, cam_id))
    for cam_id in cameras
}
json_files = {
    cam_id: os.path.join(
        options.folder,
        'p{:03d}g{:02d}c{:02d}_2d.json'.format(person_id, gesture_id, cam_id))
    for cam_id in cameras
}
json_locaizations_file = os.path.join(
Example #14
from is_wire.core import Logger
from gateway import RobotGateway
from driver import PepperRobotDriver
import json
import sys
import os

log = Logger("Service")

config_path = "conf.json" if len(sys.argv) != 2 else sys.argv[1]
with open(config_path) as f:
    config = json.load(f)

log.info("config_file @'{}':\n{}", config_path, json.dumps(config, indent=4))


def env_or_default(env, default):
    value = os.environ[env] if env in os.environ else default
    log.info("{}='{}'".format(env, value))
    return value


broker_uri = env_or_default("BROKER_URI", config["broker_uri"])
robot_uri = env_or_default("ROBOT_URI", str(config["robot_uri"]))
robot_id = env_or_default("ROBOT_ID", config["robot_id"])

driver = PepperRobotDriver(robot_uri=robot_uri,
                           parameters=config["driver_params"])

service = RobotGateway(driver=driver)
service.run(id=robot_id, broker_uri=broker_uri)
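The layout of conf.json is implied by the keys read above; a sketch with illustrative values (driver_params depends entirely on PepperRobotDriver and is left empty here):

{
  "broker_uri": "amqp://localhost:5672",
  "robot_uri": "tcp://10.10.0.111:9559",
  "robot_id": 0,
  "driver_params": {}
}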
Example #15
from is_wire.core import Channel, Subscription, Message, Logger
from is_msgs.camera_pb2 import CameraConfig
from google.protobuf.json_format import Parse
from google.protobuf.wrappers_pb2 import FloatValue
from is_msgs.common_pb2 import SamplingSettings
from is_msgs.image_pb2 import ImageSettings, Resolution, ColorSpace, ColorSpaces

uri = 'amqp://10.10.2.15:30000'
channel = Channel(uri)
subscription = Subscription(channel)
log = Logger(name='SetConfig')
camera_id = 0
""" 
    Loading CameraConfig protobuf from a json file
"""
with open('camera_config.json', 'r') as f:
    try:
        set_config = Parse(f.read(), CameraConfig())
        log.info('CameraConfig:\n{}', set_config)
    except Exception as ex:
        log.critical('Unable to load camera settings. \n{}', ex)
"""
    Another way to create a CameraConfig message
"""
set_config = CameraConfig(
    sampling=SamplingSettings(frequency=FloatValue(value=5.0)),
    image=ImageSettings(
        resolution=Resolution(width=720, height=540),
        color_space=ColorSpace(value=ColorSpaces.Value('GRAY'))))

msg = Message()
msg.reply_to = subscription
msg.topic = 'CameraGateway.{}.SetConfig'.format(camera_id)
msg.pack(set_config)
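The snippet stops right after packing the request; a hedged continuation, mirroring the GetConfig example below, would publish it and wait for the status reply:

channel.publish(msg)

reply = channel.consume(timeout=5.0)  # raises socket.timeout if no reply arrives
log.info('SetConfig reply status:\n{}', reply.status)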
Example #16
from is_wire.core import Channel, Subscription, Message, Logger
from is_msgs.common_pb2 import Position
import os
from sys import argv, exit

log = Logger(name="client")

uri = os.environ.get("BROKER_URI", "amqp://10.10.2.20:30000")
log.info("uri={}", uri)
if len(argv) != 3:
    log.error("Usage: python navigate.py <X> <Y>")
    exit(-1)

channel = Channel(uri)
subscription = Subscription(channel)

position = Position()
position.x = float(argv[1])
position.y = float(argv[2])

channel.publish(message=Message(content=position, reply_to=subscription),
                topic="RobotGateway.0.NavigateTo")

reply = channel.consume(timeout=1.0)
log.info("status={}", reply.status)
Example #17
from is_wire.core import Channel, Subscription, Message, Logger
from is_wire.core.wire.status import StatusCode
from is_msgs.common_pb2 import FieldSelector
from is_msgs.camera_pb2 import CameraConfigFields, CameraConfig
from google.protobuf.json_format import Parse
import sys

uri = 'amqp://10.10.2.15:30000'
channel = Channel(uri)
subscription = Subscription(channel)
log = Logger(name='GetConfig')
camera_id = 0

get_config = FieldSelector()
get_config.fields.append(CameraConfigFields.Value('SAMPLING_SETTINGS'))
get_config.fields.append(CameraConfigFields.Value('IMAGE_SETTINGS'))

msg = Message()
msg.topic = 'CameraGateway.{}.GetConfig'.format(camera_id)
msg.reply_to = subscription
msg.pack(get_config)
channel.publish(msg)

while True:
    msg = channel.consume()
    if msg.status.code == StatusCode.OK:
        config = msg.unpack(CameraConfig)
        log.info('Configuration received from camera \'{}\'\n{}', camera_id, config)
    else:
        log.warn('Can\'t apply configuration:\n{}', msg.status)
    sys.exit(0)
Example #18
class CameraGateway(object):
    def __init__(self, driver):
        self.driver = driver
        self.logger = Logger("CameraGateway")

    def get_config(self, field_selector, ctx):
        fields = field_selector.fields
        camera_config = CameraConfig()

        if CameraConfigFields.Value("ALL") in fields or \
           CameraConfigFields.Value("SAMPLING_SETTINGS") in fields:
            get_val(self.driver.get_sampling_rate,
                    camera_config.sampling.frequency, "value")
            get_val(self.driver.get_delay, camera_config.sampling.delay,
                    "value")

        if CameraConfigFields.Value("ALL") in fields or \
           CameraConfigFields.Value("IMAGE_SETTINGS") in fields:
            get_obj(self.driver.get_resolution, camera_config.image.resolution)
            get_obj(self.driver.get_image_format, camera_config.image.format)
            get_val(self.driver.get_color_space,
                    camera_config.image.color_space, "value")
            get_obj(self.driver.get_region_of_interest,
                    camera_config.image.region)

        if CameraConfigFields.Value("ALL") in fields or \
           CameraConfigFields.Value("CAMERA_SETTINGS") in fields:
            get_obj(self.driver.get_brightness,
                    camera_config.camera.brightness)
            get_obj(self.driver.get_exposure, camera_config.camera.exposure)
            get_obj(self.driver.get_focus, camera_config.camera.focus)
            get_obj(self.driver.get_gain, camera_config.camera.gain)
            get_obj(self.driver.get_gamma, camera_config.camera.gamma)
            get_obj(self.driver.get_hue, camera_config.camera.hue)
            get_obj(self.driver.get_iris, camera_config.camera.iris)
            get_obj(self.driver.get_saturation,
                    camera_config.camera.saturation)
            get_obj(self.driver.get_sharpness, camera_config.camera.sharpness)
            get_obj(self.driver.get_shutter, camera_config.camera.shutter)
            get_obj(self.driver.get_white_balance_bu,
                    camera_config.camera.white_balance_bu)
            get_obj(self.driver.get_white_balance_rv,
                    camera_config.camera.white_balance_rv)
            get_obj(self.driver.get_zoom, camera_config.camera.zoom)

        return camera_config

    def set_config(self, camera_config, ctx):
        if camera_config.HasField("sampling"):
            if camera_config.sampling.HasField("frequency"):
                self.driver.set_sampling_rate(
                    camera_config.sampling.frequency.value)
            if camera_config.sampling.HasField("delay"):
                self.driver.set_delay(camera_config.sampling.delay.value)

        if camera_config.HasField("image"):
            if camera_config.image.HasField("resolution"):
                self.driver.set_resolution(camera_config.image.resolution)
            if camera_config.image.HasField("format"):
                self.driver.set_image_format(camera_config.image.format)
            if camera_config.image.HasField("color_space"):
                self.driver.set_color_space(
                    camera_config.image.color_space.value)
            if camera_config.image.HasField("region"):
                self.driver.set_region_of_interest(camera_config.image.region)

        if camera_config.HasField("camera"):
            if camera_config.camera.HasField("brightness"):
                self.driver.set_brightness(camera_config.camera.brightness)
            if camera_config.camera.HasField("exposure"):
                self.driver.set_exposure(camera_config.camera.exposure)
            if camera_config.camera.HasField("focus"):
                self.driver.set_focus(camera_config.camera.focus)
            if camera_config.camera.HasField("gain"):
                self.driver.set_gain(camera_config.camera.gain)
            if camera_config.camera.HasField("gamma"):
                self.driver.set_gamma(camera_config.camera.gamma)
            if camera_config.camera.HasField("hue"):
                self.driver.set_hue(camera_config.camera.hue)
            if camera_config.camera.HasField("iris"):
                self.driver.set_iris(camera_config.camera.iris)
            if camera_config.camera.HasField("saturation"):
                self.driver.set_saturation(camera_config.camera.saturation)
            if camera_config.camera.HasField("sharpness"):
                self.driver.set_sharpness(camera_config.camera.sharpness)
            if camera_config.camera.HasField("shutter"):
                self.driver.set_shutter(camera_config.camera.shutter)
            if camera_config.camera.HasField("white_balance_bu"):
                self.driver.set_white_balance_bu(
                    camera_config.camera.white_balance_bu)
            if camera_config.camera.HasField("white_balance_rv"):
                self.driver.set_white_balance_rv(
                    camera_config.camera.white_balance_rv)
            if camera_config.camera.HasField("zoom"):
                self.driver.set_zoom(camera_config.camera.zoom)
        return Empty()

    def run(self, id, broker_uri):
        service_name = "CameraGateway.{}".format(id)

        channel = Channel(broker_uri)
        server = ServiceProvider(channel)

        logging = LogInterceptor()
        server.add_interceptor(logging)

        server.delegate(topic=service_name + ".GetConfig",
                        request_type=FieldSelector,
                        reply_type=CameraConfig,
                        function=self.get_config)

        server.delegate(topic=service_name + ".SetConfig",
                        request_type=CameraConfig,
                        reply_type=Empty,
                        function=self.set_config)

        self.driver.start_capture()
        self.logger.info("Listening for requests")

        while True:
            image = self.driver.grab_image()
            channel.publish(Message(content=image),
                            topic=service_name + ".Frame")

            pose = self.driver.get_pose()
            frameTransList = FrameTransformations()
            frameTransList.tfs.extend([pose])
            channel.publish(Message(content=frameTransList),
                            topic=service_name + ".FrameTransformations")

            try:
                message = channel.consume(timeout=0)
                if server.should_serve(message):
                    server.serve(message)
            except socket.timeout:
                pass
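get_obj and get_val are helpers defined elsewhere in the gateway package; judging only from how they are called above, they presumably wrap the driver getters along these lines (a hypothetical sketch, not the real implementation):

def get_obj(getter, destination):
    # the driver getter returns a protobuf message; copy it into the config field
    try:
        destination.CopyFrom(getter())
    except Exception:
        pass  # property not supported by this driver


def get_val(getter, destination, field):
    # the driver getter returns a plain value; assign it to the named scalar field
    try:
        setattr(destination, field, getter())
    except Exception:
        pass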
Example #19
import os
import json
from subprocess import Popen, PIPE, STDOUT
from is_wire.core import Logger

log = Logger(name="SplitSamples")

if not os.path.exists("samples"):
    log.critical("'samples' folder not found")
if not os.path.exists("samples/spots.json"):
    log.critical("'samples/spots.json' file not found")

with open("samples/spots.json", 'r') as f:
    spots = json.load(f)

ffmpeg_command = "ffmpeg -y -i gestures.MOV -ss {ss:.2f} -t {t:.2f} -an samples/{gesture:02d}.MOV"
for spot in spots:
    g_id = int(spot['gesture'])
    command = ffmpeg_command.format(ss=spot['ss'], t=spot['t'], gesture=g_id)
    process = Popen(command.split(), stdout=PIPE, stderr=STDOUT)
    log.info("{}", command)
    if process.wait() == 0:
        log.info("{} | {:.2f} + {:.2f}", g_id, spot['ss'], spot['t'])
    else:
        log.error("Failed to split video gesture: {}", g_id)
Example #20
def main():

    service_name = 'GestureRecognizer.Recognition'
    log = Logger(name=service_name)

    op = load_options()

    channel = Channel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    exporter = create_exporter(service_name=service_name, uri=op.zipkin_uri)

    subscription = Subscription(channel=channel, name=service_name)
    for group_id in list(op.group_ids):
        subscription.subscribe(
            'SkeletonsGrouper.{}.Localization'.format(group_id))

    model = GestureRecognizer("model_gesture1_72.00.pth")
    log.info('Initialize the model')

    unc = Gauge('uncertainty_total', "Uncertainty about predict")
    unc.set(0.0)
    start_http_server(8000)

    buffer = list()
    predict_flag = False

    mean = lambda x: (sum(x) / len(x))

    while True:

        msg = channel.consume()

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_info')

        annotations = msg.unpack(ObjectAnnotations)
        skeleton = select_skeletons(annotations=annotations,
                                    min_keypoints=op.skeletons.min_keypoints,
                                    x_range=op.skeletons.x_range,
                                    y_range=op.skeletons.y_range)

        if skeleton is None:
            tracer.end_span()
            continue

        skl = Skeleton(skeleton)
        skl_normalized = skl.normalize()
        pred, prob, uncertainty = model.predict(skl_normalized)

        if pred == 0 and predict_flag is False:
            pass

        elif pred != 0 and predict_flag is False:
            initial_time = time.time()
            predict_flag = True
            buffer.append(uncertainty)

        elif pred != 0 and predict_flag is True:
            buffer.append(uncertainty)

        elif pred == 0 and predict_flag is True:
            predict_flag = False
            exec_time = time.time() - initial_time
            if exec_time >= op.exec_time:
                unc.set(mean(buffer))
                log.info("execution_ms: {}, buffer_mean: {}",
                         (exec_time * 1000), mean(buffer))
            buffer = []

        tracer.end_span()

        info = {
            'prediction': pred,
            'probability': prob,
            'uncertainty': uncertainty,
            'took_ms': {
                'service': round(span_duration_ms(span), 2)
            }
        }
        log.info('{}', str(info).replace("'", '"'))
Example #21
options = load_options('../global_options.json')

log = Logger()
with open('../{}_datasets.json'.format(options['dataset_group']), 'r') as f:
    datasets_data = json.load(f)
datasets = datasets_data['datasets']
model = datasets_data['model']
cameras = datasets_data['cameras']
width = datasets_data['resolution']['width']
height = datasets_data['resolution']['height']

for dataset in datasets:
    dataset_folder = os.path.join(options['data_folder'], dataset)
    for camera in cameras:
        log.info("[Starting][{}] camera \'{}\'", dataset, camera)
        filename = os.path.join(
            dataset_folder,
            '{}_pose_2d_detected_00_{:02d}'.format(model, camera))
        reader = ProtobufReader(filename)
        filename = os.path.join(dataset_folder,
                                '{}_2d_detector_{}'.format(model, camera))
        writer = ProtobufWriter(filename)

        sid = 0
        while True:
            sks = reader.next(Skeletons())
            if not sks:
                log.info("[Done][{}] camera \'{}\' with {} sequences", dataset,
                         camera, sid + 1)
                break
Example #22
                    type=int,
                    required=True,
                    help='ID to identify gesture')
args = parser.parse_args()

person_id = args.person
gesture_id = args.gesture
if str(gesture_id) not in gestures:
    log.critical("Invalid GESTURE_ID: {}. \nAvailable gestures: {}",
                 gesture_id, json.dumps(gestures, indent=2))

if person_id < 1 or person_id > 999:
    log.critical("Invalid PERSON_ID: {}. Must be between 1 and 999.",
                 person_id)

log.info("PERSON_ID: {} GESTURE_ID: {}", person_id, gesture_id)

cameras = [int(cam_config.id) for cam_config in options.cameras]
video_files = {
    cam_id: os.path.join(
        options.folder,
        'p{:03d}g{:02d}c{:02d}.mp4'.format(person_id, gesture_id, cam_id))
    for cam_id in cameras
}
json_files = {
    cam_id: os.path.join(
        options.folder,
        'p{:03d}g{:02d}c{:02d}_2d.json'.format(person_id, gesture_id, cam_id))
    for cam_id in cameras
}
json_locaizations_file = os.path.join(
Example #23
n_annotations = {}
for video_file in video_files:
    base_name = video_file.split('.')[0]
    annotation_file = '{}_2d.json'.format(base_name)
    annotation_path = os.path.join(options.folder, annotation_file)
    video_path = os.path.join(options.folder, video_file)
    cap = cv2.VideoCapture(video_path)
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if os.path.exists(annotation_path):
        # check if all annotations were done
        with open(annotation_path, 'r') as f:
            annotations_data = json.load(f)
        n_annotations_on_file = len(annotations_data['annotations'])
        if n_annotations_on_file == n_frames:
            log.info(
                "Video '{}' already annotated at '{}' with {} annotations",
                video_file, annotations_data['created_at'],
                n_annotations_on_file)
            continue

    pending_videos.append(video_file)
    n_annotations[base_name] = n_frames
if len(pending_videos) == 0:
    log.info("Exiting...")
    sys.exit(-1)

channel = Channel(options.broker_uri)
subscription = Subscription(channel)

requests = {}
annotations_received = defaultdict(dict)
state = State.MAKE_REQUESTS
Example #24
                    help='ID to identify gesture')
args = parser.parse_args()

person_id = args.person
gesture_id = args.gesture
if str(gesture_id) not in gestures:
    log.critical("Invalid GESTURE_ID: {}. \nAvailable gestures: {}",
                 gesture_id, json.dumps(gestures, indent=2))
    sys.exit(-1)

if person_id < 1 or person_id > 999:
    log.critical("Invalid PERSON_ID: {}. Must be between 1 and 999.",
                 person_id)
    sys.exit(-1)

log.info("PERSON_ID: {} GESTURE_ID: {}", person_id, gesture_id)

options = load_options(print_options=False)

if not os.path.exists(options.folder):
    os.makedirs(options.folder)

sequence = 'p{:03d}g{:02d}'.format(person_id, gesture_id)
sequence_folder = os.path.join(options.folder, sequence)
if os.path.exists(sequence_folder):
    log.warn(
        'Path to PERSON_ID={} GESTURE_ID={} already exists.\nWould you like to proceed? All data will be deleted! [y/n]',
        person_id, gesture_id)
    key = input()
    if key == 'y':
        shutil.rmtree(sequence_folder)
Example #25
    return op


# Load robot parameters and broker address from the JSON file
op_config = load_options()

ATSPlog = Logger(name='unpack')

service_name = "RobotGateway.{}".format(op_config.robot_parameters.id)
sonar_topic = "RobotGateway.{}.SonarScan".format(op_config.robot_parameters.id)
pose_topic = "RobotGateway.{}.Pose".format(op_config.robot_parameters.id)

# Instantiate the channel with the broker address
# and establish the connection to the broker
channel = Channel(op_config.broker_uri)
ATSPlog.info("event=ChannelInitDone")

# Subscribe to the topics we want to receive messages on
subscription = Subscription(channel)
subscription.subscribe(pose_topic)
subscription.subscribe(sonar_topic)
ATSPlog.info("SubscriptionsDone")

# Send the GetConfig request message
get_req = Message(reply_to=subscription)
# Save the request's correlation_id
cor_id = get_req.correlation_id
# print("cor_id: ", cor_id)
# Broadcast message to anyone interested (subscribed)
channel.publish(message=get_req, topic=service_name + ".GetConfig")
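The snippet ends right after publishing the request; a possible continuation (not part of the original) would consume replies on the subscription and match them against the saved correlation_id before unpacking the RobotConfig:

from is_msgs.robot_pb2 import RobotConfig  # assumed location of the message type

while True:
    reply = channel.consume()
    if reply.correlation_id == cor_id:
        config = reply.unpack(RobotConfig)
        ATSPlog.info("event=GetConfigReply config={}", config)
        break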
Example #26
class RobotGateway(object):
    def __init__(self, driver):
        self.driver = driver
        self.logger = Logger("RobotGateway")

    def get_config(self, field_selector, ctx):
        robot_config = RobotConfig()
        get_obj(self.driver.get_speed, robot_config.speed)
        return robot_config

    def set_config(self, robot_config, ctx):
        if robot_config.HasField("speed"):
            self.driver.set_speed(robot_config.speed)
        return Empty()

    def navigate_to(self, position, ctx):
        self.driver.navigate_to(position.x, position.y)
        return Empty()

    def move_to(self, pose, ctx):
        self.driver.move_to(pose.position.x, pose.position.y,
                            pose.orientation.yaw)
        return Empty()

    def pause_awareness(self, req, ctx):
        self.driver.pause_awareness()
        return Empty()

    def resume_awareness(self, req, ctx):
        self.driver.resume_awareness()
        return Empty()

    def set_awareness(self, req, ctx):
        self.driver.set_awareness(req["enabled"])
        return Empty()

    #def set_awareness_off(self, req, ctx):
    #    self.driver.set_awareness_off()
    #    return Empty()

    def run(self, id, broker_uri):
        service_name = "RobotGateway.{}".format(id)

        channel = Channel(broker_uri)
        server = ServiceProvider(channel)
        logging = LogInterceptor()
        server.add_interceptor(logging)

        server.delegate(topic=service_name + ".GetConfig",
                        request_type=FieldSelector,
                        reply_type=RobotConfig,
                        function=self.get_config)

        server.delegate(topic=service_name + ".SetConfig",
                        request_type=RobotConfig,
                        reply_type=Empty,
                        function=self.set_config)

        server.delegate(topic=service_name + ".NavigateTo",
                        request_type=Position,
                        reply_type=Empty,
                        function=self.navigate_to)

        server.delegate(topic=service_name + ".MoveTo",
                        request_type=Pose,
                        reply_type=Empty,
                        function=self.move_to)

        server.delegate(topic=service_name + ".PauseAwareness",
                        request_type=Empty,
                        reply_type=Empty,
                        function=self.pause_awareness)

        server.delegate(topic=service_name + ".ResumeAwareness",
                        request_type=Empty,
                        reply_type=Empty,
                        function=self.resume_awareness)

        server.delegate(topic=service_name + ".SetAwareness",
                        request_type=Struct,
                        reply_type=Empty,
                        function=self.set_awareness)

        #server.delegate(
        #    topic=service_name + ".SetAwarenessOff",
        #    request_type=Empty,
        #    reply_type=Empty,
        #    function=self.set_awareness_off)

        self.logger.info("Listening for requests")
        while True:
            pose = self.driver.get_base_pose()
            frameTransList = FrameTransformations()
            frameTransList.tfs.extend([pose])
            self.logger.debug("Publishing pose")

            channel.publish(Message(content=frameTransList),
                            topic=service_name + ".FrameTransformations")

            try:
                message = channel.consume(timeout=0)
                if server.should_serve(message):
                    server.serve(message)
            except socket.timeout:
                pass
Example #27
    '--id', '-i', type=int, required=False, help='Camera ID', default=0)
parser.add_argument(
    '--device', '-d', type=int, required=False, help='Camera device. 0 for /dev/video0', default=0)
parser.add_argument(
    '--fps', '-f', type=int, required=False, help='FPS', default=10)
parser.add_argument(
    '--uri', '-u', type=str, required=False, help='broker_uri', default='amqp://localhost:5672')
args = parser.parse_args()

device = args.device
fps = args.fps
broker_uri = args.uri

cap = VideoCapture(device)
if cap.isOpened():
    log.info('Connected to device: /dev/video{}'.format(device))
else:
    log.error('Couldn\'t find device: /dev/video{}'.format(device))
    exit()

log.info('Camera FPS set to {}'.format(fps))
cap.set(CAP_PROP_FPS, fps)

channel = StreamChannel(broker_uri)
log.info('Connected to broker {}', broker_uri)

log.info('Starting to capture')

while True:
    ret, frame = cap.read()
Example #28
log = Logger(name='Client')

channel = Channel('amqp://localhost:5672')
request_manager = RequestManager(channel,
                                 min_requests=15,
                                 max_requests=30,
                                 log_level=Logger.INFO)

poses = [(Pose(position=Position(x=i, y=2 * i)), i) for i in range(100)]

while True:

    while request_manager.can_request() and len(poses) > 0:
        pose, pose_id = poses.pop()
        request_manager.request(content=pose,
                                topic="GetPosition",
                                timeout_ms=1000,
                                metadata=pose_id)
        log.info('{:>6s} msg_id={}', " >>", pose_id)

    received_msgs = request_manager.consume_ready(timeout=1.0)

    for msg, metadata in received_msgs:
        position = msg.unpack(Position)
        log.info('{:<6s} msg_id={}', "<< ", metadata)

    if request_manager.all_received() and len(poses) == 0:
        log.info("All received. Exiting.")
        break
Example #29
def main():

    service_name = "CameraGateway"
    log = Logger(service_name)
    options = load_options()
    camera = CameraGateway(fps=options["fps"])

    publish_channel = Channel(options['broker_uri'])
    rpc_channel = Channel(options['broker_uri'])
    server = ServiceProvider(rpc_channel)
    logging = LogInterceptor()
    server.add_interceptor(logging)

    server.delegate(topic=service_name + ".*.GetConfig",
                    request_type=FieldSelector,
                    reply_type=CameraConfig,
                    function=camera.get_config)

    server.delegate(topic=service_name + ".*.SetConfig",
                    request_type=CameraConfig,
                    reply_type=Empty,
                    function=camera.set_config)

    exporter = create_exporter(service_name=service_name,
                               uri=options["zipkin_uri"])

    while True:

        # iterate through videos listed
        for video in options['videos']:

            # id of the first sequence of videos
            person_id = video['person_id']
            gesture_id = video['gesture_id']

            # getting the path of the 4 videos
            video_files = {
                cam_id: os.path.join(
                    options['folder'],
                    'p{:03d}g{:02d}c{:02d}.mp4'.format(person_id, gesture_id,
                                                       cam_id))
                for cam_id in options["cameras_id"]
            }

            for iteration in range(video['iterations']):

                info = {
                    "person": person_id,
                    "gesture": gesture_id,
                    "iteration": iteration
                }
                log.info('{}', str(info).replace("'", '"'))

                # object that let get images from multiples videos files
                video_loader = FramesLoader(video_files)

                # iterate through all samples on video
                while True:

                    time_initial = time.time()

                    # listen server for messages about change
                    try:
                        message = rpc_channel.consume(timeout=0)
                        if server.should_serve(message):
                            server.serve(message)
                    except socket.timeout:
                        pass

                    frame_id, frames = video_loader.read()

                    for cam in sorted(frames.keys()):
                        tracer = Tracer(exporter)
                        span = tracer.start_span(name='frame')
                        pb_image = to_pb_image(frames[cam])
                        msg = Message(content=pb_image)
                        msg.inject_tracing(span)
                        topic = 'CameraGateway.{}.Frame'.format(cam)
                        publish_channel.publish(msg, topic=topic)
                        tracer.end_span()

                    took_ms = (time.time() - time_initial) * 1000

                    dt = (1 / camera.fps) - (took_ms / 1000)
                    if dt > 0:
                        time.sleep(dt)
                        info = {
                            "sample": frame_id,
                            "took_ms": took_ms,
                            "wait_ms": dt * 1000
                        }
                        log.info('{}', str(info).replace("'", '"'))

                    if frame_id >= (video_loader.num_samples - 1):
                        video_loader.release()
                        del video_loader
                        gc.collect()
                        break

        if options['loop'] is False:
            break
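load_options here returns a plain dict; based on the keys used in main, the underlying JSON would look something like this (values illustrative):

{
  "broker_uri": "amqp://localhost:5672",
  "zipkin_uri": "http://localhost:9411",
  "fps": 10,
  "folder": "/data/videos",
  "cameras_id": [0, 1, 2, 3],
  "loop": false,
  "videos": [
    { "person_id": 1, "gesture_id": 1, "iterations": 1 }
  ]
}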
Example #30
    current_timestamps[topic_id] = DT.utcfromtimestamp(
        msg.created_at).isoformat()

    if device == "webcam":
        # save images
        if start_save and not sequence_saved:
            filename = os.path.join(
                sequence_folder,
                "{:02d}_{:03d}_{:05d}.jpeg".format(int(label_id),
                                                   int(subject_id), n_sample))
            with open(filename, "wb") as f:
                f.write(images_data[topic_id])

            timestamps[topic_id].append(current_timestamps[topic_id])
            n_sample += 1
            log.info("Sample {} saves", n_sample)

        # display images
        if n_sample % display_rate == 0:
            display_image = cv2.imdecode(data, cv2.IMREAD_COLOR)
            draw_info_bar(display_image,
                          info_bar_text,
                          50,
                          50,
                          draw_circle=start_save and not sequence_saved)
            cv2.imshow("", display_image)

            current_timestamps = {}

        key = cv2.waitKey(1)
        if key == ord("s"):