Example 1
    def __init__(self):
        body_tracking_validator = SubscriberReady(topicname('player_name'),
                                                  String)
        zone_detection_validator = SubscriberReady(topicname('scene_found'),
                                                   Bool)
        seg_validator = SubscriberReady(SEGMENTATION_IMAGE_TOPIC,
                                        CompressedImage)
        features_validator = SubscriberReady(FEATURES_EXTRACTION_IMAGE_TOPIC,
                                             CompressedImage)
        restart_validator = SubscriberReady(topicname('new_game'), Bool)

        self.smart_publishers = [
            SmartImagePublisher(BODY_TRACKING_IMAGE_TOPIC,
                                body_tracking_validator,
                                throttle_rate=1.5),
            SmartImagePublisher(ZONE_DETECTION_IMAGE_TOPIC,
                                zone_detection_validator,
                                throttle_rate=0.5),
            SmartImagePublisher(SEGMENTATION_IMAGE_TOPIC, seg_validator),
            SmartImagePublisher(FEATURES_EXTRACTION_IMAGE_TOPIC,
                                features_validator),
            SmartImagePublisher(BODY_TRACKING_IMAGE_TOPIC,
                                restart_validator,
                                throttle_rate=1.5)
        ]

        self.pub_index = 0
Example 2
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Simple joystick controller to answer Snips queries when demoing in a noisy environment
"""

import rospy
from devine_config import topicname
from devine_dialog.msg import TtsQuery, TtsAnswer
from sensor_msgs.msg import Joy

JOYSTICK_TOPIC = topicname('joystick')
TTS_QUERY_TOPIC = topicname('tts_query')
TTS_ANSWER_TOPIC = topicname('tts_answer')

# See http://wiki.ros.org/joy
LOGITECH_JOYSTICK_BTN_MAP = {
    'X': 0,
    'A': 1,
    'B': 2,
    'Y': 3,
    'LB': 4,
    'RB': 5,
    'LT': 6,
    'RT': 7,
    'back': 8,
    'start': 9,
    'LStick': 10,
    'RStick': 11,
}
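
As a minimal illustration (not the node's actual logic, which is omitted above), the map can be used to index Joy.buttons in a subscriber callback; the callback and node name below are purely illustrative:

def joy_callback(msg):
    """ Log a message whenever the 'A' button is pressed (illustrative only) """
    # Joy.buttons carries one 0/1 entry per button, indexed as in LOGITECH_JOYSTICK_BTN_MAP
    if msg.buttons[LOGITECH_JOYSTICK_BTN_MAP['A']]:
        rospy.loginfo('A button pressed')

if __name__ == '__main__':
    rospy.init_node('joystick_answer_example')
    rospy.Subscriber(JOYSTICK_TOPIC, Joy, joy_callback)
    rospy.spin()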
Example 3
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import random
import json
import uuid
from enum import Enum
import rospy
from devine_config import topicname
from devine_dialog.msg import TtsQuery, TtsAnswer
from devine_common import ros_utils

TTS_ANSWER_TOPIC = topicname('tts_answer')
DIALOGS = None


class TTSAnswerType(Enum):
    NO_ANSWER = 0
    YES_NO = 1
    PLAYER_NAME = 2


def load_dialogs():
    """ Load dialogs from json file """
    global DIALOGS
    if not DIALOGS:
        with open(ros_utils.get_fullpath(__file__, 'static/dialogs.json')) as f:
            DIALOGS = json.loads(f.read())
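
A short usage sketch, under the assumption (hypothetical; the schema of dialogs.json is not shown here) that each dialog key maps to a list of alternative phrasings:

load_dialogs()
# 'ask_ready' is a made-up key used purely for illustration
sentence = random.choice(DIALOGS['ask_ready'])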
Example 4
                 '../../Mask_RCNN'))

import coco
import model as modellib
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from devine_config import topicname
from ros_image_processor import ImageProcessor, ROSImageProcessingWrapper
from devine_image_processing.msg import SegmentedImage, SceneObject

# Paths
COCO_MODEL_PATH = ros_utils.get_fullpath(__file__, '../../mask_rcnn_coco.h5')
MODEL_DIR = ros_utils.get_fullpath(__file__, 'logs')

# Topics
IMAGE_TOPIC = topicname('segmentation_image')
SEGMENTATION_TOPIC = topicname('objects')

# Objects with a confidence lower than this threshold are discarded
SEGMENTATION_THRESHOLD = 0.7


class RCNNSegmentation(ImageProcessor):
    """ RCNN segmentation wrapper of Mask_RCNN for use in guesswhat """
    class_names = [
        'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
Example 5
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import rospy
import message_filters
import numpy as np
from sensor_msgs.msg import Image as ROSImage
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from devine_config import topicname
from devine_common.image_utils import image_to_ros_msg
import cv2

# IN
IMAGE_IN_TOPIC = topicname('image')
DEPTH_IN_TOPIC = topicname('depth')
# OUT
MASKED_IMAGE_TOPIC = topicname('masked_image')

# Consts
DEPTH_THRESHOLD = 1.5  # Depth in meters


class DepthMask(object):
    """ Node that applies a depth mask to RGB image and resends the masked image """
    def __init__(self):
        self.bridge = CvBridge()
        image_sub = message_filters.Subscriber(IMAGE_IN_TOPIC, ROSImage)
        depth_sub = message_filters.Subscriber(DEPTH_IN_TOPIC, ROSImage)
        self.masked_image = None
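
The time synchronizer and callback are not shown above. Purely as a sketch of the masking step, assuming the RGB and depth frames are pixel-aligned and already converted to numpy arrays (e.g. through cv_bridge), the thresholding could look like this hypothetical helper:

import numpy as np

def apply_depth_mask(rgb, depth, threshold=DEPTH_THRESHOLD):
    """ Black out RGB pixels whose depth is missing or beyond `threshold` meters """
    d = np.nan_to_num(depth)            # missing depth often comes back as NaN
    near = (d > 0) & (d < threshold)    # keep only valid pixels closer than the threshold
    return rgb * near[..., np.newaxis].astype(rgb.dtype)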
Example 6
import rospy
import cv2
import tensorflow as tf
from std_msgs.msg import String
from bson import json_util
from tf_pose.estimator import TfPoseEstimator
from keras.backend.tensorflow_backend import set_session
from devine_config import topicname
from devine_common import ros_utils
from ros_image_processor import ImageProcessor, ROSImageProcessingWrapper

# Originally in './models/graph/mobilenet_thin/graph_opt.pb'
MODEL_DIR = ros_utils.get_fullpath(__file__, '../../mobilenet_thin.pb')

# Topics
IMAGE_TOPIC = topicname('body_tracking_image')
PUBLISH_TOPIC = topicname('body_tracking')

class BodyTracking(ImageProcessor):
    """ Body Tracking wrapper of tf_pose for use in guesswhat """

    body_parts_names = [
        'Nose', 'Neck', 'RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist', 'RHip', 'RKnee',
        'RAnkle', 'LHip', 'LKnee', 'LAnkle', 'REye', 'LEye', 'REar', 'LEar', 'Background'
    ]

    def __init__(self):
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
        self.estimator = TfPoseEstimator(MODEL_DIR, target_size=(432, 368))  # downscale image
Example 7
""" Head mover helper library """
__author__ = "Jordan Prince Tremblay, Ismael Balafrej, Felix Labelle, Felix Martel-Denis, Eric Matte, Adam Letourneau, Julien Chouinard-Beaupre, Antoine Mercier-Nicol"
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import rospy
import actionlib
from devine_config import topicname
from std_msgs.msg import Bool
from devine_head_coordinator.msg import LookAtHumanAction, LookAtHumanGoal

SCENE_DETECTION_TOPIC = topicname('start_scene_detection')
TOPIC_SCENE_FOUND = topicname('scene_found')


class Look(object):
    """ Easy to use intent to movement translator """
    def __init__(self):
        self._human_finder_client = actionlib.SimpleActionClient(
            'human_finder', LookAtHumanAction)
        self._human_finder_client.wait_for_server()
        self.humans_count = 0
        self._start_scene_publisher = rospy.Publisher(SCENE_DETECTION_TOPIC,
                                                      Bool,
                                                      queue_size=1)

    def at_human(self):
Example 8
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

from threading import RLock
from enum import IntEnum
from time import time
import rospy
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import String, Bool
from devine_config import topicname
from blur_detection import is_image_blurry
from ros_image_processor import ImageProcessor, ROSImageProcessingWrapper

# IN
IMAGE_TOPIC = topicname('masked_image')

# OUT
BODY_TRACKING_IMAGE_TOPIC = topicname('body_tracking_image')
ZONE_DETECTION_IMAGE_TOPIC = topicname('zone_detection_image_in')
SEGMENTATION_IMAGE_TOPIC = topicname('segmentation_image')
FEATURES_EXTRACTION_IMAGE_TOPIC = topicname('features_extraction_image')


class Throttle(object):
    """ ROS publisher throttler """
    def __init__(self, publisher, delay):
        self.publisher = publisher
        self.last_time = 0
        self.delay = delay
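
The rest of the class is truncated here; as a hedged sketch of the rate limiting such a throttler implies (an assumption, not the project's actual method), a stand-in could look like this:

from time import time

class ThrottledPublisher(object):
    """ Hypothetical stand-in illustrating the throttling idea """
    def __init__(self, publisher, delay):
        self.publisher = publisher
        self.last_time = 0
        self.delay = delay

    def publish(self, msg):
        # Forward msg only if `delay` seconds have elapsed since the last accepted message
        now = time()
        if now - self.last_time >= self.delay:
            self.last_time = now
            self.publisher.publish(msg)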
Example 9
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import math
import rospy
import tf

from std_msgs.msg import Float64MultiArray
from geometry_msgs.msg import PoseStamped
from devine_irl_control import irl_constant
from devine_config import topicname

# IN
TOPIC_OBJECT_LOCATION = topicname('guess_location_world')

# OUT
TOPIC_POINT_ERR = topicname('robot_err_pointing')


class ErrorPoint(object):
    """ Compute error when pointing with arms """
    def __init__(self):
        self.pose_stamp = None
        self.tf_listener = tf.TransformListener()
        rospy.Subscriber(TOPIC_OBJECT_LOCATION, PoseStamped,
                         self.pose_stamp_callback)
        self.pub_err_top = rospy.Publisher(TOPIC_POINT_ERR,
                                           Float64MultiArray,
                                           queue_size=1)
Example 10
__author__ = "Jordan Prince Tremblay, Ismael Balafrej, Felix Labelle, Felix Martel-Denis, Eric Matte, Adam Letourneau, Julien Chouinard-Beaupre, Antoine Mercier-Nicol"
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import rospy
from std_msgs.msg import Float64MultiArray
from devine_dialog.msg import TtsQuery
from devine_dialog import TTSAnswerType, send_speech
from guesswhat.models.guesser.guesser_wrapper import GuesserWrapper
from devine_config import topicname

TTS_ANSWER_TOPIC = topicname('tts_answer')
TTS_QUERY_TOPIC = topicname('tts_query')
CONFIDENCE_TOPIC = topicname('objects_confidence')


class GuesserROSWrapper(GuesserWrapper):
    """ Wraps the guesser model and publishes confidence levels """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.confidence = rospy.Publisher(CONFIDENCE_TOPIC, Float64MultiArray, queue_size=1, latch=True)

    def find_object(self, *args, **kwargs):
        """ find_object interface for the looper """
        found, softmax, selected_object = super().find_object(*args, **kwargs)
        found = [False]  # found is based on the oracle's choice, which we don't know
Example 11
from trajectory_msgs.msg import JointTrajectoryPoint

from devine_config import topicname
from devine_irl_control import irl_constant
from devine_irl_control.movement import Movement
from devine_irl_control.controllers import TrajectoryClient
from devine_irl_control.gripper import Gripper
from devine_irl_control import ik
from devine_irl_control.admittance import Admittance

ROBOT_NAME = irl_constant.ROBOT_NAME
LOW_ADMITTANCE = 3
HIGH_ADMITTANCE = 15

# IN
TOPIC_OBJECT_LOCATION = topicname('guess_location_world')
TOPIC_HEAD_LOOK_AT = topicname('robot_look_at')
TOPIC_HEAD_JOINT_STATE = topicname('robot_head_joint_traj_point')
GUESS_SUCCESS = topicname('object_guess_success')

# OUT
TOPIC_IS_POINTING = topicname('is_pointing_object')
TOPIC_IS_LOOKING = topicname('is_looking')

class Controller(object):
    """ Arms, head and gripper controller """

    def __init__(self, is_head_activated=True, is_arms_activated=True, is_gripper_activated=True):
        self.arm_data = None
        self.head_data = None
        self.arm_joints_position = [0, 0, 0, 0]
Example 12
# -*- coding: utf-8 -*-
""" Example to find scene """
__author__ = "Jordan Prince Tremblay, Ismael Balafrej, Felix Labelle, Felix Martel-Denis, Eric Matte, Adam Letourneau, Julien Chouinard-Beaupre, Antoine Mercier-Nicol"
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import rospy
from std_msgs.msg import String
from trajectory_msgs.msg import JointTrajectoryPoint
from devine_config import topicname

ZONE_DETECTION_TOPIC = topicname('zone_detection')
TOPIC_HEAD_JOINT_STATE = topicname('robot_head_joint_traj_point')


def main():
    """ Example to look for a scene """
    # Start ROS node
    node_name = 'devine_common_find_scene_example'
    rospy.init_node(node_name)

    pub_zone_detection = rospy.Publisher(ZONE_DETECTION_TOPIC,
                                         String,
                                         queue_size=10)
    pub_neck_ctrl = rospy.Publisher(TOPIC_HEAD_JOINT_STATE,
                                    JointTrajectoryPoint,
                                    queue_size=1)
Example 13
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import argparse
import rospy

from trajectory_msgs.msg import JointTrajectoryPoint
from geometry_msgs.msg import PoseStamped
from devine_config import topicname
import devine_common.ros_utils as ros_utils


NODE_NAME = 'devine_irl_control_example_point'
TOPIC_OBJECT_LOCATION = topicname('guess_location_world')
TOPIC_HEAD_LOOK_AT = topicname('robot_look_at')
TOPIC_HEAD_JOINT_STATE = topicname('robot_head_joint_traj_point')
TIME = 2


def main(args):
    """ Publish on the 3D position from /base_link to point """
    # Parse arguments
    if args.point:
        point = [float(i) for i in args.point.split(',')]

    if args.look:
        look = [float(i) for i in args.look.split(',')]
    elif args.head_joint_position:
        head_joint_pos = [float(i) for i in args.head_joint_position.split(',')]
Example 14
def get_image_dim(topic_name=topicname('camera_info')):
    """ Return the image width and height for an image topic (blocking) """
    cam_info = rospy.wait_for_message(topic_name, CameraInfo, None)
    return (cam_info.width, cam_info.height)
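
A minimal usage sketch (the node name below is illustrative, and a publisher is assumed to be active on the camera_info topic):

rospy.init_node('camera_dim_example')
width, height = get_image_dim()  # blocks until a CameraInfo message arrives
rospy.loginfo('Camera resolution: %dx%d', width, height)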
Example 15
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
""" Node to visualize object in RViz and broadcast object_frame """
__author__ = "Jordan Prince Tremblay, Ismael Balafrej, Felix Labelle, Felix Martel-Denis, Eric Matte, Adam Letourneau, Julien Chouinard-Beaupre, Antoine Mercier-Nicol"
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import rospy
from devine_irl_control.objects import ObjectMaker
from devine_config import topicname

TOPIC_OBJECT_LOCATION = topicname('guess_location_world')


def main():
    """ Init ROS Node to create marker and broadcast TF at postion /object_location """
    node_name = 'devine_irl_control_object'
    rospy.init_node(node_name)
    rospy.loginfo('Running node \'%s\'', node_name)

    ObjectMaker(TOPIC_OBJECT_LOCATION)
    rospy.spin()


if __name__ == '__main__':
    main()
Example 16
import cv2
import tensorflow as tf
import numpy as np
import rospy
from PIL import Image
from rospy.numpy_msg import numpy_msg
from tensorflow.contrib.slim.python.slim.nets import vgg
from devine_image_processing.msg import VGG16Features
from devine_config import topicname
from devine_common import ros_utils
from ros_image_processor import ImageProcessor, ROSImageProcessingWrapper

VGG16_NTW_PATH = ros_utils.get_fullpath(__file__, '../../vgg_16.ckpt')

FEATURES_TOPIC = topicname('image_features')
IMAGE_TOPIC = topicname('features_extraction_image')

IMAGE_SIZE = 224
CHANNEL_MEAN = np.array([123.68, 116.779, 103.939])


class ROSVgg16(ImageProcessor):
    """ VGG-16 wrapper for ROS """
    def __init__(self):
        self.holder = tf.placeholder(
            tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE,
                         len(CHANNEL_MEAN)],
            name='image')
        _, self.end_points = vgg.vgg_16(self.holder,
                                        is_training=False,
Example 17
__status__ = "Production"

import rospy
import tf
import message_filters
from sensor_msgs.msg import PointCloud2, CompressedImage
from sensor_msgs import point_cloud2
from geometry_msgs.msg import PoseStamped, PointStamped

from devine_config import topicname, constant
from devine_common import math_utils, ros_utils

CAM_FRAME_OPTICAL = constant('cam_frame_optical')

# Topics
IMAGE_DEPTH_TOPIC = topicname('image_depth')
OBJECT_IMAGE_LOCATION_TOPIC = topicname('guess_location_image')
OBJECT_WORLD_LOCATION_TOPIC = topicname('guess_location_world')
SEG_IMAGE_TOPIC = topicname('segmentation_image')

ROS_PUBLISHER = rospy.Publisher(OBJECT_WORLD_LOCATION_TOPIC,
                                PoseStamped,
                                queue_size=1)


def plot_openni_3d_data_matrix(data):
    """ Plot the matrix in 3D. Return the list of points. """
    return math_utils.plot_3d_matrix(deserialize(data))


def deserialize(pcloud):
Example 18
from devine_dialog import TTSAnswerType
from threading import RLock

# Snips settings
SNIPS_HOST = 'localhost'
SNIPS_PORT = 1883
SNIPS_INTENT = {
    'yes': 'Devine-UdeS:Yes',
    'no': 'Devine-UdeS:No',
    'na': 'Devine-UdeS:NA',
    'name': 'Devine-UdeS:GetName'
}
MQTT_CLIENT = mqtt.Client()

# Topics
TTS_QUERY = topicname('tts_query')
TTS_ANSWER = topicname('tts_answer')


class SnipsRosWrapper(mqtt.Client):
    def __init__(self):
        self._publisher = rospy.Publisher(TTS_ANSWER, TtsAnswer, queue_size=10)
        rospy.Subscriber(TTS_QUERY, TtsQuery, self.on_query)
        rospy.Subscriber(TTS_ANSWER, TtsAnswer, self.on_answer)
        super(SnipsRosWrapper, self).__init__()
        self.connect(SNIPS_HOST, SNIPS_PORT)
        self.queries = {}
        self.query_mutex = RLock()

    def on_connect(self, _client, _userdata, _flags, _rc):
        """ Callback executed when snips is connected """
Example 19
__email__ = "*****@*****.**"
__status__ = "Production"

import json
import time
import random
from std_msgs.msg import String, Bool
import rospy
from devine_dialog.msg import TtsQuery
from devine_dialog import TTSAnswerType, send_speech, load_dialogs
from devine_config import topicname
from devine_common import ros_utils
from devine_head_coordinator.look import Look

# IN topics
HUMAN_READY_DETECTED_TOPIC = topicname('body_tracking')
IS_POINTING_OBJECT_TOPIC = topicname('is_pointing_object')
OBJECT_CATEGORY_TOPIC = topicname('guess_category')
EXPRESSION_DONE_TOPIC = topicname('robot_facial_expression_completed')

# OUT topics
TTS_PUBLISHER = rospy.Publisher(topicname('tts_query'),
                                TtsQuery,
                                queue_size=10)
READY_TO_PLAY_PUBLISHER = rospy.Publisher(topicname('player_name'),
                                          String,
                                          queue_size=1)
GUESS_SUCCEEDED = rospy.Publisher(topicname('object_guess_success'),
                                  Bool,
                                  queue_size=1)
START_NEW_GAME = rospy.Publisher(topicname('new_game'), Bool, queue_size=1)
Example 20
__status__ = "Production"

import json
import rospy
import tf
import actionlib
from std_msgs.msg import String
from geometry_msgs.msg import PointStamped, Point
from devine_head_coordinator.msg import LookAtHumanAction, LookAtHumanResult, LookAtHumanFeedback
from devine_config import topicname, constant
from devine_common.ros_utils import pose_stamped, get_image_dim
from devine_common.math_utils import upper_left_to_zero_center, pixel_to_meters
import head_action_control

# IN
BODY_TRACKING = topicname('body_tracking')
CAM_FRAME_OPTICAL = constant('cam_frame_optical')

# OUT
# /devine/human_finder actionlib

# Consts
HARDCODED_HUMAN_DIST = 1.5

class LookAtHumanActionServer(object):
    """ Action server to look at humans for a fixed period of time """

    def __init__(self, name):
        self._action_name = name
        self._as = actionlib.SimpleActionServer(
            self._action_name, LookAtHumanAction, execute_cb=self.execute_cb, auto_start=False)
Example 21
# -*- coding: utf-8 -*-
""" Head Motor Controls Helper Library """
import math
import rospy
import tf
from geometry_msgs.msg import Point
from devine_irl_control.irl_constant import ROBOT_CONTROLLER, ROBOT_NAME, ROBOT_LINK
from devine_irl_control.controllers import TrajectoryClient
from devine_irl_control import ik
from devine_common.math_utils import clip
from devine_config import topicname

EYES_TOPIC = topicname("eyes")
# Minimum angle at which we move the head instead of the eyes
MIN_ANGLE_FOR_HEAD = math.radians(10)


class HeadActionCtrl(object):
    """ Controller for the head motors (neck_pan, neck_tilt, eyes) """
    def __init__(self,
                 neck_pan_bounds,
                 neck_tilt_bounds,
                 neck_pan_delta,
                 neck_tilt_delta,
                 starting_position=[None, None]):
        self.joint_ctrl = TrajectoryClient(ROBOT_NAME, 'neck_controller')
        self.limits = ROBOT_CONTROLLER[
            self.joint_ctrl.controller_name]['joints_limit']

        if neck_pan_bounds[0] > neck_pan_bounds[1]:
            raise Exception("Expected neck pan bounds to be [min, max]")
Example 22
from guesswhat.data_provider.questioner_batchifier import QuestionerBatchifier

from modelwrappers import GuesserROSWrapper, OracleROSWrapper
from devine_config import topicname
from devine_common import ros_utils
from devine_image_processing.msg import SegmentedImage, VGG16Features

EVAL_CONF_PATH = ros_utils.get_fullpath(__file__, '../../config/eval.json')
GUESS_CONF_PATH = ros_utils.get_fullpath(__file__, '../../config/guesser.json')
QGEN_CONF_PATH = ros_utils.get_fullpath(__file__, '../../config/qgen.json')
GUESS_NTW_PATH = ros_utils.get_fullpath(__file__, '../../data/guesser.ckpt')
QGEN_NTW_PATH = ros_utils.get_fullpath(__file__, '../../data/qgen.ckpt')
TOKENS_PATH = ros_utils.get_fullpath(__file__, '../../data/tokens.json')

# topics
SEGMENTATION_TOPIC = topicname('objects')
FEATURES_TOPIC = topicname('image_features')
STATUS_TOPIC = topicname('guesswhat_status')
# TODO: merge these topics into one
OBJECT_TOPIC = topicname('guess_location_image')
CATEGORY_TOPIC = topicname('guess_category')


class ImgFeaturesLoader(object):
    """ Loads image from memory """
    def __init__(self, data):
        self.data = data

    def get_image(self, *_args, **_kwargs):
        """ get_image interface """
        return self.data
Example 23
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import argparse
import rospy

from std_msgs.msg import Bool
from std_msgs.msg import Float64MultiArray

from devine_config import topicname

TOPIC_GUESSWHAT_CONFIDENCE = topicname('objects_confidence')
TOPIC_GUESSWHAT_SUCCEED = topicname('object_guess_success')


def main(arguments):
    """ Publish on GuessWhat?! confidence and success topic """

    # Parse arguments
    confidence_array = [float(i) for i in arguments.confidence.split(',')]
    is_guesswhat_succeed = bool(int(arguments.succeed))

    # Start ROS node
    node_name = 'devine_irl_control_example_emotion'
    rospy.init_node(node_name)
    rospy.loginfo('Running node \'%s\' with\n\tconfidence: %s\n\tsucceed: %s',
                  node_name, confidence_array, is_guesswhat_succeed)
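
The example stops before the actual publishing; a hedged sketch of how the parsed values are commonly sent with std_msgs (publisher handles and the latching choice below are illustrative, not necessarily the project's code):

# Hypothetical continuation using the values parsed in main() above
confidence_pub = rospy.Publisher(TOPIC_GUESSWHAT_CONFIDENCE, Float64MultiArray,
                                 queue_size=1, latch=True)
succeed_pub = rospy.Publisher(TOPIC_GUESSWHAT_SUCCEED, Bool, queue_size=1, latch=True)
rospy.sleep(1)  # give late subscribers a chance to connect to the latched topics
confidence_pub.publish(Float64MultiArray(data=confidence_array))
succeed_pub.publish(Bool(data=is_guesswhat_succeed))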
Example 24
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import rospy
from std_msgs.msg import Header
import h5py
import gzip
import json
from devine_config import topicname
from devine_image_processing.msg import SegmentedImage, SceneObject, VGG16Features

SEGMENTATION_TOPIC = topicname('objects')
FEATURES_TOPIC = topicname('image_features')

rospy.init_node('guesswhat_example', anonymous=True)
f = gzip.open('guesswhat.image.jsonl.gz')
seg = json.loads(next(f).decode())
f.close()
h5file = h5py.File('image_features.h5', 'r')
feats = h5file['features'][0]

head = Header(stamp=rospy.Time.now())
seg_msg = SegmentedImage(header=head, objects=[])
for obj in seg['objects']:
    obj_t = SceneObject(category_id=obj['category_id'],
                        category_name=obj['category'])
    obj_t.bounding_box.x_offset = int(obj['bbox'][0])
Example 25
from jn0_face_msgs.msg import EmoIntensity, EmoPulse
from devine_dialog.msg import TtsQuery
from devine_config import topicname


class RobotFacialExpression(Enum):
    """ Valid facial expressions """

    SURPRISE = 'Surprise'
    SUPRISE_MOUTH = 'Surprise-mouth'
    SAD = 'Sad'
    ANGER = 'Anger'
    JOY = 'Joy'


OBJECT_CONFIDENCE_TOPIC = topicname('objects_confidence')
GAME_SUCCESS_TOPIC = topicname('object_guess_success')
ROBOT_TALKING_EXPRESSION_TOPIC = topicname('robot_talking_expression')
ROBOT_FACIAL_EXPRESSION_TOPIC = topicname('robot_facial_expression')
FACIAL_EXPRESSION_COMPLETED = topicname('robot_facial_expression_completed')
TTS_QUERY_TOPIC = topicname('tts_query')

NODE_NAME = 'facial_expression'
TALKING_EXPRESSION_DURATION = 0.2


class FacialExpression(object):
    """ Subscribes to object confidence and publishes facial expressions for a specific duration """

    FACIAL_EXPRESSION_DURATION = 10
Example 26
__email__ = "*****@*****.**"
__status__ = "Production"

import argparse
import random
import rospy
from devine_config import topicname
import cv2
from sensor_msgs.msg import CompressedImage
import numpy as np
from time import sleep
import os
from std_msgs.msg import String
from devine_common.image_utils import image_to_ros_msg

FEATURE_IMAGE_TOPIC = topicname('features_extraction_image')
SEGMENTATION_IMAGE_TOPIC = topicname('segmentation_image')
CATEGORY_TOPIC = topicname('guess_category')


class ImageTest(object):
    """ Test class that loads images and runs a game of GW?! """
    def __init__(self, image_folder):
        self.image_folder = image_folder
        self.outputs = []
        self.image = None
        rospy.init_node('mscoco_test')
        self.rate = rospy.Rate(1)
        self.image_feat_pub = rospy.Publisher(FEATURE_IMAGE_TOPIC,
                                              CompressedImage,
                                              queue_size=1)
Example 27
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "*****@*****.**"
__status__ = "Production"

import json
from collections import Counter
from sensor_msgs.msg import CompressedImage
from devine_config import topicname
import rospy
from devine_common import image_utils, ros_utils
from devine_image_processing.msg import SegmentedImage

IMAGE_TOPIC = topicname("segmentation_image")
SEGMENTATION_IMAGE_TOPIC = topicname("objects")

IMAGE_PUB = rospy.Publisher(IMAGE_TOPIC, CompressedImage, queue_size=1, latch=True)

IMAGE_MSG = "image_msg"
FILENAME = "filename"
EXPECTED_OBJECTS = "expected_objects"
LAST_STAMP = None


def segment_image(image):
    """ Publish the image for segmentation and collect the result """
    global LAST_STAMP
    # send over node
    IMAGE_PUB.publish(image[IMAGE_MSG])
    # receive data