def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'learning.json')) as f:
            self.params = json.load(f)

        self.translator = EnvironmentTranslator()
        self.learning = None

        # User control and critical resources
        self.lock_iteration = RLock()
        self.ready_for_interaction = True
        self.focus = None
        self.set_iteration = -1

        # Saved experiment files
        self.dir = join(self.rospack.get_path('apex_playground'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)
        # self.stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # self.source_file = join(self.dir, self.source_name + '.pickle')
        self.main_experiment = True
        self.condition = ""
        self.trial = ""
        self.task = ""

        # Serving these services
        self.service_name_perceive = "learning/perceive"
        self.service_name_produce = "learning/produce"
        self.service_name_set_interest = "learning/set_interest"
        self.service_name_set_iteration = "learning/set_iteration"
        self.service_name_interests = "learning/get_interests"

        # Publishing these topics
        self.pub_focus = rospy.Publisher('learning/current_focus',
                                         String,
                                         queue_size=1,
                                         latch=True)
        self.pub_user_focus = rospy.Publisher('learning/user_focus',
                                              String,
                                              queue_size=1,
                                              latch=True)
        self.pub_ready = rospy.Publisher('learning/ready_for_interaction',
                                         Bool,
                                         queue_size=1,
                                         latch=True)
        self.pub_iteration = rospy.Publisher('iteration',
                                             UInt32,
                                             queue_size=1,
                                             latch=True)

        # Using these services
        self.service_name_get_perception = "perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo(
                "Learning node is waiting for service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception,
                                            GetSensorialState)
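
Most examples on this page share the same configuration-loading idiom: resolve a package path with RosPack, then read a JSON file from its config directory. A minimal standalone sketch of that pattern (the package and file names below are placeholders, not taken from any specific example):

import json
from os.path import join
from rospkg import RosPack

rospack = RosPack()
# 'my_package' is a hypothetical package name; get_path() raises if it is not installed
config_file = join(rospack.get_path('my_package'), 'config', 'learning.json')
with open(config_file) as f:
    params = json.load(f)
print(params)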
Example #2
    def __init__(self):
        # Register Ctrl-C sigint
        signal.signal(signal.SIGINT, self._signal_handler)
        rpack = RosPack()
        pkg_path = rpack.get_path('dialogflow_ros')

        rospy.Subscriber('/bat_charge', Float64, self.cbk_bat_charge)

        # -------------------------------------------------- #
        #  BUILD THE PANEL
        # -------------------------------------------------- #
        global app
        app = QApplication(sys.argv)
        #MainWindow = AppMainWindow()
        win = QMainWindow()
        win.resize(800, 600)

        BtnLayout = QtWidgets.QHBoxLayout()
        self.PlusButton = QtWidgets.QPushButton('+')
        self.MinusButton = QtWidgets.QPushButton('-')
        BtnLayout.addWidget(self.PlusButton)
        BtnLayout.addWidget(self.MinusButton)

        button = QPushButton('Click')

        button.clicked.connect(self.on_button_clicked)
        button.show()

        app.exec_()
Example #3
    def __init__(self):
        self.interrupted = False
        self.detector = None
        rpack = RosPack()
        # UMDL or PMDL file paths along with audio files
        pkg_path = rpack.get_path('dialogflow_ros')

        #self.model_path = pkg_path + '/scripts/snowboy/resources/jarvis.umdl'
        self.model_path = pkg_path + '/scripts/snowboy/resources/OK_Robot.pmdl'
        print("Hotoword: {}".format(self.model_path))

        ding_path = pkg_path + '/scripts/snowboy/resources/ding.wav'
        # Setup df
        self.df_client = None
        # Setup audio output
        ding = wave.open(ding_path, 'rb')
        self.ding_data = ding.readframes(ding.getnframes())
        self.audio = pyaudio.PyAudio()
        self.stream_out = self.audio.open(
            format=self.audio.get_format_from_width(ding.getsampwidth()),
            channels=ding.getnchannels(),
            rate=ding.getframerate(),
            input=False,
            output=True)
        self.last_contexts = []
        rospy.loginfo("HOTWORD_CLIENT: Ready!")
Example #4
    def __init__(self):
        self.rospack = RosPack()
        with open(join(self.rospack.get_path('nips2016'), 'config', 'learning.json')) as f:
            self.params = json.load(f)

        self.translator = EnvironmentTranslator()
        self.learning = Learning(self.translator.config, 
                                 n_motor_babbling=self.params["n_motor_babbling"], 
                                 explo_noise=self.params["explo_noise"], 
                                 choice_eps=self.params["choice_eps"], 
                                 enable_hand=self.params["enable_hand"],
                                 normalize_interests=self.params["normalize_interests"])
        self.experiment_name = rospy.get_param("/nips2016/experiment_name", "experiment")
        # self.source_name = rospy.get_param("/nips2016/source_name", "experiment")

        rospy.loginfo("Learning node will write {}".format(self.experiment_name))
        # rospy.loginfo("Learning node will read {}".format(self.source_name))

        # User control and critical resources
        self.lock_iteration = RLock()
        self.ready_for_interaction = True
        self.focus = None
        self.set_iteration = -1
        self.demonstrate = None

        # Saved experiment files
        self.dir = join(self.rospack.get_path('nips2016'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)
        # self.stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # self.source_file = join(self.dir, self.source_name + '.pickle')
        self.experiment_file = join(self.dir, self.experiment_name + '.pickle') # if self.source_name == "none" else self.source_file
        self.main_experiment = True

        if isfile(self.experiment_file):
            self.learning.restart_from_end_of_file(self.experiment_file)
        else:
            self.learning.start()

        # Serving these services
        self.service_name_perceive = "/nips2016/learning/perceive"
        self.service_name_produce = "/nips2016/learning/produce"
        self.service_name_set_interest = "/nips2016/learning/set_interest"
        self.service_name_set_iteration = "/nips2016/learning/set_iteration"
        self.service_name_demonstrate = "/nips2016/learning/assess"

        # Publishing these topics
        self.pub_interests = rospy.Publisher('/nips2016/learning/interests', Interests, queue_size=1, latch=True)
        self.pub_focus = rospy.Publisher('/nips2016/learning/current_focus', String, queue_size=1, latch=True)
        self.pub_user_focus = rospy.Publisher('/nips2016/learning/user_focus', String, queue_size=1, latch=True)
        self.pub_ready = rospy.Publisher('/nips2016/learning/ready_for_interaction', Bool, queue_size=1, latch=True)
        self.pub_iteration = rospy.Publisher('/nips2016/iteration', UInt32, queue_size=1, latch=True)

        # Using these services
        self.service_name_get_perception = "/nips2016/perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo("Learning  node is waiting service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception, GetSensorialState)
Example #5
File: main.py Project: LucidOne/rqt
    def create_application(self, argv):
        from python_qt_binding.QtGui import QIcon
        app = super(Main, self).create_application(argv)
        rp = RosPack()
        logo = os.path.join(rp.get_path('rqt_gui'), 'resource', 'rqt.svg')
        icon = QIcon(logo)
        app.setWindowIcon(icon)
        return app
Example #6
    def __init__(self, title='Options', description=None):
        super(SimpleSettingsDialog, self).__init__()
        self.setObjectName('SimpleSettingsDialog')

        rp = RosPack()
        ui_file = os.path.join(rp.get_path('rqt_quaternion_view'), 'resource',
                               'QuaternionViewOptions.ui')
        loadUi(ui_file, self)

        self.setWindowTitle(title)

        if description is not None:
            self.add_label(description)
Example #7
    def __init__(self, title='Options', description=None):
        super(SimpleSettingsDialog, self).__init__()
        self.setObjectName('SimpleSettingsDialog')

        rp = RosPack()
        ui_file = os.path.join(rp.get_path('qt_gui_py_common'), 'resource', 'simple_settings_dialog.ui')
        loadUi(ui_file, self)

        self.setWindowTitle(title)
        self._settings_groups = []

        if description is not None:
            self.add_label(description)
Example #8
    def __init__(self):
        self._action_name = '/pobax_playground/perception/record_server'
        self._as = actionlib.SimpleActionServer(self._action_name,
                                                RecordAction,
                                                execute_cb=self.execute_cb,
                                                auto_start=False)
        self._as.start()
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'perception.json')) as f:
            self.params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        self.topics = TopicAggregator()
Example #9
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('nips2016'), 'config',
                     'perception.json')) as f:
            self.params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "/nips2016/perception/get"
        self.service_name_record = "/nips2016/perception/record"
        # Using these services
        self.service_name_set_compliant = "/nips2016/torso/set_compliant"
        self.topics = TopicAggregator()  # All topics are read and stored in that object
Example #10
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'perception.json')) as f:
            self.params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "/pobax_playground/perception/get"
        self.service_name_record = "/pobax_playground/perception/record"
        #self.service_name_baxter_start_record = "/pobax_playground/perception/baxter_start_record"
        #self.service_name_baxter_stop_record = "/pobax_playground/perception/baxter_stop_record"
        # Using these services
        self.topics = TopicAggregator()  # All topics are read and stored in that object
Example #11
    def __init__(self, filename=None, ros_pack=None):
        rp = ros_pack or RosPack()
        qtgui_path = rp.get_path('qt_gui')
        super(Main, self).__init__(qtgui_path,
                                   invoked_filename=filename,
                                   settings_filename='rqt_gui')
        self._ros_pack = rp
Example #12
    def __init__(self):
        self.rospack = RosPack()

        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'voice.json')) as f:
            self.params = json.load(f)

        # Serving these services
        self.service_name_execute_analyse = "/pobax_playground/voice/execute_analyse"
        self.service_name_baxter_analyse = "/pobax_playground/voice/baxter_analyse"

        # Init DIVA vocal tract simulator
        self.voice = Voice(self.params["tau"],
                           self.params["pa"],
                           self.params["pc"],
                           gui=self.params["gui"],
                           audio=self.params["audio"])
Example #13
def handle_rwt_action(req):
    mode = rosaction.MODE_ACTION
    pkg = RosPack()
    packages = rosaction.iterate_packages(pkg, mode)
    messageList = []
    # iterate_packages yields (package_name, path) pairs
    for package_name, path in packages:
        message = rosmsg.list_msgs(package_name)
        messageList.append(str(message))
    return ActionListResponse(messageList)
Example #14
def handle_rwt_srv(req):
    mode = rosmsg.MODE_SRV
    pkg = RosPack()
    packages = rosmsg.iterate_packages(pkg, mode)
    messageList = []
    # iterate_packages yields (package_name, path) pairs
    for package_name, path in packages:
        message = rosmsg.list_srvs(package_name)
        messageList.append(str(message))
    return SrvListResponse(messageList)
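
Both handlers above have the same shape: iterate over every package that exposes the given mode (.action or .srv), list its definitions, and return them in a single response. A hedged sketch of how such a handler might be wired up as a ROS service; the node name, service name, and the SrvList service type are assumptions inferred from the SrvListResponse used above:

import rospy
# from rwt_msgs.srv import SrvList  # assumed location of the service definition

rospy.init_node('rwt_list_server')  # hypothetical node name
rospy.Service('rwt_srv_list', SrvList, handle_rwt_srv)  # hypothetical service name
rospy.spin()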
Example #15
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'general.json')) as f:
            self.params = json.load(f)
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'torso.json')) as f:
            self.torso_params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "perception/get"
        self.service_name_record = "perception/record"
        # Using these services
        self.service_name_set_compliant = "{}/left_arm/set_compliant".format(
            self.torso_params["robot_name"])
        self.topics = PerceptionServices()  # All topics are read and stored in that object
Example #16
class Perception(object):
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'perception.json')) as f:
            self.params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "/pobax_playground/perception/get"
        self.service_name_record = "/pobax_playground/perception/record"
        #self.service_name_baxter_start_record = "/pobax_playground/perception/baxter_start_record"
        #self.service_name_baxter_stop_record = "/pobax_playground/perception/baxter_stop_record"
        # Using these services
        self.topics = TopicAggregator()  # All topics are read and stored in that object

    def run(self):
        rospy.Service(self.service_name_get, GetSensorialState, self.cb_get)
        rospy.Service(self.service_name_record, Record, self.cb_record)
        #rospy.Service(self.service_name_baxter_start_record, BaxterStartRecord, self.cb_baxter_start_record)
        #rospy.Service(self.service_name_baxter_stop_record, BaxterStopRecord, self.cb_baxter_stop_record)

        rospy.loginfo("Done, perception is up!")

    def get(self):
        #TODO ADD VOICE
        state = SensorialState(hand=self.topics.torso_l_eef,
                               culbuto_1=self.topics.culbuto_1)
        return state

    ################################# Service callbacks
    def cb_get(self, request):
        return GetSensorialStateResponse(state=self.get())

    def cb_record(self, request):
        response = RecordResponse()
        # TODO eventually keep trace of the last XX points to start recording prior to the start signal
        # TODO add VOICE DEMO
        rospy.loginfo("Recording {}...".format("an arm demo"))
        for point in range(request.nb_points.data):
            if rospy.is_shutdown():
                break
            response.sensorial_trajectory.points.append(self.get())
            self.rate.sleep()

        rospy.loginfo("Recorded!")
        return response

Example #17
class VoiceNode(object):
    def __init__(self):
        self.rospack = RosPack()

        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'voice.json')) as f:
            self.params = json.load(f)

        # Serving these services
        self.service_name_execute_analyse = "/pobax_playground/voice/execute_analyse"
        self.service_name_baxter_analyse = "/pobax_playground/voice/baxter_analyse"

        # Init DIVA vocal tract simulator
        self.voice = Voice(self.params["tau"],
                           self.params["pa"],
                           self.params["pc"],
                           gui=self.params["gui"],
                           audio=self.params["audio"])

    def run(self):
        rospy.Service(self.service_name_execute_analyse,
                      ExecuteAnalyseVocalTrajectory, self.cb_execute_analyse)
        rospy.Service(self.service_name_baxter_analyse,
                      BaxterAnalyseVocalTrajectory, self.cb_baxter_analyse)

    def cb_baxter_analyse(self, request):
        rospy.loginfo(
            'Voice node analysing baxter sound response to torso movement')
        baxter_sound_traj = self.voice.baxter_analyse(
            request.is_culbuto_touched)
        return BaxterAnalyseVocalTrajectoryResponse(
            baxter_sound_trajectory=SoundTrajectory(data=baxter_sound_traj))

    def cb_execute_analyse(self, request):
        rospy.loginfo('Voice node producing sound...')
        (torso_sound_traj, baxter_sound_traj, is_culb_name, produced_name,
         raw_torso_sound_traj) = self.voice.execute_analyse(request.vocal_trajectory.data)
        return ExecuteAnalyseVocalTrajectoryResponse(
            torso_sound_trajectory=SoundTrajectory(data=torso_sound_traj),
            baxter_sound_trajectory=SoundTrajectory(data=baxter_sound_traj),
            is_culbuto_name=is_culb_name,
            produced_name=produced_name,
            raw_torso_sound=SoundTrajectory(data=raw_torso_sound_traj))
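
A minimal entry point for running this node; the node name is a guess, since the original launch file is not shown:

if __name__ == '__main__':
    rospy.init_node('voice')
    VoiceNode().run()
    rospy.spin()  # run() only registers the services, so keep the node alive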
Example #18
class LearningNode(object):
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'learning.json')) as f:
            self.params = json.load(f)

        self.translator = EnvironmentTranslator()
        self.learning = None

        # User control and critical resources
        self.lock_iteration = RLock()
        self.ready_for_interaction = True
        self.focus = None
        self.set_iteration = -1
        self.demonstrate = None

        # Saved experiment files
        self.dir = join(self.rospack.get_path('apex_playground'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)
        # self.stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # self.source_file = join(self.dir, self.source_name + '.pickle')
        self.main_experiment = True
        self.condition = ""
        self.trial = ""

        # Serving these services
        self.service_name_perceive = "learning/perceive"
        self.service_name_produce = "learning/produce"
        self.service_name_set_interest = "learning/set_interest"
        self.service_name_set_iteration = "learning/set_iteration"
        self.service_name_demonstrate = "learning/assess"
        self.service_name_interests = "learning/get_interests"

        # Publishing these topics
        self.pub_focus = rospy.Publisher('learning/current_focus',
                                         String,
                                         queue_size=1,
                                         latch=True)
        self.pub_user_focus = rospy.Publisher('learning/user_focus',
                                              String,
                                              queue_size=1,
                                              latch=True)
        self.pub_ready = rospy.Publisher('learning/ready_for_interaction',
                                         Bool,
                                         queue_size=1,
                                         latch=True)
        self.pub_iteration = rospy.Publisher('iteration',
                                             UInt32,
                                             queue_size=1,
                                             latch=True)

        # Using these services
        self.service_name_get_perception = "perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo(
                "Learning node is waiting for service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception,
                                            GetSensorialState)

    def produce_init_learner(self):
        condition = rospy.get_param('experiment/current/method')
        trial = rospy.get_param('experiment/current/trial')
        iteration = rospy.get_param('experiment/current/iteration')
        experiment_name = rospy.get_param('/experiment/name')

        if condition != self.condition or trial != self.trial:
            with self.lock_iteration:
                rospy.logwarn("Learner opens condition {} trial {}...".format(
                    condition, trial + 1))

                self.learning = Learning(
                    self.translator.config,
                    condition=condition,
                    n_motor_babbling=self.params["n_motor_babbling"],
                    explo_noise=self.params["explo_noise"],
                    choice_eps=self.params["choice_eps"],
                    enable_hand=self.params["enable_hand"],
                    normalize_interests=self.params["normalize_interests"])

                if iteration > 0:
                    rospy.loginfo(
                        "Learning node restarts {} from iteration {} trial {}".
                        format(experiment_name, iteration, trial))
                    self.learning.restart_from_files(experiment_name, trial,
                                                     iteration)
                else:
                    rospy.loginfo(
                        "Learning node starts {} from scratch trial {}".format(
                            experiment_name, trial))
                    self.learning.start()

            rospy.loginfo(
                "Learner loaded with condition {}!".format(condition))
            self.condition = condition
            self.trial = trial

    def cb_get_interests(self, request):
        # Guard before use: self.learning may not have been initialized yet
        interests = GetInterestsResponse()
        if self.learning is not None:
            interests_array = self.learning.get_normalized_interests_evolution()
            interests.names = self.learning.get_space_names()
            interests.num_iterations = UInt32(len(interests_array))
            interests.interests = [
                Float32(val) for val in interests_array.flatten()
            ]
        return interests

    def run(self):
        rospy.Service(self.service_name_perceive, Perceive, self.cb_perceive)
        rospy.Service(self.service_name_produce, Produce, self.cb_produce)
        rospy.Service(self.service_name_set_interest, SetFocus,
                      self.cb_set_focus)
        rospy.Service(self.service_name_set_iteration, SetIteration,
                      self.cb_set_iteration)
        rospy.Service(self.service_name_demonstrate, Assess, self.cb_assess)
        rospy.Service(self.service_name_interests, GetInterests,
                      self.cb_get_interests)
        rospy.loginfo("Learning is up!")

        rate = rospy.Rate(self.params['publish_rate'])
        while not rospy.is_shutdown():
            self.publish()
            rate.sleep()

    def publish(self):
        if self.learning is not None:
            with self.lock_iteration:
                focus = copy(self.focus)
                ready = copy(self.ready_for_interaction)

            self.pub_ready.publish(Bool(data=ready))
            self.pub_user_focus.publish(
                String(data=focus if focus is not None else ""))
            self.pub_focus.publish(String(data=self.learning.get_last_focus()))
            self.pub_iteration.publish(
                UInt32(data=self.learning.get_iterations()))

    ################################# Service callbacks
    def cb_set_iteration(self, request):
        with self.lock_iteration:
            ready = copy(self.ready_for_interaction)
            self.ready_for_interaction = False
            self.set_iteration = request.iteration.data

        into_past = request.iteration.data < self.learning.get_iterations()
        if ready:
            if into_past:
                pass
                #if self.main_experiment:
                #    self.learning.save(experiment_name, self.trial)
                #self.main_experiment = False
                #rospy.loginfo("Saving file before time travel")
            else:
                self.main_experiment = True
        return SetIterationResponse()

    def cb_set_focus(self, request):
        with self.lock_iteration:
            # space is "" or e.g. "s_hand" to toggle focus on/off
            self.focus = request.space if len(request.space) > 0 else None
            self.ready_for_interaction = False
        return SetFocusResponse()

    def cb_assess(self, request):
        with self.lock_iteration:
            self.demonstrate = request.goal
            self.ready_for_interaction = False
        return AssessResponse()

    def cb_perceive(self, request):
        s = self.translator.sensory_trajectory_msg_to_list(
            request.demo.sensorial_demonstration)
        if request.demo.type_demo == Demonstration.TYPE_DEMO_ARM:
            torso_traj = self.translator.trajectory_msg_to_matrix(
                request.demo.torso_demonstration)
            torso_traj_w = self.translator.trajectory_to_w(torso_traj)
            rospy.loginfo(
                "Learning node is perceiving sensory + torso trajectories for an arm demo"
            )
            success = self.learning.perceive(s, m_demo=torso_traj_w)
        elif request.demo.type_demo == Demonstration.TYPE_DEMO_JOYSTICK:

            rospy.loginfo(
                "Learning node is perceiving sensory + torso trajectories for a joystick demo"
            )
            success = self.learning.perceive(s, j_demo=True)
        else:
            rospy.loginfo(
                "Learning node is perceiving sensory trajectory only for a normal demo"
            )
            success = self.learning.perceive(s)

        if not success:
            rospy.logerr("Learner could not perceive this trajectory")

        # This turn is over, check if we have a time travel pending...
        with self.lock_iteration:
            set_iteration = copy(self.set_iteration)
            self.set_iteration = -1

        if set_iteration > -1:
            rospy.logwarn(
                "Applying time travel to iteration {}".format(set_iteration))
            #self.learning.restart_from_files(experiment_name, set_iteration)

        # And save the current iteration
        experiment_name = rospy.get_param('/experiment/name')
        self.learning.save(experiment_name, self.trial)

        return PerceiveResponse()

    def cb_produce(self, request):
        with self.lock_iteration:
            # Check if we need a new learner
            self.produce_init_learner()

            focus = copy(self.focus)
            demonstrate = copy(self.demonstrate)
            self.demonstrate = None

        rospy.loginfo("Learning node is requesting the current state")
        state = self.get_state(GetSensorialStateRequest()).state

        if demonstrate is None:
            rospy.loginfo("Learning node is producing...")
            w = self.learning.produce(self.translator.get_context(state),
                                      focus)
        else:
            rospy.logwarn("Assessing {}...".format(demonstrate))
            context = self.translator.get_context(state)
            w = self.learning.produce(context, goal=demonstrate)

        trajectory_matrix = self.translator.w_to_trajectory(w)
        trajectory_msg = self.translator.matrix_to_trajectory_msg(
            trajectory_matrix)

        self.ready_for_interaction = True

        response = ProduceResponse(torso_trajectory=trajectory_msg)
        return response
Example #19
class Perception(object):
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('nips2016'), 'config',
                     'perception.json')) as f:
            self.params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "/nips2016/perception/get"
        self.service_name_record = "/nips2016/perception/record"
        # Using these services
        self.service_name_set_compliant = "/nips2016/torso/set_compliant"
        self.topics = TopicAggregator()  # All topics are read and stored in that object

    def run(self):
        for service in [self.service_name_set_compliant]:
            rospy.loginfo("Perception is waiting service {}".format(service))
            rospy.wait_for_service(service)
        self.set_torso_compliant_srv = rospy.ServiceProxy(
            self.service_name_set_compliant, SetTorsoCompliant)
        rospy.Service(self.service_name_get, GetSensorialState, self.cb_get)
        rospy.Service(self.service_name_record, Record, self.cb_record)
        rospy.loginfo("Done, perception is up!")

    def get(self):
        state = SensorialState(ball=self.topics.ball,
                               ergo=self.topics.ergo,
                               color=self.topics.light,
                               sound=self.topics.sound,
                               joystick_1=self.topics.joy1,
                               joystick_2=self.topics.joy2,
                               hand=self.topics.torso_l_eef)
        return state

    def wait_for_human_interaction(self,
                                   arm_threshold=1,
                                   joystick_threshold=0.15):
        rospy.loginfo("We are waiting for human interaction...")

        def detect_arm_variation():
            new_effort = np.array(self.topics.torso_l_j.effort)
            delta = np.absolute(effort - new_effort)
            return np.amax(delta) > arm_threshold

        def detect_joy_variation():
            return np.amax(np.abs(self.topics.joy1.axes)) > joystick_threshold

        effort = np.array(self.topics.torso_l_j.effort)
        rate = rospy.Rate(50)
        is_joystick_demo = None
        while not rospy.is_shutdown():
            if detect_arm_variation():
                is_joystick_demo = False
                break
            elif detect_joy_variation():
                is_joystick_demo = True
                break
            rate.sleep()
        return is_joystick_demo

    ################################# Service callbacks
    def cb_get(self, request):
        return GetSensorialStateResponse(state=self.get())

    def cb_record(self, request):
        response = RecordResponse()
        # TODO eventually keep trace of the last XX points to start recording prior to the start signal

        is_joystick_demo = False
        if request.human_demo.data:
            # Blocking... Wait for the user's grasp before recording...
            is_joystick_demo = self.wait_for_human_interaction()
            if not is_joystick_demo:
                self.set_torso_compliant_srv(
                    SetTorsoCompliantRequest(compliant=True))

        rospy.loginfo("Recording {}...".format(
            "a joystick demo" if is_joystick_demo else "an arm demo"))
        for point in range(request.nb_points.data):
            if rospy.is_shutdown():
                break
            if point % self.params["divider_nb_points_sensory"] == 0:
                response.demo.sensorial_demonstration.points.append(self.get())
            if not is_joystick_demo:
                response.demo.torso_demonstration.points.append(
                    joints.state_to_jtp(self.topics.torso_l_j))
            self.rate.sleep()

        if not is_joystick_demo:
            self.set_torso_compliant_srv(
                SetTorsoCompliantRequest(compliant=False))

        if is_joystick_demo:
            response.demo.type_demo = Demonstration.TYPE_DEMO_JOYSTICK
        elif request.human_demo.data:
            response.demo.type_demo = Demonstration.TYPE_DEMO_ARM
        else:
            response.demo.type_demo = Demonstration.TYPE_DEMO_NORMAL

        rospy.loginfo("Recorded!")
        return response
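
The interaction detector above snapshots the joint efforts when waiting starts and fires as soon as any joint deviates from that baseline by more than arm_threshold. A self-contained illustration of the test with made-up numbers:

import numpy as np

baseline = np.array([0.1, 0.2, 0.3])  # efforts captured when waiting began
current = np.array([0.1, 1.5, 0.3])   # efforts after the user grasps the arm
delta = np.absolute(baseline - current)
print(np.amax(delta) > 1)  # True: the second joint moved beyond the threshold of 1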
Example #20
class Perception(object):
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'general.json')) as f:
            self.params = json.load(f)
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'torso.json')) as f:
            self.torso_params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "perception/get"
        self.service_name_record = "perception/record"
        # Using these services
        self.service_name_set_compliant = "{}/left_arm/set_compliant".format(
            self.torso_params["robot_name"])
        self.topics = PerceptionServices()  # All topics are read and stored in that object

    def run(self):
        for service in [self.service_name_set_compliant]:
            rospy.loginfo("Perception is waiting service {}".format(service))
            rospy.wait_for_service(service)
        self.set_torso_compliant_srv = rospy.ServiceProxy(
            self.service_name_set_compliant, SetTorsoCompliant)
        rospy.Service(self.service_name_get, GetSensorialState, self.cb_get)
        rospy.Service(self.service_name_record, Record, self.cb_record)
        rospy.loginfo("Done, perception is up!")
        rospy.spin()

    def get(self):
        state = SensorialState(ball=self.topics.ball,
                               ergo=self.topics.ergo,
                               color=self.topics.light,
                               sound=self.topics.sound,
                               joystick_1=self.topics.joy1,
                               joystick_2=self.topics.joy2,
                               hand=self.topics.torso_l_eef)
        return state

    def wait_for_human_interaction(self,
                                   arm_threshold=1,
                                   joystick_threshold=0.15):
        rospy.loginfo("We are waiting for human interaction...")

        def detect_arm_variation():
            new_effort = np.array(self.topics.torso_l_j.effort)
            delta = np.absolute(effort - new_effort)
            return np.amax(delta) > arm_threshold

        def detect_joy_variation():
            return np.amax(np.abs(self.topics.joy1.axes)) > joystick_threshold

        effort = np.array(self.topics.torso_l_j.effort)
        rate = rospy.Rate(50)
        is_joystick_demo = None
        while not rospy.is_shutdown():
            if detect_arm_variation():
                is_joystick_demo = False
                break
            elif detect_joy_variation():
                is_joystick_demo = True
                break
            rate.sleep()
        return is_joystick_demo

    ################################# Service callbacks
    def cb_get(self, request):
        return GetSensorialStateResponse(state=self.get())

    def cb_record(self, request):
        response = RecordResponse()
        # TODO eventually keep trace of the last XX points to start recording prior to the start signal

        folder = rospy.get_param('/experiment/results_path',
                                 '/home/pi/apex_results/')
        condition = rospy.get_param('experiment/current/method')
        trial = rospy.get_param('experiment/current/trial')
        task = rospy.get_param('experiment/current/task')
        iteration = rospy.get_param('experiment/current/iteration')
        experiment_name = rospy.get_param('/experiment/name')
        folder_trial = join(folder, experiment_name, "task_" + str(task),
                            "condition_" + str(condition),
                            "trial_" + str(trial))
        folder_traj = join(folder_trial, 'trajectories')

        if not isdir(folder_traj):
            makedirs(folder_traj)

        sensorial_points = []
        motor_points = []

        try:
            is_joystick_demo = False
            if request.human_demo.data:
                # Blocking... Wait for the user's grasp before recording...
                is_joystick_demo = self.wait_for_human_interaction()
                if not is_joystick_demo:
                    self.set_torso_compliant_srv(
                        SetTorsoCompliantRequest(compliant=True))

            rospy.loginfo("Recording {}...".format(
                "a joystick demo" if is_joystick_demo else "an arm demo"))
            for point in range(request.nb_points.data):
                if rospy.is_shutdown():
                    break
                sensorial_point = self.get()
                motor_point = self.topics.torso_l_j

                if point % self.params["divider_nb_points_sensory"] == 0:
                    response.demo.sensorial_demonstration.points.append(
                        sensorial_point)
                if not is_joystick_demo:
                    response.demo.torso_demonstration.points.append(
                        joints.state_to_jtp(motor_point))

                # Saving trajectories for file dumping
                sensorial_points.append(sensorial_point)
                motor_points.append(motor_point)

                self.rate.sleep()

            if not is_joystick_demo:
                self.set_torso_compliant_srv(
                    SetTorsoCompliantRequest(compliant=False))

            if is_joystick_demo:
                response.demo.type_demo = Demonstration.TYPE_DEMO_JOYSTICK
                demo_type = "demo_joy"
            elif request.human_demo.data:
                response.demo.type_demo = Demonstration.TYPE_DEMO_ARM
                demo_type = "demo_arm"
            else:
                response.demo.type_demo = Demonstration.TYPE_DEMO_NORMAL
                demo_type = "normal"

            # Iterate over the points actually recorded: the loop above may
            # have broken early on shutdown, so request.nb_points.data could
            # exceed len(motor_points)
            trajectories = [{"motor": joints.ros_to_list(motor_points[point]),
                             "sensorial": sensorial.ros_to_dict(sensorial_points[point]),
                             "type": demo_type}
                            for point in range(len(motor_points))]

            #with open(join(folder_traj, "iteration_{}.json".format(iteration)), 'w') as f:
            #    json.dump(trajectories, f)

        except rospy.exceptions.ROSInterruptException:
            rospy.logwarn("recording aborted!")
        except IOError:
            # TODO: total or partial iteration failure?
            rospy.logerr("I/O error when dumping trajectories")
            raise
        else:
            rospy.loginfo("Recorded!")

        return response
Example #21
from std_msgs.msg import String, Float64, Bool
from geometry_msgs.msg import Twist

# ROS-Dialogflow
from dialogflow_ros.msg import *

# Executes the commands recognized by DialogFlow
from dialogflow_mycommands import DialogflowCommandExecutor

# Use to convert Struct messages to JSON
# from google.protobuf.json_format import MessageToJson
sys.path.append('/opt/ros/melodic/lib/python2.7/dist-packages')  # in order to import cv2 under python3

rpack = RosPack()
# UMDL or PMDL file paths along with audio files
import wave
pkg_path = rpack.get_path('dialogflow_ros')
ding_path = pkg_path + '/scripts/snowboy/resources/ding.wav'
ding = wave.open(ding_path, 'rb')


def isNumber(s):
    try:
        float(s)
        return True
    except ValueError:
        return False
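
For reference, the helper accepts anything float() can parse:

print(isNumber('3.14'))  # True
print(isNumber('1e-3'))  # True
print(isNumber('abc'))   # False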

Example #22
class Voice(object):
    def __init__(self, tau, pa, pc, gui=False, audio=False):

        self.t = 0
        self.tau = tau
        self.pa = pa
        self.pc = pc

        # SOUND CONFIG
        self.v_o = list(np.log2([500, 900]))
        self.v_y = list(np.log2([300, 1700]))
        self.v_u = list(np.log2([300, 800]))
        self.v_e = list(np.log2([400, 2200]))
        self.v_i = list(np.log2([300, 2300]))

        # Retrieve caregiver sounds and trajectories from json
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'human_sounds.pickle')) as f:
            self.full_human_motor_traj, self.full_human_sounds_traj = pickle.load(f)
        self.human_sounds = self.full_human_sounds_traj.keys()
        rospy.loginfo('Voice node using the word %s for culbuto name' %
                      self.human_sounds[0])

        self.vowels = dict(o=self.v_o,
                           y=self.v_y,
                           u=self.v_u,
                           e=self.v_e,
                           i=self.v_i)

        self.human_sounds = ['eyu', 'oey', 'eou', 'oyi']
        #random.shuffle(self.human_sounds)
        print "human sounds", self.human_sounds

        def compress_sound_traj(sound):
            assert (len(sound) == 100)
            f1s = sound[:50]
            f3s = sound[50:]
            return np.append(f1s[np.array([0, 12, 24, 37, 49])],
                             f3s[np.array([0, 12, 24, 37, 49])])

        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compress_sound_traj(
                self.full_human_sounds_traj[hs])
            self.human_sounds_traj_std[hs] = [
                d - 8.5 for d in self.human_sounds_traj[hs][:5]
            ] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]
        '''
        def compute_s_sound(sound):
            s1 = self.vowels[sound[0]]
            s2 = [(self.vowels[sound[0]][0] + self.vowels[sound[1]][0]) / 2., (self.vowels[sound[0]][1] + self.vowels[sound[1]][1]) / 2.]
            s3 = self.vowels[sound[1]]
            s4 = [(self.vowels[sound[1]][0] + self.vowels[sound[2]][0]) / 2., (self.vowels[sound[1]][1] + self.vowels[sound[2]][1]) / 2.]
            s5 = self.vowels[sound[2]]
            rdm = 0.0 * (2.*np.random.random((1,10))[0] - 1.)
            return list(rdm + np.array([f[0] for f in [s1, s2, s3, s4, s5]] + [f[1] for f in [s1, s2, s3, s4, s5]]))
        
        
        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compute_s_sound(hs)
            self.human_sounds_traj_std[hs] = [d - 8.5 for d in self.human_sounds_traj[hs][:5]] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]    
        '''

        self.diva_traj = None
        self.produced_sound = None
        self.sound_tol = 0.4

        self.audio = audio

        self.time_arm = 0.
        self.time_diva = 0.
        self.time_arm_per_it = 0.
        self.time_diva_per_it = 0.

        self.n_dmps = 7  # Quick and dirty; TODO: move to JSON config
        self.timesteps = 50  # Quick and dirty; TODO: move to JSON config

        # DIVA CONFIG
        diva_cfg = dict(
            m_mins=np.array([-1, -1, -1, -1, -1, -1, -1]),
            m_maxs=np.array([1, 1, 1, 1, 1, 1, 1]),
            s_mins=np.array([7.5, 9.25]),
            s_maxs=np.array([9.5, 11.25]),
            m_used=range(7),
            s_used=range(1, 3),
            rest_position_diva=list([0] * 7),
            audio=self.audio,
            diva_use_initial=True,
            diva_use_goal=True,
            used_diva=list([True] * 7),
            n_dmps_diva=self.n_dmps,
            n_bfs_diva=2,
            move_steps=self.timesteps,
        )
        # Init Diva
        self.diva = Diva(**diva_cfg)

    def give_label(self, toy):

        if toy == "culbuto_1":
            #print "Caregiver says", self.human_sounds[0]
            sound = self.human_sounds[0]
            return sound, self.human_sounds_traj_std[sound]
        elif toy == "distractor":
            sound_id = np.random.choice([1, 2, 3])
            #print "Caregiver says", self.human_sounds[sound_id]
            sound = self.human_sounds[sound_id]
            return sound, self.human_sounds_traj_std[sound]
        else:
            raise NotImplementedError

    def analysis_sound(self, diva_traj):
        #return self.human_sounds[2]
        #print self.human_sounds_traj
        for hs in self.human_sounds:
            error = np.linalg.norm(
                np.array(self.human_sounds_traj[hs]) -
                np.array([f[0] for f in diva_traj[[0, 12, 24, 37, 49]]] +
                         [f[1] for f in diva_traj[[0, 12, 24, 37, 49]]]))
            if error < self.best_vocal_errors[hs]:
                self.best_vocal_errors[hs] = error
            if error < self.sound_tol:
                print "***********Agent says", hs
                return hs
        return None

    def get_caregiver_answer(self, touched_toy):
        if self.pc:
            if touched_toy:
                return "contingent"
            else:
                return "distractor"

        pa, k, theta, l = self.pa, 5., 0.25, np.log(2.) / 2.

        def distractor_answer(l):
            return np.random.exponential(1. / l)

        def caregiver_answer(pa, k, theta):
            if not touched_toy or np.random.random() < 1 - pa:
                return None
            else:
                return np.random.gamma(k, theta)

        c = caregiver_answer(pa, k, theta)

        d = distractor_answer(l)
        if c is not None and c < d and c < self.tau:
            #print "contingent answer"
            return "contingent"
        elif d < self.tau:
            #             if touched_toy:
            #                 print "distractor answer"
            return "distractor"
#         else:
#             if touched_toy:
#                 print "no answer"

    def baxter_analyse(self, is_culbuto_touched):
        # CAREGIVER VOCAL REACTION
        caregiver_answer = self.get_caregiver_answer(is_culbuto_touched)
        #print caregiver_answer
        sound_id = None
        if caregiver_answer == "contingent":
            if is_culbuto_touched:
                rospy.logwarn("CULBUTO WAS TOUCHED HIP HIP HIP HOURRRAY")
                sound_id, self.caregiver_sound = self.give_label("culbuto_1")

        elif caregiver_answer == "distractor":
            sound_id, self.caregiver_sound = self.give_label("distractor")
        else:
            # No sound
            self.caregiver_sound = None

        if sound_id is not None:  # A word was chosen by the caregiver
            rospy.logwarn('Voice Node: Baxter says %s' % sound_id)
            if self.audio:  # Play the word
                url = "http://raspberrypi.local:8080/api/v1/" + sound_id
                contents = urllib2.urlopen(url).read()
                #self.diva.compute_sensory_effect(self.full_human_motor_traj[sound_id],sound_power=30.)
        return self.caregiver_sound

    # Generate and play sound from diva trajectory
    # Returns a 10-D sound trajectory for baxter and torso
    # And a boolean set to True if the sound was culbuto's name
    def execute_analyse(self, diva_traj):
        t = time.time()

        #Reshape trajectory list into matrix
        diva_traj = np.reshape(diva_traj, (self.timesteps, self.n_dmps))

        is_culbuto_name = False

        self.raw_produced_sound = self.diva.compute_sensory_effect(diva_traj)
        self.produced_sound = self.analysis_sound(self.raw_produced_sound)
        if self.produced_sound is not None:
            # parent gives object if label is produced and object out of reach
            if self.produced_sound == self.human_sounds[0]:
                is_culbuto_name = True
                rospy.logwarn(
                    "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!CULBUTO NAME WAS PRONOUNCED"
                )

        self.self_sound = [
            f for formants in self.raw_produced_sound[[0, 12, 24, 37, 49]]
            for f in formants
        ]
        self.self_sound = self.self_sound[0::2] + self.self_sound[1::2]
        # MAP TO STD INTERVAL (only extracts first 2 formants)
        self.downsampled_self_sound = [
            d - 8.5 for d in self.self_sound[:5]
        ] + [d - 10.25 for d in self.self_sound[5:]]
        #self.downsampled_caregiver_sound = [d - 8.5 for d in self.caregiver_sound[:5]] + [d - 10.25 for d in self.caregiver_sound[5:]]

        #return bounds_min_max(s, self.conf.s_mins, self.conf.s_maxs)
        flat_raw_produced_sound = [
            f for formants in self.raw_produced_sound for f in formants
        ]
        return self.downsampled_self_sound, None, is_culbuto_name, self.produced_sound, flat_raw_produced_sound
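
get_caregiver_answer above races two latencies: a gamma-distributed caregiver response (drawn only when the toy was touched, with probability pa) against an exponentially distributed distractor, and whichever lands first within the window tau wins. A standalone sketch of that race using the same constants; tau, pa, and touched_toy are illustrative values:

import numpy as np

tau, pa, k, theta, lam = 1.0, 0.9, 5., 0.25, np.log(2.) / 2.
touched_toy = True

c = np.random.gamma(k, theta) if touched_toy and np.random.random() < pa else None
d = np.random.exponential(1. / lam)
if c is not None and c < d and c < tau:
    print("contingent")
elif d < tau:
    print("distractor")
else:
    print("no answer")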
Example #23
class RecordActionServer(object):
    # create messages that are used to publish feedback/result
    _feedback = RecordFeedback()
    _result = RecordResult()

    def __init__(self):
        self._action_name = '/pobax_playground/perception/record_server'
        self._as = actionlib.SimpleActionServer(self._action_name,
                                                RecordAction,
                                                execute_cb=self.execute_cb,
                                                auto_start=False)
        self._as.start()
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'perception.json')) as f:
            self.params = json.load(f)
        self.rate = rospy.Rate(self.params['recording_rate'])

        self.topics = TopicAggregator()

    def get(self):
        #TODO ADD VOICE
        state = SensorialState(hand=self.topics.torso_l_eef,
                               culbuto_1=self.topics.culbuto_1)
        return state

    # Choose nb_points from sensorial_demo, evenly spaced
    # (except for last point which is always the last one recorded)
    def choose_points(self, sensorial_traj, nb_points):
        sequence_size = len(sensorial_traj.points)
        if sequence_size < nb_points:
            rospy.logerr(
                '%s: Recording time was not long enough to gather enough data points: recorded: %s vs asked: %s'
                % (self._action_name, sequence_size, nb_points))
            return
        # Handy function for choosing m evenly spaced elements from a sequence of length n
        f = lambda m, n: [i * n // m + n // (2 * m) for i in range(m)]
        chosen_idx = f(nb_points, sequence_size)
        # Replace the last point in the chosen sequence with the last recorded point
        chosen_idx[-1] = (sequence_size - 1)
        result = SensorialTrajectory()
        for i in chosen_idx:
            result.points.append(sensorial_traj.points[i])
        return result

    def execute_cb(self, goal):
        rospy.loginfo('%s: Start non-blocking recording of culbuto' %
                      (self._action_name))
        sensorial_demo = SensorialTrajectory()
        while not rospy.is_shutdown():
            if self._as.is_preempt_requested():  # When requested, we stop registering positions
                rospy.loginfo('%s: Preempted' % self._action_name)
                break
            sensorial_demo.points.append(self.get())
            self.rate.sleep()

        self._result.sensorial_trajectory = self.choose_points(
            sensorial_demo, goal.nb_points.data)
        rospy.loginfo('%s: Succeeded' % self._action_name)
        self._as.set_succeeded(self._result)
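
The index-selection lambda in choose_points picks m evenly spaced positions from a sequence of length n, centering each pick inside its bucket; the last index is then overwritten with n - 1 so the final recorded point is always kept. A quick check of its behavior:

f = lambda m, n: [i * n // m + n // (2 * m) for i in range(m)]
print(f(5, 50))  # [5, 15, 25, 35, 45]
print(f(3, 10))  # [1, 4, 7] -- choose_points then replaces 7 with 9 (n - 1)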
Example #24
    def __init__(self, gui=False, audio=False):

        self.t = 0

        # ARM CONFIG

        arm_cfg = dict(m_mins=[-1.] * 3 * 7,
                       m_maxs=[1.] * 3 * 7,
                       s_mins=[-1.] * 3 * 50,
                       s_maxs=[1.] * 3 * 50,
                       lengths=[0.5, 0.3, 0.2],
                       angle_shift=0.5,
                       rest_state=[0., 0., 0.],
                       n_dmps=3,
                       n_bfs=6,
                       timesteps=50,
                       gui=gui)

        # SOUND CONFIG

        self.v_o = list(np.log2([500, 900]))
        self.v_y = list(np.log2([300, 1700]))
        self.v_u = list(np.log2([300, 800]))
        self.v_e = list(np.log2([400, 2200]))
        self.v_i = list(np.log2([300, 2300]))

        self.vowels = dict(o=self.v_o,
                           y=self.v_y,
                           u=self.v_u,
                           e=self.v_e,
                           i=self.v_i)

        # Retrieve caregiver sounds and trajectories from json
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'human_sounds.pickle')) as f:
            self.full_human_motor_traj, self.full_human_sounds_traj = pickle.load(f)
        self.human_sounds = self.full_human_sounds_traj.keys()
        rospy.loginfo('Voice node using the word %s for culbuto name' %
                      self.human_sounds[0])

        def compress_sound_traj(sound):
            assert (len(sound) == 100)
            f1s = sound[:50]
            f3s = sound[50:]
            return np.append(f1s[np.array([0, 12, 24, 37, 49])],
                             f3s[np.array([0, 12, 24, 37, 49])])

        # Reduce the number of sounds
        self.human_sounds = ['eyu', 'oey', 'eou', 'oyi']
        #random.shuffle(self.human_sounds)

        print(self.human_sounds)
        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compress_sound_traj(
                self.full_human_sounds_traj[hs])
            self.human_sounds_traj_std[hs] = [
                d - 8.5 for d in self.human_sounds_traj[hs][:5]
            ] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]

        self.sound_tol = 0.4

        # DIVA CONFIG

        diva_cfg = dict(
            m_mins=np.array([-1, -1, -1, -1, -1, -1, -1]),
            m_maxs=np.array([1, 1, 1, 1, 1, 1, 1]),
            s_mins=np.array([7.5, 9.25]),
            s_maxs=np.array([9.5, 11.25]),
            m_used=range(7),
            s_used=range(1, 3),
            rest_position_diva=list([0] * 7),
            audio=audio,
            diva_use_initial=True,
            diva_use_goal=True,
            used_diva=list([True] * 7),
            n_dmps_diva=7,
            n_bfs_diva=2,
            move_steps=50,
        )

        self.arm = ArmEnvironment(**arm_cfg)
        self.diva = DivaEnvironment(**diva_cfg)

        self.timesteps = 50

        # OBJECTS CONFIG

        self.caregiver_gives_obj_factor = 0.01
        self.tool_length = 0.5
        self.handle_tol = 0.2
        self.handle_tol_sq = self.handle_tol * self.handle_tol
        self.handle_noise = 0.
        self.object_tol_hand = 0.2
        self.object_tol_hand_sq = self.object_tol_hand * self.object_tol_hand
        self.object_tol_tool = 0.2
        self.object_tol_tool_sq = self.object_tol_tool * self.object_tol_tool

        self.diva_traj = None
        self.produced_sound = None

        Environment.__init__(self,
                             m_mins=[-1.] * (21 + 28),
                             m_maxs=[1.] * (21 + 28),
                             s_mins=[-1.] * 56,
                             s_maxs=[1.] * 56)

        self.current_tool = [-0.5, 0., 0.5, 0.]
        self.current_toy1 = [0.5, 0.5, 0.]
        #self.current_toy2 = [0.7, 0.7, 0.]
        #self.current_toy3 = [0.9, 0.9, 0.]
        self.current_caregiver = [0., 1.7]
        self.reset()
        self.compute_tool()

        self.purge_logs()

        if self.arm.gui:
            self.init_plot()

        self.count_diva = 0
        self.count_arm = 0
        self.count_tool = 0
        self.count_toy1_by_tool = 0
        #self.count_toy2_by_tool = 0
        #self.count_toy3_by_tool = 0
        self.count_toy1_by_hand = 0
        #self.count_toy2_by_hand = 0
        #self.count_toy3_by_hand = 0
        self.count_parent_give_label = 0
        self.count_parent_give_object = 0
        self.count_produced_sounds = {}
        for hs in self.human_sounds:
            self.count_produced_sounds[hs] = 0

        self.time_arm = 0.
        self.time_diva = 0.
        self.time_arm_per_it = 0.
        self.time_diva_per_it = 0.
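
A minimal standalone sketch (not from the source) of the keyframe compression performed by compress_sound_traj above: a 100-sample formant trajectory (50 log2-F1 values followed by 50 log2-F3 values) is reduced to 5 keyframes per formant.

import numpy as np

def compress_sound_traj(sound):
    # Keep keyframes 0, 12, 24, 37, 49 of each 50-sample formant track.
    assert len(sound) == 100
    f1s = sound[:50]
    f3s = sound[50:]
    keyframes = np.array([0, 12, 24, 37, 49])
    return np.append(f1s[keyframes], f3s[keyframes])

# Fake trajectory: a log2(F1) ramp followed by a log2(F3) ramp.
demo = np.concatenate([np.linspace(8.0, 9.0, 50),
                       np.linspace(10.0, 11.0, 50)])
print(compress_sound_traj(demo))  # 10 values: 5 F1 keyframes, then 5 F3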
Ejemplo n.º 25
class LearningNode(object):
    def __init__(self):
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'learning.json')) as f:
            self.params = json.load(f)
        #with open(join(self.rospack.get_path('pobax_playground'), 'config', 'dmps.json')) as f:
        #    self.dmps_params = json.load(f)

        self.arm_translator = EnvironmentTranslatorArm()
        self.diva_translator = EnvironmentTranslatorDiva()
        self.config = dict(m_mins=[-1.] * 68,
                           m_maxs=[1.] * 68,
                           s_mins=[-1.] * self.params["sensory_state_size"],
                           s_maxs=[1.] * self.params["sensory_state_size"])
        self.learning = Learning(
            self.config,
            sensory_state_size=self.params["sensory_state_size"],
            model_babbling=self.params["model_babbling"],
            n_motor_babbling=self.params["n_motor_babbling"],
            explo_noise=self.params["explo_noise"],
            choice_eps=self.params["choice_eps"],
            proba_imitate=self.params["proba_imitate"],
            tau=self.params["tau"])

        self.experiment_name = rospy.get_param(
            "/pobax_playground/experiment_name", "experiment")
        rospy.loginfo("Learning node will write {}".format(
            self.experiment_name))

        # Saved experiment files
        self.dir = join(self.rospack.get_path('pobax_playground'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)

        self.experiment_file = join(self.dir, self.experiment_name + '.pickle')

        starting_iteration = 0
        if isfile(self.experiment_file):
            starting_iteration = self.learning.restart_from_end_of_file(
                self.experiment_file)
        else:
            self.learning.start()
        rospy.set_param("/pobax_playground/starting_iteration",
                        starting_iteration)

        # Serving these services
        self.service_name_perceive = "/pobax_playground/learning/perceive"
        self.service_name_produce = "/pobax_playground/learning/produce"
        self.service_name_save = "/pobax_playground/learning/save"

        # Publishing these topics
        #self.pub_interests = rospy.Publisher('/pobax_playground/learning/interests', Interests, queue_size=1, latch=True)
        self.pub_focus = rospy.Publisher(
            '/pobax_playground/learning/current_focus',
            String,
            queue_size=1,
            latch=True)

        # Using these services
        self.service_name_get_perception = "/pobax_playground/perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo(
                "Learning node is waiting for service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception,
                                            GetSensorialState)

    def run(self):
        rospy.Service(self.service_name_perceive, Perceive, self.cb_perceive)
        rospy.Service(self.service_name_produce, Produce, self.cb_produce)
        rospy.Service(self.service_name_save, Save, self.cb_save)
        rospy.loginfo("Learning is up!")

        rate = rospy.Rate(self.params['publish_rate'])
        while not rospy.is_shutdown():
            #self.publish()
            rate.sleep()

    '''
    def publish(self):
        interests_array = self.learning.get_normalized_interests_evolution()
        interests = Interests()
        interests.names = self.learning.get_space_names()
        interests.num_iterations = UInt32(len(interests_array))
        interests.interests = [Float32(val) for val in interests_array.flatten()]

        self.pub_interests.publish(interests)
    '''

    ################################# Service callbacks
    def cb_perceive(self, request):
        #rospy.logwarn("Aborting perception, TODO modify learning core for new sensory space")
        #return PerceiveResponse()
        s_physical = self.arm_translator.sensory_trajectory_msg_to_list(
            request)
        s_sound = self.diva_translator.sensory_trajectory_msg_to_list(request)
        rospy.loginfo("Learning node is perceiving sensory trajectory")
        #print "physical"
        #print s_physical
        #print "sound"
        #print s_sound
        success = self.learning.perceive(np.append(s_physical, s_sound))
        if not success:
            rospy.logerr("Learner could not perceive this trajectory")
        return PerceiveResponse()

    def cb_produce(self, request):
        rospy.loginfo("Learning node is requesting the current state")
        state = self.get_state(GetSensorialStateRequest()).state

        rospy.loginfo("Learning node is producing...")
        w, param_type = self.learning.produce(
            self.arm_translator.get_context(state))

        produce_msg = ProduceResponse(trajectory_type=param_type)
        if param_type == "arm":
            trajectory_matrix = self.arm_translator.w_to_trajectory(w)
            trajectory_msg = self.arm_translator.matrix_to_trajectory_msg(
                trajectory_matrix)
            produce_msg.torso_trajectory = trajectory_msg
        elif param_type == "diva":
            trajectory_matrix = self.diva_translator.w_to_trajectory(w)
            trajectory_msg = self.diva_translator.matrix_to_trajectory_msg(
                trajectory_matrix)
            produce_msg.vocal_trajectory = trajectory_msg
        else:
            rospy.logerr(
                "Learning Node received an unknown param_type when calling produce()"
            )

        self.ready_for_interaction = True
        return produce_msg

    def cb_save(self, request):
        rospy.loginfo("Learning node saving current state...")
        data = self.learning.save()
        with open(self.experiment_file, 'wb') as f:
            pickle.dump(data, f)
        rospy.loginfo("Saved file (periodic save) into {}".format(
            self.experiment_file))
        return SaveResponse()
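
A plausible learning.json for the LearningNode above: the keys match what the node reads, but every value here is an illustrative assumption, not taken from the source repository.

import json

example_params = {
    "sensory_state_size": 132,   # assumed; must match the perception pipeline
    "model_babbling": "active",  # assumed strategy name
    "n_motor_babbling": 1000,    # iterations of pure random babbling
    "explo_noise": 0.05,
    "choice_eps": 0.1,
    "proba_imitate": 0.5,
    "tau": 1.0,
    "publish_rate": 10,          # Hz, used by run()'s rospy.Rate
}
print(json.dumps(example_params, indent=4))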
Ejemplo n.º 26
    def __init__(self, tau, pa, pc, gui=False, audio=False):

        self.t = 0
        self.tau = tau
        self.pa = pa
        self.pc = pc

        # SOUND CONFIG
        self.v_o = list(np.log2([500, 900]))
        self.v_y = list(np.log2([300, 1700]))
        self.v_u = list(np.log2([300, 800]))
        self.v_e = list(np.log2([400, 2200]))
        self.v_i = list(np.log2([300, 2300]))

        # Retrieve caregiver sounds and trajectories from pickle
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'human_sounds.pickle'), 'rb') as f:
            self.full_human_motor_traj, self.full_human_sounds_traj = pickle.load(
                f)
        self.human_sounds = list(self.full_human_sounds_traj.keys())
        rospy.loginfo('Voice node using the word %s for culbuto name' %
                      self.human_sounds[0])

        self.vowels = dict(o=self.v_o,
                           y=self.v_y,
                           u=self.v_u,
                           e=self.v_e,
                           i=self.v_i)

        self.human_sounds = ['eyu', 'oey', 'eou', 'oyi']
        #random.shuffle(self.human_sounds)
        print("human sounds", self.human_sounds)

        def compress_sound_traj(sound):
            assert (len(sound) == 100)
            f1s = sound[:50]
            f3s = sound[50:]
            return np.append(f1s[np.array([0, 12, 24, 37, 49])],
                             f3s[np.array([0, 12, 24, 37, 49])])

        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compress_sound_traj(
                self.full_human_sounds_traj[hs])
            self.human_sounds_traj_std[hs] = [
                d - 8.5 for d in self.human_sounds_traj[hs][:5]
            ] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]
        '''
        def compute_s_sound(sound):
            s1 = self.vowels[sound[0]]
            s2 = [(self.vowels[sound[0]][0] + self.vowels[sound[1]][0]) / 2., (self.vowels[sound[0]][1] + self.vowels[sound[1]][1]) / 2.]
            s3 = self.vowels[sound[1]]
            s4 = [(self.vowels[sound[1]][0] + self.vowels[sound[2]][0]) / 2., (self.vowels[sound[1]][1] + self.vowels[sound[2]][1]) / 2.]
            s5 = self.vowels[sound[2]]
            rdm = 0.0 * (2.*np.random.random((1,10))[0] - 1.)
            return list(rdm + np.array([f[0] for f in [s1, s2, s3, s4, s5]] + [f[1] for f in [s1, s2, s3, s4, s5]]))
        
        
        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compute_s_sound(hs)
            self.human_sounds_traj_std[hs] = [d - 8.5 for d in self.human_sounds_traj[hs][:5]] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]    
        '''

        self.diva_traj = None
        self.produced_sound = None
        self.sound_tol = 0.4

        self.audio = audio

        self.time_arm = 0.
        self.time_diva = 0.
        self.time_arm_per_it = 0.
        self.time_diva_per_it = 0.

        self.n_dmps = 7  # Quick and dirty; TODO: move to JSON config
        self.timesteps = 50  # Quick and dirty; TODO: move to JSON config

        # DIVA CONFIG
        diva_cfg = dict(
            m_mins=np.array([-1, -1, -1, -1, -1, -1, -1]),
            m_maxs=np.array([1, 1, 1, 1, 1, 1, 1]),
            s_mins=np.array([7.5, 9.25]),
            s_maxs=np.array([9.5, 11.25]),
            m_used=range(7),
            s_used=range(1, 3),
            rest_position_diva=list([0] * 7),
            audio=self.audio,
            diva_use_initial=True,
            diva_use_goal=True,
            used_diva=list([True] * 7),
            n_dmps_diva=self.n_dmps,
            n_bfs_diva=2,
            move_steps=self.timesteps,
        )
        # Init Diva
        self.diva = Diva(**diva_cfg)
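
Illustrative arithmetic (an inference from the flags above, not stated in the source): with n_dmps_diva=7, n_bfs_diva=2 and both diva_use_initial and diva_use_goal enabled, each DMP carries 2 basis weights plus an initial and a goal value, giving a 28-dimensional vocal motor vector.

n_dmps, n_bfs = 7, 2
motor_dims = n_dmps * (n_bfs + 1 + 1)  # 2 weights + initial + goal per DMP
print(motor_dims)  # 28, the DIVA share of the 21 + 28 motor space used below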
Ejemplo n.º 28
class CogSci2017Environment(Environment):
    def __init__(self, gui=False, audio=False):

        self.t = 0

        # ARM CONFIG

        arm_cfg = dict(m_mins=[-1.] * 3 * 7,
                       m_maxs=[1.] * 3 * 7,
                       s_mins=[-1.] * 3 * 50,
                       s_maxs=[1.] * 3 * 50,
                       lengths=[0.5, 0.3, 0.2],
                       angle_shift=0.5,
                       rest_state=[0., 0., 0.],
                       n_dmps=3,
                       n_bfs=6,
                       timesteps=50,
                       gui=gui)

        # SOUND CONFIG

        self.v_o = list(np.log2([500, 900]))
        self.v_y = list(np.log2([300, 1700]))
        self.v_u = list(np.log2([300, 800]))
        self.v_e = list(np.log2([400, 2200]))
        self.v_i = list(np.log2([300, 2300]))

        self.vowels = dict(o=self.v_o,
                           y=self.v_y,
                           u=self.v_u,
                           e=self.v_e,
                           i=self.v_i)

        # Retrieve caregiver sounds and trajectories from pickle
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'human_sounds.pickle'), 'rb') as f:
            self.full_human_motor_traj, self.full_human_sounds_traj = pickle.load(
                f)
        self.human_sounds = list(self.full_human_sounds_traj.keys())
        rospy.loginfo('Voice node using the word %s for culbuto name' %
                      self.human_sounds[0])

        def compress_sound_traj(sound):
            assert (len(sound) == 100)
            f1s = sound[:50]
            f3s = sound[50:]
            return np.append(f1s[np.array([0, 12, 24, 37, 49])],
                             f3s[np.array([0, 12, 24, 37, 49])])

        # Reduce the number of sounds to four fixed words
        self.human_sounds = ['eyu', 'oey', 'eou', 'oyi']
        #random.shuffle(self.human_sounds)

        print(self.human_sounds)
        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compress_sound_traj(
                self.full_human_sounds_traj[hs])
            self.human_sounds_traj_std[hs] = [
                d - 8.5 for d in self.human_sounds_traj[hs][:5]
            ] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]

        self.sound_tol = 0.4

        # DIVA CONFIG

        diva_cfg = dict(
            m_mins=np.array([-1, -1, -1, -1, -1, -1, -1]),
            m_maxs=np.array([1, 1, 1, 1, 1, 1, 1]),
            s_mins=np.array([7.5, 9.25]),
            s_maxs=np.array([9.5, 11.25]),
            m_used=range(7),
            s_used=range(1, 3),
            rest_position_diva=list([0] * 7),
            audio=audio,
            diva_use_initial=True,
            diva_use_goal=True,
            used_diva=list([True] * 7),
            n_dmps_diva=7,
            n_bfs_diva=2,
            move_steps=50,
        )

        self.arm = ArmEnvironment(**arm_cfg)
        self.diva = DivaEnvironment(**diva_cfg)

        self.timesteps = 50

        # OBJECTS CONFIG

        self.caregiver_gives_obj_factor = 0.01
        self.tool_length = 0.5
        self.handle_tol = 0.2
        self.handle_tol_sq = self.handle_tol * self.handle_tol
        self.handle_noise = 0.
        self.object_tol_hand = 0.2
        self.object_tol_hand_sq = self.object_tol_hand * self.object_tol_hand
        self.object_tol_tool = 0.2
        self.object_tol_tool_sq = self.object_tol_tool * self.object_tol_tool

        self.diva_traj = None
        self.produced_sound = None

        Environment.__init__(self,
                             m_mins=[-1.] * (21 + 28),
                             m_maxs=[1.] * (21 + 28),
                             s_mins=[-1.] * 56,
                             s_maxs=[1.] * 56)

        self.current_tool = [-0.5, 0., 0.5, 0.]
        self.current_toy1 = [0.5, 0.5, 0.]
        #self.current_toy2 = [0.7, 0.7, 0.]
        #self.current_toy3 = [0.9, 0.9, 0.]
        self.current_caregiver = [0., 1.7]
        self.reset()
        self.compute_tool()

        self.purge_logs()

        if self.arm.gui:
            self.init_plot()

        self.count_diva = 0
        self.count_arm = 0
        self.count_tool = 0
        self.count_toy1_by_tool = 0
        #self.count_toy2_by_tool = 0
        #self.count_toy3_by_tool = 0
        self.count_toy1_by_hand = 0
        #self.count_toy2_by_hand = 0
        #self.count_toy3_by_hand = 0
        self.count_parent_give_label = 0
        self.count_parent_give_object = 0
        self.count_produced_sounds = {}
        for hs in self.human_sounds:
            self.count_produced_sounds[hs] = 0

        self.time_arm = 0.
        self.time_diva = 0.
        self.time_arm_per_it = 0.
        self.time_diva_per_it = 0.

    def save(self):
        return dict(
            t=self.t,
            human_sounds=self.human_sounds,
            best_vocal_errors=self.best_vocal_errors,
            best_vocal_errors_evolution=self.best_vocal_errors_evolution,
            count_diva=self.count_diva,
            count_arm=self.count_arm,
            count_tool=self.count_tool,
            count_toy1_by_tool=self.count_toy1_by_tool,
            #count_toy2_by_tool=self.count_toy2_by_tool,
            #count_toy3_by_tool=self.count_toy3_by_tool,
            count_toy1_by_hand=self.count_toy1_by_hand,
            #count_toy2_by_hand=self.count_toy2_by_hand,
            #count_toy3_by_hand=self.count_toy3_by_hand,
            count_parent_give_label=self.count_parent_give_label,
            count_parent_give_object=self.count_parent_give_object,
            count_produced_sounds=self.count_produced_sounds,
        )

    def reset(self):

        if self.t % 20 == 0:
            self.reset_toys()

        self.current_tool[3] = 0.
        self.current_toy1[2] = 0.
        #self.current_toy2[2] = 0.
        #self.current_toy3[2] = 0.
        self.reset_caregiver()
        self.current_context = self.get_current_context()

        self.hand = []
        self.tool = []
        self.toy1 = []
        #self.toy2 = []
        #self.toy3 = []
        self.caregiver = []

    def purge_logs(self):
        self.logs_tool = []
        self.logs_toy1 = []
        #self.logs_toy2 = []
        #self.logs_toy3 = []
        self.logs_caregiver = []

    def reset_tool(self):
        self.current_tool[:2] = self.reset_rand2d(region=1)
        self.current_tool[2] = np.random.random()
        self.compute_tool()

    def set_tool(self, pos, angle):
        self.current_tool[:2] = pos
        self.current_tool[2] = angle
        self.compute_tool()

    def reset_toys(self, region=0):
        self.current_toy1[:2] = self.reset_rand2d(region=region)
        #self.current_toy2[:2] = self.reset_rand2d(region=region)
        #self.current_toy3[:2] = self.reset_rand2d(region=region)

    def set_toys(self, pos1, pos2, pos3):
        self.current_toy1[:2] = pos1
        #self.current_toy2[:2] = pos2
        #self.current_toy3[:2] = pos3

    def reset_caregiver(self):
        self.current_caregiver = self.reset_rand2d()

    def set_caregiver(self, pos):
        self.current_caregiver = pos

    def reset_rand2d(self, region=None):
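        # region 1: unit disc; region 2: annulus with r in [1, 1.5];
        # region 3: annulus with r in [1.5, 2]; region 0: uniform mix of
        # regions 1-3; None: uniform over the square [-2, 2] x [-2, 2].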
        if region == 0:
            rdm = np.random.random()
            if rdm < 1. / 3.:
                return self.reset_rand2d(region=1)
            elif rdm < 2. / 3.:
                return self.reset_rand2d(region=2)
            else:
                return self.reset_rand2d(region=3)
        elif region == 1:
            alpha = 2. * np.pi * np.random.random()
            r = np.random.random()
            return [r * np.cos(alpha), r * np.sin(alpha)]
        elif region == 2:
            alpha = 2. * np.pi * np.random.random()
            r = 1. + 0.5 * np.random.random()
            return [r * np.cos(alpha), r * np.sin(alpha)]
        elif region == 3:
            alpha = 2. * np.pi * np.random.random()
            r = 1.5 + 0.5 * np.random.random()
            return [r * np.cos(alpha), r * np.sin(alpha)]
        elif region is None:
            return [4. * np.random.random() - 2., 4. * np.random.random() - 2.]

    def get_current_context(self):
        return [
            d / 2. for d in self.current_tool[:2] + self.current_toy1[:2] +
            self.current_caregiver
        ]

    def compute_tool(self):
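        # current_tool[2] stores the orientation in units of pi (range
        # [-1, 1]), so a spans [-pi, pi]; the magnetic end of the stick sits
        # tool_length away from the handle along that direction.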
        a = np.pi * self.current_tool[2]
        self.tool_end_pos = [
            self.current_tool[0] + np.cos(a) * self.tool_length,
            self.current_tool[1] + np.sin(a) * self.tool_length
        ]

    def compute_motor_command(self, m_ag):
        return bounds_min_max(m_ag, self.conf.m_mins, self.conf.m_maxs)

    def motor_babbling(self, arm=False, audio=False):
        m = rand_bounds(self.conf.m_bounds)[0]
        if arm:
            r = 1.
        elif audio:
            r = 0.
        else:
            r = np.random.random()
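        # With probability 0.5 babble vocally (zero the 21 arm dims);
        # otherwise babble with the arm (zero the 28 DIVA dims). The arm
        # and audio flags force one modality via r.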
        if r < 0.5:
            m[:21] = 0.
        else:
            m[21:] = 0.
        return m

    def is_hand_free(self):
        return self.current_tool[3] == 0 and self.current_toy1[2] != 1

    def is_tool_free(self):
        return self.current_toy1[2] != 2

    def give_label(self, toy):
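        # The caregiver utters toy1's name (human_sounds[0]) when the toy is
        # in hand, and otherwise a random distractor among human_sounds[1:4].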

        if toy == "toy1":
            #print "Caregiver says", self.human_sounds[0]
            return self.human_sounds_traj[self.human_sounds[0]]
        elif toy == "random":
            sound_id = np.random.choice([1, 2, 3])
            #print "Caregiver says", self.human_sounds[sound_id]
            return self.human_sounds_traj[self.human_sounds[sound_id]]
        else:
            raise NotImplementedError

    def analysis_sound(self, diva_traj):
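        # Match the produced DIVA trajectory against each caregiver word by
        # comparing 5-keyframe compressed formant tracks; a word counts as
        # recognized when the L2 error falls below sound_tol.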
        #return self.human_sounds[2]
        best = None
        is_best_found = False
        best_best = None
        best_best_sound_tol = 999
        for hs in self.human_sounds:
            error = np.linalg.norm(
                np.array(self.human_sounds_traj[hs]) -
                np.array([f[0] for f in diva_traj[[0, 12, 24, 37, 49]]] +
                         [f[1] for f in diva_traj[[0, 12, 24, 37, 49]]]))
            if error < self.best_vocal_errors[hs]:
                self.best_vocal_errors[hs] = error
            if error < self.sound_tol:
                if not is_best_found:
                    best = hs
                    is_best_found = True
                if error < best_best_sound_tol:
                    best_best = hs
                    best_best_sound_tol = error
        if best:
            if best_best != best:
                print("########### I WAS RIGHT #########, best is " + best +
                      " but best best is " + best_best)
            print("***********Agent says", best)
            return best
        else:
            return None

    def caregiver_moves_obj(self, caregiver_pos, current_toy):
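        # Move the toy a small step (caregiver_gives_obj_factor) toward the
        # midpoint between the caregiver and the workspace center.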
        middle = [caregiver_pos[0] / 2, caregiver_pos[1] / 2]
        return [
            current_toy[0] + self.caregiver_gives_obj_factor *
            (middle[0] - current_toy[0]), current_toy[1] +
            self.caregiver_gives_obj_factor * (middle[1] - current_toy[1]),
            current_toy[2]
        ]

    def compute_sensori_effect(self, m):
        t = time.time()

        m_arm = m[:21]
        m_diva = m[21:]

        # Exactly one modality is active per action: either the arm command
        # or the vocal command must be all zeros.
        assert np.linalg.norm(m_arm) * np.linalg.norm(m_diva) == 0.

        if np.linalg.norm(m_arm) > 0.:
            cmd = "arm"
        else:
            cmd = "diva"

        self.purge_logs()

        arm_traj = self.arm.update(m_arm)
        #print "arm traj", arm_traj

        if cmd == "diva":
            diva_traj = self.diva.update(m_diva)
            self.diva_traj = diva_traj
            self.produced_sound = self.analysis_sound(self.diva_traj)
            if self.produced_sound is not None:
                self.count_produced_sounds[self.produced_sound] += 1
                if self.produced_sound in self.human_sounds[:3]:
                    self.count_parent_give_object += 1
        else:
            diva_traj = np.zeros((50, 2))
            self.produced_sound = None

        for i in range(self.timesteps):

            # Arm
            arm_x, arm_y, arm_angle = arm_traj[i]

            # Tool
            if not self.current_tool[3]:
                if self.is_hand_free() and (
                    (arm_x - self.current_tool[0])**2. +
                    (arm_y - self.current_tool[1])**2. < self.handle_tol_sq):
                    self.current_tool[0] = arm_x
                    self.current_tool[1] = arm_y
                    self.current_tool[2] = np.mod(
                        arm_angle + self.handle_noise * np.random.randn() + 1,
                        2) - 1
                    self.compute_tool()
                    self.current_tool[3] = 1
            else:
                self.current_tool[0] = arm_x
                self.current_tool[1] = arm_y
                self.current_tool[2] = np.mod(
                    arm_angle + self.handle_noise * np.random.randn() + 1,
                    2) - 1
                self.compute_tool()
            self.logs_tool.append([
                self.current_tool[:2], self.current_tool[2], self.tool_end_pos,
                self.current_tool[3]
            ])

            if cmd == "arm":
                # Toy 1
                if self.current_toy1[2] == 1 or (self.is_hand_free() and (
                    (arm_x - self.current_toy1[0])**2 +
                    (arm_y - self.current_toy1[1])**2 <
                        self.object_tol_hand_sq)):
                    self.current_toy1[0] = arm_x
                    self.current_toy1[1] = arm_y
                    self.current_toy1[2] = 1
                if self.current_toy1[2] == 2 or (
                    (not self.current_toy1[2] == 1) and self.is_tool_free() and
                    ((self.tool_end_pos[0] - self.current_toy1[0])**2 +
                     (self.tool_end_pos[1] - self.current_toy1[1])**2 <
                     self.object_tol_tool_sq)):
                    self.current_toy1[0] = self.tool_end_pos[0]
                    self.current_toy1[1] = self.tool_end_pos[1]
                    self.current_toy1[2] = 2
                self.logs_toy1.append([self.current_toy1])

                self.logs_caregiver.append([self.current_caregiver])

            else:
                # parent gives object if label is produced
                if self.produced_sound == self.human_sounds[0]:
                    self.current_toy1 = self.caregiver_moves_obj(
                        self.current_caregiver, self.current_toy1)

                self.logs_toy1.append([self.current_toy1])
                self.logs_caregiver.append([self.current_caregiver])

            # Record the 5 sensory keyframes (same indices used everywhere).
            if i in [0, 12, 24, 37, 49]:
                self.hand = self.hand + [arm_x, arm_y]
                self.tool = self.tool + self.current_tool[:2]
                self.toy1 = self.toy1 + self.current_toy1[:2]
                self.caregiver = self.caregiver + self.current_caregiver

            if self.arm.gui:
                if i % 5 == 0:
                    self.plot_step(self.ax, i)

        if cmd == "arm":
            # parent gives label if object is touched by hand
            if self.current_toy1[2] == 1:
                label = self.give_label("toy1")
            else:
                label = self.give_label("random")
            self.sound = label
            #print "parent sound", label, self.sound
        else:
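            # Flatten the 5 keyframes of the produced DIVA trajectory, then
            # reorder from interleaved (F1, F3) pairs to all F1 values
            # followed by all F3 values.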
            self.sound = [
                f for formants in diva_traj[[0, 12, 24, 37, 49]]
                for f in formants
            ]
            self.sound = self.sound[0::2] + self.sound[1::2]

        # Sort dims
        self.hand = self.hand[0::2] + self.hand[1::2]
        self.tool = self.tool[0::2] + self.tool[1::2]
        self.toy1 = self.toy1[0::2] + self.toy1[1::2]
        self.caregiver = self.caregiver[0::2] + self.caregiver[1::2]
        #self.sound = self.sound[0::2] + self.sound[1::2]

        # Analysis
        if np.linalg.norm(m[21:]) > 0:
            self.count_diva += 1
        else:
            self.count_arm += 1
        if self.current_tool[3]:
            self.count_tool += 1
        if self.current_toy1[2] == 1:
            self.count_toy1_by_hand += 1
        elif self.current_tool[3] and self.current_toy1[2] == 2:
            self.count_toy1_by_tool += 1

        self.count_parent_give_label = self.count_toy1_by_hand

        if cmd == "arm":
            self.time_arm += time.time() - t
            self.time_arm_per_it = self.time_arm / self.count_arm
        else:
            self.time_diva += time.time() - t
            self.time_diva_per_it = self.time_diva / self.count_diva

        #print "previous context", len(self.current_context), self.current_context
        #print "s_hand", len(self.hand), self.hand
        #print "s_tool", len(self.tool), self.tool
        #print "s_toy1", len(self.toy1), self.toy1
        #print "s_toy2", len(self.toy2), self.toy2
        #print "s_toy3", len(self.toy3), self.toy3
        #print "s_sound", len(self.sound), self.sound
        #print "s_caregiver", len(self.caregiver), self.caregiver

        self.t += 1
        if self.t % 100 == 0:
            self.best_vocal_errors_evolution += [self.best_vocal_errors.copy()]
        if self.t % 1000 == 0:
            print("best_vocal_errors", [(hs, self.best_vocal_errors[hs])
                                        for hs in self.human_sounds])

        context = self.current_context

        # MAP TO STD INTERVAL
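        # Positions live in [-2, 2], so dividing by 2 maps them to [-1, 1];
        # log2 formants are centered by subtracting 8.5 (F1) and 10.25 (F3),
        # matching the DIVA ranges [7.5, 9.5] (F1) and [9.25, 11.25] (F3).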
        hand = [d / 2 for d in self.hand]
        tool = [d / 2 for d in self.tool]
        toy1 = [d / 2 for d in self.toy1]
        sound = [d - 8.5 for d in self.sound[:5]
                 ] + [d - 10.25 for d in self.sound[5:]]
        caregiver = [d / 2 for d in self.caregiver]

        s = context + hand + tool + toy1 + sound + caregiver
        #print "s_sound", sound
        return bounds_min_max(s, self.conf.s_mins, self.conf.s_maxs)

    def update(self, m_ag, reset=True, log=True):
        """ Computes sensorimotor values from motor orders.

        :param numpy.array m_ag: a motor command with shape (self.conf.m_ndims, ) or a set of n motor commands of shape (n, self.conf.m_ndims)

        :param bool log: emit the motor and sensory values for logging purpose (default: True).

        :returns: an array of shape (self.conf.ndims, ) or (n, self.conf.ndims) according to the shape of the m_ag parameter, containing the motor values (which can be different from m_ag, e.g. bounded according to self.conf.m_bounds) and the corresponding sensory values.

        .. note:: self.conf.ndims = self.conf.m_ndims + self.conf.s_ndims is the dimensionality of the sensorimotor space (dim of the motor space + dim of the sensory space).
        """

        if len(np.array(m_ag).shape) == 1:
            s = self.one_update(m_ag, log)
        else:
            s = []
            for m in m_ag:
                s.append(self.one_update(m, log))
            s = np.array(s)
        if reset:
            self.reset()
        return s

    def print_stats(self):
        print("\n----------------------\nEnvironment Statistics\n----------------------\n")
        print("# Iterations:", self.t)
        print("# Arm trials:", self.count_arm)
        print("# Vocal trials:", self.count_diva)
        print("# Tool actions:", self.count_tool)
        print("# Toy sounds:", self.human_sounds[0], self.human_sounds[1],
              self.human_sounds[2])
        print("# Produced sounds:", self.count_produced_sounds)
        print("# Toy1 was reached by tool:", self.count_toy1_by_tool)
        print("# Toy1 was reached by hand:", self.count_toy1_by_hand)
        print("# Parent gave vocal labels:", self.count_parent_give_label)
        print("# Parent gave object:", self.count_parent_give_object)
        print()

    def init_plot(self):
        #plt.ion()
        self.ax = plt.subplot()
        plt.gcf().set_size_inches(6., 6., forward=True)
        plt.gca().set_aspect('equal')
        #plt.show(block=False)
        plt.axis([-2, 2, -2, 2])
        plt.draw()

    def plot_tool_step(self, ax, i, **kwargs_plot):
        handle_pos = self.logs_tool[i][0]
        end_pos = self.logs_tool[i][2]

        ax.plot([handle_pos[0], end_pos[0]], [handle_pos[1], end_pos[1]],
                '-',
                color=colors_config['stick'],
                lw=6,
                **kwargs_plot)
        ax.plot(handle_pos[0],
                handle_pos[1],
                'o',
                color=colors_config['gripper'],
                ms=12,
                **kwargs_plot)
        ax.plot(end_pos[0],
                end_pos[1],
                'o',
                color=colors_config['magnetic'],
                ms=12,
                **kwargs_plot)

    def plot_toy1_step(self, ax, i, **kwargs_plot):
        pos = self.logs_toy1[i][0]
        rectangle = plt.Rectangle((pos[0] - 0.1, pos[1] - 0.1),
                                  0.2,
                                  0.2,
                                  color=colors[3],
                                  **kwargs_plot)
        ax.add_patch(rectangle)

    def plot_caregiver_step(self, ax, i, **kwargs_plot):
        pos = self.logs_caregiver[i][0]
        rectangle = plt.Rectangle((pos[0] - 0.1, pos[1] - 0.1),
                                  0.2,
                                  0.2,
                                  color="black",
                                  **kwargs_plot)
        ax.add_patch(rectangle)

    def plot_step(self, ax, i, clean=True, **kwargs_plot):
        #t0 = time.time()
        plt.pause(0.0001)
        #print "t1", time.time() - t0
        if clean:
            plt.cla()
        #print "t2", time.time() - t0
        self.arm.plot_step(ax, i, **kwargs_plot)
        self.plot_tool_step(ax, i, **kwargs_plot)
        self.plot_toy1_step(ax, i, **kwargs_plot)
        self.plot_caregiver_step(ax, i, **kwargs_plot)
        #print "t3", time.time() - t0

        if clean:
            plt.xlim([-2, 2])
            plt.ylim([-2, 2])
            plt.gca().set_xticklabels([])
            plt.gca().set_yticklabels([])
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())
            plt.xlabel("")
            plt.ylabel("")

        plt.draw()
        plt.show(block=False)
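
A hedged usage sketch for CogSci2017Environment above (assumes the class and its dependencies, such as ArmEnvironment, DivaEnvironment and the explauto-style Environment base class, are importable; nothing here is from the source):

env = CogSci2017Environment(gui=False, audio=False)
m = env.motor_babbling(arm=True)  # random arm command, DIVA dims zeroed
sm = env.update(m)                # sensorimotor values, see update() docstring
env.print_stats()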