Example #1
0
    def __init__(self):
        """Set up SIGINT handling, a ROS subscriber and a minimal Qt panel.

        NOTE(review): app.exec_() blocks until the Qt event loop exits, so
        nothing after this constructor runs while the window is open.
        """
        # Register Ctrl-C sigint
        signal.signal(signal.SIGINT, self._signal_handler)
        rpack = RosPack()
        # NOTE(review): pkg_path is computed but not used in this block.
        pkg_path = rpack.get_path('dialogflow_ros')

        # Battery charge updates are delivered to cbk_bat_charge.
        rospy.Subscriber('/bat_charge', Float64, self.cbk_bat_charge)

        # -------------------------------------------------- #
        #  BUILD THE PANEL
        # ---------------------------------------------- #
        global app
        app = QApplication(sys.argv)
        #MainWindow = AppMainWindow()
        win = QMainWindow()
        win.resize(800, 600)

        # NOTE(review): BtnLayout and win are built but never attached/shown;
        # only the standalone 'Click' button below is displayed.
        BtnLayout = QtWidgets.QHBoxLayout()
        self.PlusButton = QtWidgets.QPushButton('+')
        self.MinusButton = QtWidgets.QPushButton('-')
        BtnLayout.addWidget(self.PlusButton)
        BtnLayout.addWidget(self.MinusButton)

        button = QPushButton('Click')

        button.clicked.connect(self.on_button_clicked)
        button.show()

        app.exec_()
    def __init__(self):
        """Load learning config, init shared state, and set up ROS interfaces.

        Blocks at the end in rospy.wait_for_service until the perception
        service becomes available.
        """
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('apex_playground'), 'config',
                     'learning.json')) as f:
            self.params = json.load(f)

        self.translator = EnvironmentTranslator()
        # NOTE(review): left as None here; presumably assigned elsewhere in
        # the class before use — confirm.
        self.learning = None

        # User control and critical resources
        self.lock_iteration = RLock()  # guards iteration state across threads
        self.ready_for_interaction = True
        self.focus = None
        self.set_iteration = -1  # -1 means "no externally forced iteration"

        # Saved experiment files
        self.dir = join(self.rospack.get_path('apex_playground'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)
        # self.stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # self.source_file = join(self.dir, self.source_name + '.pickle')
        self.main_experiment = True
        self.condition = ""
        self.trial = ""
        self.task = ""

        # Serving these services
        self.service_name_perceive = "learning/perceive"
        self.service_name_produce = "learning/produce"
        self.service_name_set_interest = "learning/set_interest"
        self.service_name_set_iteration = "learning/set_iteration"
        self.service_name_interests = "learning/get_interests"

        # Publishing these topics (latch=True: late subscribers still receive
        # the last published message)
        self.pub_focus = rospy.Publisher('learning/current_focus',
                                         String,
                                         queue_size=1,
                                         latch=True)
        self.pub_user_focus = rospy.Publisher('learning/user_focus',
                                              String,
                                              queue_size=1,
                                              latch=True)
        self.pub_ready = rospy.Publisher('learning/ready_for_interaction',
                                         Bool,
                                         queue_size=1,
                                         latch=True)
        self.pub_iteration = rospy.Publisher('iteration',
                                             UInt32,
                                             queue_size=1,
                                             latch=True)

        # Using these services
        self.service_name_get_perception = "perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo(
                "Learning  node is waiting service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception,
                                            GetSensorialState)
Example #3
0
    def __init__(self):
        """Load the snowboy hotword model and prepare the audio output.

        Reads the 'ding' feedback wav from the dialogflow_ros package and
        opens a PyAudio output stream matching its format so the sound can
        be played back later.
        """
        self.interrupted = False
        self.detector = None
        rpack = RosPack()
        # UMDL or PMDL file paths along with audio files
        pkg_path = rpack.get_path('dialogflow_ros')

        #self.model_path = pkg_path + '/scripts/snowboy/resources/jarvis.umdl'
        self.model_path = pkg_path + '/scripts/snowboy/resources/OK_Robot.pmdl'
        # Fix: the message previously read "Hotoword".
        print("Hotword: {}".format(self.model_path))

        ding_path = pkg_path + '/scripts/snowboy/resources/ding.wav'
        # Setup df
        self.df_client = None
        # Setup audio output
        ding = wave.open(ding_path, 'rb')
        self.ding_data = ding.readframes(ding.getnframes())
        self.audio = pyaudio.PyAudio()
        self.stream_out = self.audio.open(
            format=self.audio.get_format_from_width(ding.getsampwidth()),
            channels=ding.getnchannels(),
            rate=ding.getframerate(),
            input=False,
            output=True)
        # Fix: close the wav file handle (it was previously leaked); all
        # frames and parameters have already been read above.
        ding.close()
        self.last_contexts = []
        rospy.loginfo("HOTWORD_CLIENT: Ready!")
Example #4
0
 def __init__(self, filename=None, ros_pack=None):
     """Initialize the rqt_gui entry point.

     Reuses *ros_pack* when provided, otherwise creates a fresh RosPack.
     """
     pack = ros_pack or RosPack()
     base_path = pack.get_path('qt_gui')
     super(Main, self).__init__(base_path,
                                invoked_filename=filename,
                                settings_filename='rqt_gui')
     self._ros_pack = pack
Example #5
0
    def __init__(self):
        """Load config, build the Learning model, and set up ROS interfaces.

        Resumes a previous experiment from its pickle file when one exists,
        otherwise starts a fresh one.  Blocks at the end until the
        perception service is available.
        """
        self.rospack = RosPack()
        with open(join(self.rospack.get_path('nips2016'), 'config', 'learning.json')) as f:
            self.params = json.load(f)

        self.translator = EnvironmentTranslator()
        self.learning = Learning(self.translator.config, 
                                 n_motor_babbling=self.params["n_motor_babbling"], 
                                 explo_noise=self.params["explo_noise"], 
                                 choice_eps=self.params["choice_eps"], 
                                 enable_hand=self.params["enable_hand"],
                                 normalize_interests=self.params["normalize_interests"])
        self.experiment_name = rospy.get_param("/nips2016/experiment_name", "experiment")
        # self.source_name = rospy.get_param("/nips2016/source_name", "experiment")

        rospy.loginfo("Learning node will write {}".format(self.experiment_name))
        # rospy.loginfo("Learning node will read {}".format(self.source_name))

        # User control and critical resources
        self.lock_iteration = RLock()  # guards iteration state across threads
        self.ready_for_interaction = True
        self.focus = None
        self.set_iteration = -1  # -1 means "no externally forced iteration"
        self.demonstrate = None

        # Saved experiment files
        self.dir = join(self.rospack.get_path('nips2016'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)
        # self.stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # self.source_file = join(self.dir, self.source_name + '.pickle')
        self.experiment_file = join(self.dir, self.experiment_name + '.pickle') # if self.source_name == "none" else self.source_file
        self.main_experiment = True

        # Resume from a saved experiment when possible.
        if isfile(self.experiment_file):
            self.learning.restart_from_end_of_file(self.experiment_file)
        else:
            self.learning.start()

        # Serving these services
        self.service_name_perceive = "/nips2016/learning/perceive"
        self.service_name_produce = "/nips2016/learning/produce"
        self.service_name_set_interest = "/nips2016/learning/set_interest"
        self.service_name_set_iteration = "/nips2016/learning/set_iteration"
        self.service_name_demonstrate = "/nips2016/learning/assess"

        # Publishing these topics (latch=True: late subscribers still receive
        # the last published message)
        self.pub_interests = rospy.Publisher('/nips2016/learning/interests', Interests, queue_size=1, latch=True)
        self.pub_focus = rospy.Publisher('/nips2016/learning/current_focus', String, queue_size=1, latch=True)
        self.pub_user_focus = rospy.Publisher('/nips2016/learning/user_focus', String, queue_size=1, latch=True)
        self.pub_ready = rospy.Publisher('/nips2016/learning/ready_for_interaction', Bool, queue_size=1, latch=True)
        self.pub_iteration = rospy.Publisher('/nips2016/iteration', UInt32, queue_size=1, latch=True)

        # Using these services
        self.service_name_get_perception = "/nips2016/perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo("Learning  node is waiting service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception, GetSensorialState)
Example #6
0
 def create_application(self, argv):
     """Create the Qt application and install the rqt window icon."""
     from python_qt_binding.QtGui import QIcon
     application = super(Main, self).create_application(argv)
     icon_path = os.path.join(RosPack().get_path('rqt_gui'),
                              'resource', 'rqt.svg')
     application.setWindowIcon(QIcon(icon_path))
     return application
Example #7
0
def handle_rwt_srv(req):
    """Service handler: list the .srv definitions of every package.

    Iterates all packages that expose srv files and collects, for each,
    the stringified result of rosmsg.list_srvs.

    :param req: the incoming service request (unused)
    :returns: SrvListResponse wrapping one string per package
    """
    # Comprehension replaces the manual append loop (same order, same items).
    message_list = [
        str(rosmsg.list_srvs(package_name))
        for package_name, _path in rosmsg.iterate_packages(RosPack(),
                                                           rosmsg.MODE_SRV)
    ]
    return SrvListResponse(message_list)
Example #8
0
def handle_rwt_action(req):
    """Service handler: list message definitions of action-providing packages.

    NOTE(review): the original called rosmsg.list_msgs for each package that
    provides actions; that behavior is preserved here, though
    rosaction-specific listing may have been intended — confirm with callers.

    :param req: the incoming service request (unused)
    :returns: ActionListResponse wrapping one string per package
    """
    # Fix: the loop variable previously shadowed the 'action' mode constant;
    # the manual append loop is now a comprehension.
    message_list = [
        str(rosmsg.list_msgs(package_name))
        for package_name, _path in rosaction.iterate_packages(
            RosPack(), rosaction.MODE_ACTION)
    ]
    return ActionListResponse(message_list)
    def __init__(self, title='Options', description=None):
        """Build the settings dialog from its .ui file and apply *title*.

        An optional *description* label is added when provided.
        """
        super(SimpleSettingsDialog, self).__init__()
        self.setObjectName('SimpleSettingsDialog')

        ui_file = os.path.join(
            RosPack().get_path('qt_gui_py_common'),
            'resource', 'simple_settings_dialog.ui')
        loadUi(ui_file, self)

        self.setWindowTitle(title)
        self._settings_groups = []

        if description is not None:
            self.add_label(description)
Example #10
0
    def __init__(self, title='Options', description=None):
        """Build the quaternion-view options dialog and apply *title*.

        An optional *description* label is added when provided.
        """
        super(SimpleSettingsDialog, self).__init__()
        self.setObjectName('SimpleSettingsDialog')

        ui_file = os.path.join(
            RosPack().get_path('rqt_quaternion_view'),
            'resource', 'QuaternionViewOptions.ui')
        loadUi(ui_file, self)

        self.setWindowTitle(title)

        if description is not None:
            self.add_label(description)
Example #11
0
    def __init__(self):
        """Load perception parameters and declare this node's service names."""
        self.rospack = RosPack()
        config_path = join(self.rospack.get_path('nips2016'),
                           'config', 'perception.json')
        with open(config_path) as config_file:
            self.params = json.load(config_file)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "/nips2016/perception/get"
        self.service_name_record = "/nips2016/perception/record"
        # Using these services
        self.service_name_set_compliant = "/nips2016/torso/set_compliant"
        # All topics are read and stored in that object
        self.topics = TopicAggregator()
Example #12
0
    def __init__(self):
        """Start the record action server and load perception parameters."""
        self._action_name = '/pobax_playground/perception/record_server'
        self._as = actionlib.SimpleActionServer(self._action_name,
                                                RecordAction,
                                                execute_cb=self.execute_cb,
                                                auto_start=False)
        self._as.start()

        self.rospack = RosPack()
        config_path = join(self.rospack.get_path('pobax_playground'),
                           'config', 'perception.json')
        with open(config_path) as config_file:
            self.params = json.load(config_file)
        self.rate = rospy.Rate(self.params['recording_rate'])

        self.topics = TopicAggregator()
Example #13
0
    def __init__(self):
        """Load perception parameters and declare this node's service names."""
        self.rospack = RosPack()
        config_path = join(self.rospack.get_path('pobax_playground'),
                           'config', 'perception.json')
        with open(config_path) as config_file:
            self.params = json.load(config_file)
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "/pobax_playground/perception/get"
        self.service_name_record = "/pobax_playground/perception/record"
        #self.service_name_baxter_start_record = "/pobax_playground/perception/baxter_start_record"
        #self.service_name_baxter_stop_record = "/pobax_playground/perception/baxter_stop_record"
        # Using these services
        # All topics are read and stored in that object
        self.topics = TopicAggregator()
Example #14
0
    def __init__(self):
        """Load voice parameters and instantiate the DIVA vocal tract."""
        self.rospack = RosPack()
        config_path = join(self.rospack.get_path('pobax_playground'),
                           'config', 'voice.json')
        with open(config_path) as config_file:
            self.params = json.load(config_file)

        # Serving these services
        self.service_name_execute_analyse = "/pobax_playground/voice/execute_analyse"
        self.service_name_baxter_analyse = "/pobax_playground/voice/baxter_analyse"

        # Init DIVA vocal tract simulator
        cfg = self.params
        self.voice = Voice(cfg["tau"], cfg["pa"], cfg["pc"],
                           gui=cfg["gui"], audio=cfg["audio"])
Example #15
0
    def __init__(self):
        """Load general and torso configs and declare this node's services."""
        self.rospack = RosPack()

        def load_config(filename):
            # Read one JSON config file from the apex_playground package.
            with open(join(self.rospack.get_path('apex_playground'),
                           'config', filename)) as config_file:
                return json.load(config_file)

        self.params = load_config('general.json')
        self.torso_params = load_config('torso.json')
        self.rate = rospy.Rate(self.params['recording_rate'])

        # Serving these services
        self.service_name_get = "perception/get"
        self.service_name_record = "perception/record"
        # Using these services
        self.service_name_set_compliant = "{}/left_arm/set_compliant".format(
            self.torso_params["robot_name"])
        # All topics are read and stored in that object
        self.topics = PerceptionServices()
Example #16
0
from std_msgs.msg import String, Float64, Bool
from geometry_msgs.msg import Twist

# ROS-Dialogflow
from dialogflow_ros.msg import *

# Executes the commands recognized by DialogFlow
from dialogflow_mycommands import DialogflowCommandExecutor

# Use to convert Struct messages to JSON
# from google.protobuf.json_format import MessageToJson
sys.path.append('/opt/ros/melodic/lib/python2.7/dist-packages'
                )  # in order to import cv2 under python3

rpack = RosPack()
# UMDL or PMDL file paths along with audio files
import wave
pkg_path = rpack.get_path('dialogflow_ros')
ding_path = pkg_path + '/scripts/snowboy/resources/ding.wav'
# NOTE(review): opened at import time and never closed in this chunk —
# presumably consumed (and closed) by later code; confirm.
ding = wave.open(ding_path, 'rb')


def isNumber(s):
    """Return True if *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True

Example #17
0
    def __init__(self, tau, pa, pc, gui=False, audio=False):
        """Load caregiver sound trajectories and configure the DIVA simulator.

        Python 2 code (bare print statement below).

        :param tau: stored as-is; semantics defined by the consumer — confirm
        :param pa: stored as-is — confirm semantics with callers
        :param pc: stored as-is — confirm semantics with callers
        :param gui: accepted but not used in this constructor
        :param audio: forwarded to the Diva configuration
        """

        self.t = 0
        self.tau = tau
        self.pa = pa
        self.pc = pc

        # SOUND CONFIG
        # Vowel formant pairs, stored in log2(Hz).
        self.v_o = list(np.log2([500, 900]))
        self.v_y = list(np.log2([300, 1700]))
        self.v_u = list(np.log2([300, 800]))
        self.v_e = list(np.log2([400, 2200]))
        self.v_i = list(np.log2([300, 2300]))

        # Retrieve caregiver sounds and trajectories from json
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'human_sounds.pickle')) as f:
            self.full_human_motor_traj, self.full_human_sounds_traj = pickle.load(
                f)
        self.human_sounds = self.full_human_sounds_traj.keys()
        rospy.loginfo('Voice node using the word %s for culbuto name' %
                      self.human_sounds[0])

        self.vowels = dict(o=self.v_o,
                           y=self.v_y,
                           u=self.v_u,
                           e=self.v_e,
                           i=self.v_i)

        # NOTE(review): this hard-coded list overwrites the pickle-derived
        # human_sounds above (and the loginfo already reported the old
        # first element) — confirm this is intentional.
        self.human_sounds = ['eyu', 'oey', 'eou', 'oyi']
        #random.shuffle(self.human_sounds)
        print "human sounds", self.human_sounds

        def compress_sound_traj(sound):
            # Downsample a 100-point (f1, f3) trajectory to 5 points per formant.
            assert (len(sound) == 100)
            f1s = sound[:50]
            f3s = sound[50:]
            return np.append(f1s[np.array([0, 12, 24, 37, 49])],
                             f3s[np.array([0, 12, 24, 37, 49])])

        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compress_sound_traj(
                self.full_human_sounds_traj[hs])
            # Center the trajectory: 8.5 offset for f1 points, 10.25 for f3.
            self.human_sounds_traj_std[hs] = [
                d - 8.5 for d in self.human_sounds_traj[hs][:5]
            ] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]
        '''
        def compute_s_sound(sound):
            s1 = self.vowels[sound[0]]
            s2 = [(self.vowels[sound[0]][0] + self.vowels[sound[1]][0]) / 2., (self.vowels[sound[0]][1] + self.vowels[sound[1]][1]) / 2.]
            s3 = self.vowels[sound[1]]
            s4 = [(self.vowels[sound[1]][0] + self.vowels[sound[2]][0]) / 2., (self.vowels[sound[1]][1] + self.vowels[sound[2]][1]) / 2.]
            s5 = self.vowels[sound[2]]
            rdm = 0.0 * (2.*np.random.random((1,10))[0] - 1.)
            return list(rdm + np.array([f[0] for f in [s1, s2, s3, s4, s5]] + [f[1] for f in [s1, s2, s3, s4, s5]]))
        
        
        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compute_s_sound(hs)
            self.human_sounds_traj_std[hs] = [d - 8.5 for d in self.human_sounds_traj[hs][:5]] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]    
        '''

        self.diva_traj = None
        self.produced_sound = None
        self.sound_tol = 0.4

        self.audio = audio

        # Timing accumulators (totals and per-iteration).
        self.time_arm = 0.
        self.time_diva = 0.
        self.time_arm_per_it = 0.
        self.time_diva_per_it = 0.

        self.n_dmps = 7  #Quick and Dirty, todo json
        self.timesteps = 50  #Quick and Dirty, todo json

        # DIVA CONFIG
        diva_cfg = dict(
            m_mins=np.array([-1, -1, -1, -1, -1, -1, -1]),
            m_maxs=np.array([1, 1, 1, 1, 1, 1, 1]),
            s_mins=np.array([7.5, 9.25]),
            s_maxs=np.array([9.5, 11.25]),
            m_used=range(7),
            s_used=range(1, 3),
            rest_position_diva=list([0] * 7),
            audio=self.audio,
            diva_use_initial=True,
            diva_use_goal=True,
            used_diva=list([True] * 7),
            n_dmps_diva=self.n_dmps,
            n_bfs_diva=2,
            move_steps=self.timesteps,
        )
        # Init Diva
        self.diva = Diva(**diva_cfg)
Example #18
0
    def __init__(self):
        """Load learning config, build the Learning model, and wire up ROS.

        Resumes a previous experiment from its pickle file when one exists,
        otherwise starts fresh; the starting iteration is exported as a ROS
        parameter.  Blocks at the end until the perception service is up.
        """
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'learning.json')) as f:
            self.params = json.load(f)
        #with open(join(self.rospack.get_path('pobax_playground'), 'config', 'dmps.json')) as f:
        #    self.dmps_params = json.load(f)

        self.arm_translator = EnvironmentTranslatorArm()
        self.diva_translator = EnvironmentTranslatorDiva()
        # 68-dim motor space; sensory size comes from the config file.
        self.config = dict(m_mins=[-1.] * 68,
                           m_maxs=[1.] * 68,
                           s_mins=[-1.] * self.params["sensory_state_size"],
                           s_maxs=[1.] * self.params["sensory_state_size"])
        self.learning = Learning(
            self.config,
            sensory_state_size=self.params["sensory_state_size"],
            model_babbling=self.params["model_babbling"],
            n_motor_babbling=self.params["n_motor_babbling"],
            explo_noise=self.params["explo_noise"],
            choice_eps=self.params["choice_eps"],
            proba_imitate=self.params["proba_imitate"],
            tau=self.params["tau"])

        self.experiment_name = rospy.get_param(
            "/pobax_playground/experiment_name", "experiment")
        rospy.loginfo("Learning node will write {}".format(
            self.experiment_name))

        # Saved experiment files
        self.dir = join(self.rospack.get_path('pobax_playground'), 'logs')
        if not os.path.isdir(self.dir):
            os.makedirs(self.dir)

        self.experiment_file = join(self.dir, self.experiment_name + '.pickle')

        # Resume from a saved experiment when possible.
        starting_iteration = 0
        if isfile(self.experiment_file):
            starting_iteration = self.learning.restart_from_end_of_file(
                self.experiment_file)
        else:
            self.learning.start()
        rospy.set_param("/pobax_playground/starting_iteration",
                        starting_iteration)

        # Serving these services
        self.service_name_perceive = "/pobax_playground/learning/perceive"
        self.service_name_produce = "/pobax_playground/learning/produce"
        self.service_name_save = "/pobax_playground/learning/save"

        # Publishing these topics (latch=True: late subscribers still receive
        # the last published message)
        #self.pub_interests = rospy.Publisher('/pobax_playground/learning/interests', Interests, queue_size=1, latch=True)
        self.pub_focus = rospy.Publisher(
            '/pobax_playground/learning/current_focus',
            String,
            queue_size=1,
            latch=True)

        # Using these services
        self.service_name_get_perception = "/pobax_playground/perception/get"
        for service in [self.service_name_get_perception]:
            rospy.loginfo(
                "Learning  node is waiting service {}...".format(service))
            rospy.wait_for_service(service)
        self.get_state = rospy.ServiceProxy(self.service_name_get_perception,
                                            GetSensorialState)
Example #19
0
    def __init__(self, gui=False, audio=False):
        """Build the combined arm + DIVA simulation environment.

        Loads caregiver sound trajectories from a pickle, configures the arm
        and DIVA sub-environments, initializes object/tool state and the
        interaction counters.  Python 2 code (bare print statement below).

        :param gui: forwarded to the arm environment (enables plotting)
        :param audio: forwarded to the DIVA environment
        """

        self.t = 0

        # ARM CONFIG

        arm_cfg = dict(m_mins=[-1.] * 3 * 7,
                       m_maxs=[1.] * 3 * 7,
                       s_mins=[-1.] * 3 * 50,
                       s_maxs=[1.] * 3 * 50,
                       lengths=[0.5, 0.3, 0.2],
                       angle_shift=0.5,
                       rest_state=[0., 0., 0.],
                       n_dmps=3,
                       n_bfs=6,
                       timesteps=50,
                       gui=gui)

        # SOUND CONFIG
        # Vowel formant pairs, stored in log2(Hz).

        self.v_o = list(np.log2([500, 900]))
        self.v_y = list(np.log2([300, 1700]))
        self.v_u = list(np.log2([300, 800]))
        self.v_e = list(np.log2([400, 2200]))
        self.v_i = list(np.log2([300, 2300]))

        self.vowels = dict(o=self.v_o,
                           y=self.v_y,
                           u=self.v_u,
                           e=self.v_e,
                           i=self.v_i)

        # Retrieve caregiver sounds and trajectories from json
        self.rospack = RosPack()
        with open(
                join(self.rospack.get_path('pobax_playground'), 'config',
                     'human_sounds.pickle')) as f:
            self.full_human_motor_traj, self.full_human_sounds_traj = pickle.load(
                f)
        self.human_sounds = self.full_human_sounds_traj.keys()
        rospy.loginfo('Voice node using the word %s for culbuto name' %
                      self.human_sounds[0])

        def compress_sound_traj(sound):
            # Downsample a 100-point (f1, f3) trajectory to 5 points per formant.
            assert (len(sound) == 100)
            f1s = sound[:50]
            f3s = sound[50:]
            return np.append(f1s[np.array([0, 12, 24, 37, 49])],
                             f3s[np.array([0, 12, 24, 37, 49])])

        #reduce number of sounds
        # NOTE(review): this hard-coded list overwrites the pickle-derived
        # human_sounds above — confirm this is intentional.
        self.human_sounds = ['eyu', 'oey', 'eou', 'oyi']
        #random.shuffle(self.human_sounds)

        print self.human_sounds
        self.human_sounds_traj = dict()
        self.human_sounds_traj_std = dict()
        self.best_vocal_errors = {}
        self.best_vocal_errors_evolution = []
        for hs in self.human_sounds:
            self.best_vocal_errors[hs] = 10.
            self.human_sounds_traj[hs] = compress_sound_traj(
                self.full_human_sounds_traj[hs])
            # Center the trajectory: 8.5 offset for f1 points, 10.25 for f3.
            self.human_sounds_traj_std[hs] = [
                d - 8.5 for d in self.human_sounds_traj[hs][:5]
            ] + [d - 10.25 for d in self.human_sounds_traj[hs][5:]]

        self.sound_tol = 0.4

        # DIVA CONFIG

        diva_cfg = dict(
            m_mins=np.array([-1, -1, -1, -1, -1, -1, -1]),
            m_maxs=np.array([1, 1, 1, 1, 1, 1, 1]),
            s_mins=np.array([7.5, 9.25]),
            s_maxs=np.array([9.5, 11.25]),
            m_used=range(7),
            s_used=range(1, 3),
            rest_position_diva=list([0] * 7),
            audio=audio,
            diva_use_initial=True,
            diva_use_goal=True,
            used_diva=list([True] * 7),
            n_dmps_diva=7,
            n_bfs_diva=2,
            move_steps=50,
        )

        self.arm = ArmEnvironment(**arm_cfg)
        self.diva = DivaEnvironment(**diva_cfg)

        self.timesteps = 50

        # OBJECTS CONFIG
        # Distance tolerances are pre-squared to avoid sqrt in hot paths.

        self.caregiver_gives_obj_factor = 0.01
        self.tool_length = 0.5
        self.handle_tol = 0.2
        self.handle_tol_sq = self.handle_tol * self.handle_tol
        self.handle_noise = 0.
        self.object_tol_hand = 0.2
        self.object_tol_hand_sq = self.object_tol_hand * self.object_tol_hand
        self.object_tol_tool = 0.2
        self.object_tol_tool_sq = self.object_tol_tool * self.object_tol_tool

        self.diva_traj = None
        self.produced_sound = None

        # Explicit base-class init with the combined motor/sensory bounds.
        Environment.__init__(self,
                             m_mins=[-1.] * (21 + 28),
                             m_maxs=[1.] * (21 + 28),
                             s_mins=[-1.] * 56,
                             s_maxs=[1.] * 56)

        self.current_tool = [-0.5, 0., 0.5, 0.]
        self.current_toy1 = [0.5, 0.5, 0.]
        #self.current_toy2 = [0.7, 0.7, 0.]
        #self.current_toy3 = [0.9, 0.9, 0.]
        self.current_caregiver = [0., 1.7]
        self.reset()
        self.compute_tool()

        self.purge_logs()

        if self.arm.gui:
            self.init_plot()

        # Interaction counters, reset per experiment.
        self.count_diva = 0
        self.count_arm = 0
        self.count_tool = 0
        self.count_toy1_by_tool = 0
        #self.count_toy2_by_tool = 0
        #self.count_toy3_by_tool = 0
        self.count_toy1_by_hand = 0
        #self.count_toy2_by_hand = 0
        #self.count_toy3_by_hand = 0
        self.count_parent_give_label = 0
        self.count_parent_give_object = 0
        self.count_produced_sounds = {}
        for hs in self.human_sounds:
            self.count_produced_sounds[hs] = 0

        # Timing accumulators (totals and per-iteration).
        self.time_arm = 0.
        self.time_diva = 0.
        self.time_arm_per_it = 0.
        self.time_diva_per_it = 0.