    def configure(self, rf):
        # set the module name used to name ports
        self.setName(rf.check("name", yarp.Value("/exampleModule")).asString())

        # open io ports
        if not self.input_port.open(self.getName() + "/AE:i"):
            print("Could not open input port")
            return False
        self.input_port.setStrict()

        if not self.rpc_port.open(self.getName() + "/rpc"):
            print("Could not open rpc port")
            return False
        self.attach_rpc_server(self.rpc_port)  # commands sent to the rpc port are handled in the respond method

        # read flags and parameters
        example_flag = rf.check("example_flag") and rf.check("example_flag", yarp.Value(True)).asBool()
        default_value = 0.1
        example_parameter = rf.check("example_parameter", yarp.Value(default_value)).asDouble()

        # do any other set-up required here
        # start the asynchronous and synchronous threads
        threading.Thread(target=self.run).start()

        return True
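For context, the configure(self, rf) methods collected on this page are typically the entry point of a yarp.RFModule subclass and are invoked once by runModule(). A minimal, hypothetical harness (the class name ExampleModule is an assumption, not taken from the example above):

import sys
import yarp

if __name__ == '__main__':
    yarp.Network.init()                  # connect to the YARP name server
    rf = yarp.ResourceFinder()
    rf.configure(sys.argv)               # makes --name and other flags visible to rf.check()
    module = ExampleModule()             # hypothetical RFModule subclass defining configure() above
    module.runModule(rf)                 # calls configure(rf), then updateModule() periodically
    yarp.Network.fini()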
    def configure(self, rf):
        self.module_name = rf.check("name", yarp.Value("SpeakerRecognition"),
                                    "module name (string)").asString()

        self.model_path = rf.check(
            "path",
            yarp.Value(
                "/home/jonas/CLionProjects/soundLocalizer/python-scripts/analysis/data/saved_model/my_model.h5"
            ), "Model path (.h5) (string)").asString()

        # Create handle port to read message
        self.handle_port.open('/' + self.module_name)

        # Create a port to receive an audio object
        self.audio_in_port.open('/' + self.module_name + '/recorder:i')

        print("TensorFlow version: {}".format(tf.__version__))

        # Create a new model instance
        self.model = tf.keras.models.load_model(self.model_path)

        self.head_motorPort.open('/' + self.module_name + '/angle:o')

        print("Model successfully loaded, running")

        yarpLog.info("Initialization complete")

        return True
    def configure(self, rf):
        self.module_name = rf.check("name", yarp.Value("AudioRecorder"),
                                    "module name (string)").asString()

        self.saving_path = rf.check("path", yarp.Value("/tmp"),
                                    "saving path name (string)").asString()

        # Create handle port to read message
        self.handle_port.open('/' + self.module_name)

        # Create a port to receive an audio object
        self.audio_in_port.open('/' + self.module_name + '/recorder:i')

        if not os.path.exists(f'{self.saving_path}/{self.date_path}'):
            yarpLog.info("Creating directory for saving")
            os.makedirs(f'{self.saving_path}/{self.date_path}')

        yarpLog.info("Initialization complete")

        return True
Example #4
def save_file(segment, index):
    '''
    Save an audio segment to a wav file and notify the Command Recognizer pipeline.
    :param segment: the audio segment
    :param index: index that will be appended to the name of the wav file so the segment can be identified
    '''
    waveform = np.array(audio_reconstruction(segment))
    fname = os.path.join(destination_folder, str(index) + ".wav")
    waveform = waveform.astype(np.int16)
    # the wav writer expects an integer sample rate
    write(fname, int(sampling_rate / downsampling_factor), waveform)
    # notify the CR_pipeline
    file_output_port.write(yarp.Value(fname))
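save_file() relies on module-level objects defined elsewhere in that script (destination_folder, sampling_rate, downsampling_factor, file_output_port) plus the audio_reconstruction helper and a wav writer such as scipy.io.wavfile.write. A minimal sketch of the notification port setup, with purely illustrative port names:

import yarp

yarp.Network.init()
file_output_port = yarp.Port()
file_output_port.open('/audioSegmenter/file:o')   # assumed name, not from the original script
# the Command Recognizer pipeline would read the filename on its own input port, e.g.:
yarp.Network.connect('/audioSegmenter/file:o', '/commandRecognizer/file:i')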
    def configure(self, rf):

        # Module parameters
        self.module_name = rf.check("name", yarp.Value("SpeechToText"),
                                    "module name (string)").asString()

        self.saving_path = rf.check("path", yarp.Value("/tmp"),
                                    "saving path name (string)").asString()
        self.language = rf.check(
            "lang", yarp.Value("it-IT"),
            "Language for the Google speech API (string)").asString()

        # Opening Ports
        # Create handle port to read message
        self.handle_port.open('/' + self.module_name)

        # Audio
        self.audio_in_port.open('/' + self.module_name + '/speech:i')
        self.audio_power_port.open('/' + self.module_name + '/power:i')

        # Speech
        self.speech_output.open('/' + self.module_name + '/speech:o')
        self.actions_output.open('/' + self.module_name + '/actions:o')

        # Speech recognition parameters
        self.threshold_voice = rf.check(
            "voice_threshold", yarp.Value(4.0),
            "Energy threshold used by the VAD (double)").asDouble()

        self.speech_recognizer = sr.Recognizer()

        # Actions list
        keywords = rf.find("KEYWORDS").asList()
        actions = rf.find("ACTIONS").asList()
        self.process_actions(keywords, actions)

        info("Initialization complete")

        return True
Example #6
    def configure(self, rf):
        moduleName = rf.check("name", yarp.Value("reachableArea")).asString()
        self.setName(moduleName)

        self.rpc_port = yarp.Port()
        self.rpc_port.open("/reachableArea/rpc")
        self.attach(self.rpc_port)

        self.human_area_x_bounds = self.get_bounds_from_config(
            "human_area_x_bounds", -0.7, -0.5)
        self.human_area_y_bounds = self.get_bounds_from_config(
            "human_area_y_bounds", -0.3, -0.5)
        self.shared_area_x_bounds = self.get_bounds_from_config(
            "shared_area_x_bounds", -0.7, -0.5)
        self.shared_area_y_bounds = self.get_bounds_from_config(
            "shared_area_y_bounds", -0.7, -0.5)
        self.robot_area_x_bounds = self.get_bounds_from_config(
            "robot_area_x_bounds", -0.7, -0.5)
        self.robot_area_y_bounds = self.get_bounds_from_config(
            "robot_area_y_bounds", -0.7, -0.5)

        return True
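get_bounds_from_config() is not shown in this example. One plausible implementation, assuming the ResourceFinder is kept on the module as self.rf (which the snippet above does not show), reads a two-element list from the configuration and falls back to the given defaults:

    def get_bounds_from_config(self, key, default_min, default_max):
        # illustrative only: expects a config entry such as
        #   human_area_x_bounds (-0.7 -0.5)
        if self.rf.check(key):
            bounds = self.rf.find(key).asList()
            if bounds is not None and bounds.size() >= 2:
                return [bounds.get(0).asDouble(), bounds.get(1).asDouble()]
        return [default_min, default_max]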
    def read(self, connection):
        print('entered read')
        if not connection.isValid():
            print("Connection shutting down")
            return False
        binp = yarp.Bottle()
        bout = po.prepare()
        bout.clear()
        ok = binp.read(connection)
        if not ok:
            print("Failed to read input")
            return False
        # data = event_driven.getData(binp)
        data = decode(binp.pop().toString())
        print(data[0][0])
        processed = process(data)
        str_rep_new = encode(processed)
        v = yarp.Value()
        v.fromString('(%s)' % str_rep_new)
        bout.addString('AE')
        bout.add(v)
        po.write()
        return True
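The read(self, connection) signature above matches YARP's PortReader callback interface, and po is a module-level output port written to inside the callback. A hypothetical way to wire it up (class and port names are assumptions):

import yarp

class Processor(yarp.PortReader):
    # the read(self, connection) method shown above would live here
    pass

yarp.Network.init()
po = yarp.BufferedPortBottle()       # output port used by read() via po.prepare()/po.write()
po.open('/processor/AE:o')
reader = Processor()
pi = yarp.Port()
pi.setReader(reader)                 # incoming data triggers reader.read(connection)
pi.open('/processor/AE:i')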
Example #8
    def configure(self, rf):

        self.config = yarp.Property()
        self.config.fromConfigFile(
            '/code/icub_perceptual_optimisation/yarp/config.ini')
        self.width = self.config.findGroup('CAMERA').find('width').asInt32()
        self.height = self.config.findGroup('CAMERA').find('height').asInt32()

        if self.config.findGroup('GENERAL').find('show_images').asBool():
            import matplotlib
            matplotlib.use('TKAgg')
            self.ax_left = plt.subplot(1, 2, 1)
            self.ax_right = plt.subplot(1, 2, 2)

        # prepare motor driver
        self.head_motors = 'head'
        self.head_motorprops = yarp.Property()
        self.head_motorprops.put("device", "remote_controlboard")
        self.head_motorprops.put("local",
                                 "/client_babbling/" + self.head_motors)
        self.head_motorprops.put("remote", "/icubSim/" + self.head_motors)

        self.left_motors = 'left_arm'
        self.left_motorprops = yarp.Property()
        self.left_motorprops.put("device", "remote_controlboard")
        self.left_motorprops.put("local",
                                 "/client_babbling/" + self.left_motors)
        self.left_motorprops.put("remote", "/icubSim/" + self.left_motors)

        self.right_motors = 'right_arm'
        self.right_motorprops = yarp.Property()
        self.right_motorprops.put("device", "remote_controlboard")
        self.right_motorprops.put("local",
                                  "/client_babbling/" + self.right_motors)
        self.right_motorprops.put("remote", "/icubSim/" + self.right_motors)

        # create remote driver
        self.head_driver = yarp.PolyDriver(self.head_motorprops)
        print('head motor driver prepared')
        # query motor control interfaces
        self.head_iPos = self.head_driver.viewIPositionControl()
        self.head_iVel = self.head_driver.viewIVelocityControl()
        self.head_iEnc = self.head_driver.viewIEncoders()
        self.head_iCtlLim = self.head_driver.viewIControlLimits()

        self.left_armDriver = yarp.PolyDriver(self.left_motorprops)
        print('left motor driver prepared')
        # query motor control interfaces
        self.left_iPos = self.left_armDriver.viewIPositionControl()
        self.left_iVel = self.left_armDriver.viewIVelocityControl()
        self.left_iEnc = self.left_armDriver.viewIEncoders()
        self.left_iCtlLim = self.left_armDriver.viewIControlLimits()

        self.right_armDriver = yarp.PolyDriver(self.right_motorprops)
        print('right motor driver prepared')
        # query motor control interfaces
        self.right_iPos = self.right_armDriver.viewIPositionControl()
        self.right_iVel = self.right_armDriver.viewIVelocityControl()
        self.right_iEnc = self.right_armDriver.viewIEncoders()
        self.right_iCtlLim = self.right_armDriver.viewIControlLimits()

        #  number of joints
        self.num_joints = self.left_iPos.getAxes()
        print('Num joints: ', self.num_joints)

        self.head_num_joints = self.head_iPos.getAxes()
        print('Num head joints: ', self.head_num_joints)

        self.head_limits = []
        for i in range(self.head_num_joints):
            head_min = yarp.DVector(1)
            head_max = yarp.DVector(1)
            self.head_iCtlLim.getLimits(i, head_min, head_max)
            print('lim head ', i, ' ', head_min[0], ' ', head_max[0])
            self.head_limits.append([head_min[0], head_max[0]])

        self.left_limits = []
        self.right_limits = []
        for i in range(self.num_joints):
            left_min = yarp.DVector(1)
            left_max = yarp.DVector(1)
            self.left_iCtlLim.getLimits(i, left_min, left_max)
            #print('lim left ', i, ' ', left_min[0], ' ', left_max[0])
            self.left_limits.append([left_min[0], left_max[0]])

            right_min = yarp.DVector(1)
            right_max = yarp.DVector(1)
            self.right_iCtlLim.getLimits(i, right_min, right_max)
            #print('lim right ', i, ' ', right_min[0], ' ', right_max[0])
            self.right_limits.append([right_min[0], right_max[0]])

        self.go_to_starting_pos()

        moduleName = rf.check("name", yarp.Value("BabblingModule")).asString()
        self.setName(moduleName)
        print('module name: ', moduleName)
        yarp.delay(5.0)
        print('starting now')
    raise SystemExit

mode = dd.viewIControlMode()
enc = dd.viewIEncoders()
posd = dd.viewIPositionDirect()
var = dd.viewIRemoteVariables()

if mode is None or enc is None or posd is None or var is None:
    print('Unable to acquire robot interfaces')
    raise SystemExit

p = yarp.Property()
p.put('enable', True)
p.put('mode', args.ip)

v = yarp.Value()
v.asList().addString('linInterp')
v.asList().addList().fromString(p.toString())

b = yarp.Bottle()
b.add(v)

if not var.setRemoteVariable('all', b):
    print('Unable to set interpolation mode')
    raise SystemExit

if not mode.setControlMode(args.joint, yarp.VOCAB_CM_POSITION_DIRECT):
    print('Unable to set position direct mode')
    raise SystemExit

time.sleep(
Example #10
    def configure(self, rf):
        yarp.Network.init()
        self.SIGNALS_TO_NAMES_DICT = dict((getattr(signal, n), n) \
            for n in dir(signal) if n.startswith('SIG') and '_' not in n )

        self.terminal = 'xterm'

        rootPath = rf.check("root_path")
        interactionConfPath = rf.check("config_path")

        if not interactionConfPath and not rootPath:
            print("Cannot find .ini settings")
            return False
        else:
            self.rootPath = rf.find("root_path").asString()
            self.interactionConfPath = rf.find("config_path").asString()
            persistence = rf.check("persistence",
                                   yarp.Value("False")).asString()
            windowed = rf.check("windowed", yarp.Value("True")).asString()
            verbose = rf.check("verbose", yarp.Value("True")).asString()

            self.persistence = (persistence == "True")
            self.windowed = (windowed == "True")
            self.verbose = (verbose == "True")

            print('Root supervisor path:     \t', self.rootPath)
            print('Model configuration file: \t', self.interactionConfPath)
            print('Bash Persistence set to:  \t', self.persistence)
            print('Windowed set to:          \t', self.windowed)
            print('Verbose set to:           \t', self.verbose)

            self.modelPath = self.rootPath + '/Models'
            self.dataPath = self.rootPath + '/Data'
            #OLD
            # self.trainingFunctionsPath = os.environ.get("WYSIWYD_DIR")+"/bin"
            #NEW
            self.trainingFunctionsPath = SAM.SAM_Drivers.__path__
            # dict so that a label can be attached to each subprocess
            self.trainingListHandles = dict()
            self.loadedListHandles = dict()
            self.iter = 0
            self.rpcConnections = []
            self.inputBottle = yarp.Bottle()
            self.sendingBottle = yarp.Bottle()
            self.responseBottle = yarp.Bottle()
            self.outputBottle = yarp.Bottle()

            if (not self.windowed): self.devnull = open('/dev/null', 'w')

            out = yarp.Bottle()
            self.checkAvailabilities(out)
            if self.verbose: print(out.toString())

            self.supervisorPort = yarp.Port()
            self.supervisorPort.open('/sam/rpc:i')
            self.attach(self.supervisorPort)

            cmd = 'ipcluster start -n 4'
            command = "bash -c \"" + cmd + "\""

            if self.windowed:
                c = subprocess.Popen([self.terminal, '-e', command],
                                     shell=False)
            else:
                c = subprocess.Popen([cmd],
                                     shell=True,
                                     stdout=self.devnull,
                                     stderr=self.devnull)

            self.trainingListHandles['Cluster'] = c

            if len(self.uptodateModels) + len(self.updateModels) > 0:
                if self.verbose:
                    print "Loading models according to " + self.interactionConfPath
                # start loading model configuration according to interactionConfPath file

                rfModel = yarp.ResourceFinder()
                rfModel.setVerbose(self.verbose)
                rfModel.setDefaultContext("samSupervisor")
                self.interactionConfFile = rfModel.findFile(
                    self.interactionConfPath)

                # Iterate over all sections within the interactionConfPath,
                # create a list and check against the available models
                # warn if model specified in interactionConfPath not loadable
                self.interactionParser = SafeConfigParser()
                self.interactionParser.read(self.interactionConfFile)
                self.interactionSectionList = self.interactionParser.sections()
                if self.verbose:
                    print()
                    print(self.dataPath)
                    print(self.interactionSectionList)
                    print()
                for j in self.interactionSectionList:
                    command = yarp.Bottle()
                    command.addString("load")
                    command.addString(j)
                    if self.verbose:
                        print(command.toString())
                    reply = yarp.Bottle()

                    self.loadModel(reply, command)
                    if self.verbose:
                        print(reply.toString())
                        print("-----------------------------------------------")
                        print()

            elif len(self.noModels) > 0:
                if self.verbose:
                    print "Models available for training."
                # Train a model according to ineractionConfPath file
            else:
                if self.verbose:
                    print "No available models to load or train"
                # wait for a training command

            return True
import yarp

yarp.Network.init()
yarp.Network.setLocalMode(True)

# Create (Simulation + environment + viewer)
simulationOptions = yarp.Property()
simulationOptions.put('env', 'data/testwamcamera.env.xml')
simulationOptions.put('device', 'YarpOpenraveSimulation')
simulationOptions.put('robotIndex', 0)  # required, dummy
simulationOptions.put('view', 1)
simulationDevice = yarp.PolyDriver(simulationOptions)

simulation = roboticslab_openrave_yarp_plugins.viewISimulation(
    simulationDevice)  # syntax is different
penvValue = yarp.Value()
simulation.getSimulationRawPointerValue(penvValue)

# Create Controlboard + using penv
controlboardOptions = yarp.Property()
controlboardOptions.put('penv', penvValue)
controlboardOptions.put('device', 'YarpOpenraveControlboard')
controlboardOptions.put('robotIndex', 0)
controlboardOptions.put('manipulatorIndex', 0)
controlboardDevice = yarp.PolyDriver(controlboardOptions)

# Create Grabber using penv
grabberOptions = yarp.Property()
grabberOptions.put('penv', penvValue)
grabberOptions.put('device', 'YarpOpenraveGrabber')
grabberOptions.put('robotIndex', 0)
    def configure(self, rf):

        self.module_name = rf.check("name", yarp.Value("ObjectsDetector"),
                                    "module name (string)").asString()

        self.label_path = rf.check('label_path', yarp.Value(''),
                                   'Path to the label file').asString()

        self.width_img = rf.check('width', yarp.Value(320),
                                  'Width of the input image').asInt()

        self.height_img = rf.check('height', yarp.Value(240),
                                   'Height of the input image').asInt()

        self.model_path = rf.check('model_path', yarp.Value(),
                                   'Path to the model').asString()

        self.threshold = rf.check('threshold', yarp.Value(0.5),
                                  'Threshold on the detection score').asDouble()

        self.nms_iou_threshold = rf.check(
            'filtering_distance', yarp.Value(0.8),
            'Filtering distance in pixels').asDouble()

        self.process = rf.check('process', yarp.Value(True),
                                'enable automatic run').asBool()

        # Create handle port to read message
        self.handle_port.open('/' + self.module_name)

        # Create a port to output an image
        self.output_img_port.open('/' + self.module_name + '/image:o')

        # Create a port to output the template image of the recognized face
        self.output_raw_port.open('/' + self.module_name + '/raw:o')

        # Create a port to output the recognized names
        self.output_objects_port.open('/' + self.module_name + '/objects:o')

        # Output
        self.display_buf_image.resize(self.width_img, self.height_img)
        self.display_buf_array = np.zeros((self.height_img, self.width_img, 3),
                                          dtype=np.uint8).tobytes()
        self.display_buf_image.setExternal(self.display_buf_array,
                                           self.width_img, self.height_img)

        # Create a port to receive an image
        self.input_port.open('/' + self.module_name + '/image:i')
        self.input_img_array = np.zeros((self.height_img, self.width_img, 3),
                                        dtype=np.uint8).tobytes()

        info('Model initialization ')

        if not self._read_label():
            error("Unable to load label file")
            return False

        if not self._load_graph():
            error("Unable to load model")
            return False

        # self.cap = cv2.VideoCapture(0)  # Change only if you have more than one webcam
        info('Module initialization done, Running the model')
        return True
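For context, the input_img_array buffer prepared in the configure() above is normally wrapped by a yarp image so that a received frame can be viewed as a numpy array. A minimal sketch, assuming self.input_port is a plain yarp.Port and reusing the sizes set in configure():

        # hypothetical frame read, e.g. at the start of updateModule()
        yarp_img = yarp.ImageRgb()
        yarp_img.resize(self.width_img, self.height_img)
        yarp_img.setExternal(self.input_img_array, self.width_img, self.height_img)
        if self.input_port.read(yarp_img):
            frame = np.frombuffer(self.input_img_array, dtype=np.uint8).reshape(
                (self.height_img, self.width_img, 3))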
    def configure(self, rf):
        self.lock = threading.Lock()

        self.config = yarp.Property()
        self.config.fromConfigFile(
            '/code/icub_perceptual_optimisation/yarp/config.ini')
        self.width = self.config.findGroup('CAMERA').find(
            'yarp_width').asInt32()
        self.height = self.config.findGroup('CAMERA').find(
            'yarp_height').asInt32()
        self.target_width = self.config.findGroup('CAMERA').find(
            'target_width').asInt32()
        self.target_height = self.config.findGroup('CAMERA').find(
            'target_height').asInt32()
        self.max_dataset_size = self.config.findGroup('GENERAL').find(
            'max_dataset_size').asInt32()

        if self.config.findGroup('GENERAL').find('show_images').asBool():
            import matplotlib
            matplotlib.use('TKAgg')
            self.ax_left = plt.subplot(1, 2, 1)
            self.ax_right = plt.subplot(1, 2, 2)

        # Create a port and connect it to the iCub simulator virtual camera
        self.input_port_cam = yarp.Port()
        self.input_port_cam.open("/dataCollector/camera_left")
        yarp.Network.connect("/icubSim/cam/left", "/dataCollector/camera_left")

        self.input_port_skin_left_hand = yarp.Port()
        self.input_port_skin_left_hand.open(
            "/dataCollector/skin/left_hand_comp")  #
        yarp.Network.connect("/icubSim/skin/left_hand_comp",
                             "/dataCollector/skin/left_hand_comp")

        self.input_port_skin_left_forearm = yarp.Port()
        self.input_port_skin_left_forearm.open(
            "/dataCollector/skin/left_forearm_comp")  #
        yarp.Network.connect("/icubSim/skin/left_forearm_comp",
                             "/dataCollector/skin/left_forearm_comp")

        self.input_port_command = yarp.Port()
        self.input_port_command.open("/dataCollector/command")  #
        yarp.Network.connect("/client_babbling/left_arm/command",
                             "/dataCollector/command")

        #self.input_port_skin.read(False);    #  clean the buffer
        #self.input_port_skin.read(False);    #  clean the buffer

        # prepare image
        self.yarp_img_in = yarp.ImageRgb()
        self.yarp_img_in.resize(self.width, self.height)
        self.img_array = np.ones((self.height, self.width, 3), dtype=np.uint8)
        # yarp image will be available in self.img_array
        self.yarp_img_in.setExternal(self.img_array.data, self.width,
                                     self.height)

        # prepare motor driver
        self.head_motors = 'head'
        self.head_motorprops = yarp.Property()
        self.head_motorprops.put("device", "remote_controlboard")
        self.head_motorprops.put("local",
                                 "/client_datacollector/" + self.head_motors)
        self.head_motorprops.put("remote", "/icubSim/" + self.head_motors)

        self.left_motors = 'left_arm'
        self.left_motorprops = yarp.Property()
        self.left_motorprops.put("device", "remote_controlboard")
        self.left_motorprops.put("local",
                                 "/client_datacollector/" + self.left_motors)
        self.left_motorprops.put("remote", "/icubSim/" + self.left_motors)

        #self.right_motors = 'right_arm'
        #self.right_motorprops = yarp.Property()
        #self.right_motorprops.put("device", "remote_controlboard")
        #self.right_motorprops.put("local", "/client_datacollector/" + self.right_motors)
        #self.right_motorprops.put("remote", "/icubSim/" + self.right_motors)

        # create remote driver
        self.head_driver = yarp.PolyDriver(self.head_motorprops)
        self.head_iEnc = self.head_driver.viewIEncoders()
        self.head_iPos = self.head_driver.viewIPositionControl()

        self.left_armDriver = yarp.PolyDriver(self.left_motorprops)
        self.left_iEnc = self.left_armDriver.viewIEncoders()
        self.left_iPos = self.left_armDriver.viewIPositionControl()

        #self.right_armDriver = yarp.PolyDriver(self.right_motorprops)
        #self.right_iEnc = self.right_armDriver.viewIEncoders()
        #self.right_iPos = self.right_armDriver.viewIPositionControl()

        #  number of joints
        self.num_joints = self.left_iPos.getAxes()
        self.head_num_joints = self.head_iPos.getAxes()

        moduleName = rf.check("name",
                              yarp.Value("DataCollectorModule")).asString()
        self.setName(moduleName)
        print('module name: ', moduleName)

        self.skin_left_hand_input = yarp.Bottle()
        self.skin_left_forearm_input = yarp.Bottle()
        #self.left_command_input = yarp.Bottle()
        self.left_command_input = yarp.Vector(self.num_joints)

        self.dataset_timestamps = []
        self.dataset_images = []
        self.dataset_skin_values = []
        self.dataset_joint_encoders = []
        self.dataset_motor_commands = []

        print('starting now')
Example #14
    def configure(self, rf):

        self.config = yarp.Property()
        self.config.fromConfigFile('/code/icub_intrinsic_motivation/yarp/config.ini')
        self.width = self.config.findGroup('CAMERA').find('width').asInt32()
        self.height = self.config.findGroup('CAMERA').find('height').asInt32()

        if self.config.findGroup('GENERAL').find('show_images').asBool():
            import matplotlib
            matplotlib.use('TKAgg')
            self.ax_left = plt.subplot(1, 2, 1)
            self.ax_right = plt.subplot(1, 2, 2)

        # Create a port and connect it to the iCub simulator virtual camera
        self.input_port_cam = yarp.Port()
        self.input_port_cam.open("/icub/camera_left")
        yarp.Network.connect("/icubSim/cam/left", "/icub/camera_left")

        # prepare image
        self.yarp_img_in = yarp.ImageRgb()
        self.yarp_img_in.resize(self.width, self.height)
        self.img_array = np.ones((self.height, self.width, 3), dtype=np.uint8)
        # yarp image will be available in self.img_array
        self.yarp_img_in.setExternal(self.img_array.data, self.width, self.height)

        # prepare motor driver
        self.left_motors = 'left_arm'
        self.left_motorprops = yarp.Property()
        self.left_motorprops.put("device", "remote_controlboard")
        self.left_motorprops.put("local", "/client/" + self.left_motors)
        self.left_motorprops.put("remote", "/icubSim/" + self.left_motors)

        self.right_motors = 'right_arm'
        self.right_motorprops = yarp.Property()
        self.right_motorprops.put("device", "remote_controlboard")
        self.right_motorprops.put("local", "/client/" + self.right_motors)
        self.right_motorprops.put("remote", "/icubSim/" + self.right_motors)

        # create remote driver
        self.left_armDriver = yarp.PolyDriver(self.left_motorprops)
        print('left motor driver prepared')
        # query motor control interfaces
        self.left_iPos = self.left_armDriver.viewIPositionControl()
        self.left_iVel = self.left_armDriver.viewIVelocityControl()
        self.left_iEnc = self.left_armDriver.viewIEncoders()
        self.left_iCtlLim = self.left_armDriver.viewIControlLimits()

        self.right_armDriver = yarp.PolyDriver(self.right_motorprops)
        print('right motor driver prepared')
        # query motor control interfaces
        self.right_iPos = self.right_armDriver.viewIPositionControl()
        self.right_iVel = self.right_armDriver.viewIVelocityControl()
        self.right_iEnc = self.right_armDriver.viewIEncoders()
        self.right_iCtlLim = self.right_armDriver.viewIControlLimits()

        #  number of joints
        self.num_joints = self.left_iPos.getAxes()
        print('Num joints: ', self.num_joints)

        self.left_limits = []
        self.right_limits = []
        for i in range(self.num_joints):
            left_min = yarp.DVector(1)
            left_max = yarp.DVector(1)
            self.left_iCtlLim.getLimits(i, left_min, left_max)
            print('lim left ', i, ' ', left_min[0], ' ', left_max[0])
            self.left_limits.append([left_min[0], left_max[0]])

            right_min = yarp.DVector(1)
            right_max = yarp.DVector(1)
            self.right_iCtlLim.getLimits(i, right_min, right_max)
            print('lim right ', i, ' ', right_min[0], ' ', right_max[0])
            self.right_limits.append([right_min[0], right_max[0]])

        self.go_to_starting_pos()

        moduleName = rf.check("name", yarp.Value("BabblingModule")).asString()
        self.setName(moduleName)
        print('module name: ', moduleName)
        yarp.delay(1.0)
        print('starting now')
    def configure(self, rf):

        success = True

        # handle port for the RFModule
        self.handle_port = yarp.Port()
        self.attach(self.handle_port)

        # Define vars to receive audio
        self.audio_in_port = yarp.BufferedPortSound()
        self.label_outputPort = yarp.Port()
        self.eventPort = yarp.BufferedPortBottle()

        # Module parameters
        self.module_name = rf.check("name",
                                    yarp.Value("PersonRecognition"),
                                    "module name (string)").asString()
        self.handle_port.open('/' + self.module_name)

        self.dataset_path = rf.check("dataset_path",
                                     yarp.Value(
                                         ""),
                                     "Root path of the embeddings database (voice & face) (string)").asString()

        self.database = DatabaseHandler(self.dataset_path)

        self.length_input = rf.check("length_input",
                                     yarp.Value(1),
                                     "length audio input in seconds (int)").asInt()

        self.threshold_audio = rf.check("threshold_audio",
                                        yarp.Value(0.32),
                                        "threshold_audio for detection (double)").asDouble()

        self.threshold_face = rf.check("threshold_face",
                                       yarp.Value(0.55),
                                       "threshold_face for detection (double)").asDouble()

        self.face_model_path = rf.check("face_model_path",
                                        yarp.Value(""),
                                        "Path of the model for face embeddings (string)").asString()

        # Set the device for inference for the models
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        print('Running on device: {}'.format(self.device))

        success &= self.load_model_face()

        self.sampling_rate = rf.check("fs",
                                      yarp.Value(48000),
                                      " Sampling rate of the incoming audio signal (int)").asInt()

        success &= self.load_model_audio()

        # Audio and voice events
        self.audio_in_port.open('/' + self.module_name + '/audio:i')
        self.eventPort.open('/' + self.module_name + '/events:i')

        # Label
        self.label_outputPort.open('/' + self.module_name + '/label:o')

        # Image and face
        self.width_img = rf.check('width', yarp.Value(320),
                                  'Width of the input image').asInt()

        self.height_img = rf.check('height', yarp.Value(244),
                                   'Height of the input image').asInt()

        self.face_coord_port.open('/' + self.module_name + '/coord:i')
        self.face_coord_port.setStrict(False)

        self.image_in_port.open('/' + self.module_name + '/image:i')
        self.input_img_array = np.zeros((self.height_img, self.width_img, 3), dtype=np.uint8).tobytes()

        self.opc_port.open('/' + self.module_name + '/OPC:rpc')
        self.threshold_multimodal = 0.8

        info("Initialization complete")
        return success