Example #1
 def __enter__(self):
     self.pi = yarp.Port()
     self.pi.setReader(self)
     self.pi.open(self.portname)
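
The excerpt above only shows the __enter__ half of a context-managed port. A minimal sketch of the full pattern might look like the following; the class name, port name, and the trivial read() body are illustrative assumptions, not taken from the original source.

import yarp


class PortContext(yarp.PortReader):
    """Hypothetical context manager around a yarp.Port that acts as its own reader."""

    def __init__(self, portname):
        yarp.PortReader.__init__(self)
        self.portname = portname

    def __enter__(self):
        self.pi = yarp.Port()
        self.pi.setReader(self)
        self.pi.open(self.portname)
        return self

    def read(self, connection):
        # Minimal reader: consume the incoming Bottle and report success.
        msg = yarp.Bottle()
        return msg.read(connection)

    def __exit__(self, exc_type, exc_value, traceback):
        # Close the port when leaving the `with` block.
        self.pi.close()
        return False


# Usage (assumes a YARP name server is running):
# yarp.Network.init()
# with PortContext("/example/reader"):
#     yarp.Time.delay(5.0)
# yarp.Network.fini()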
Example #2
        if not (connection.isValid()):
            print("Connection shutting down")
            return False
        bin = yarp.Bottle()
        bout = yarp.Bottle()
        print("Trying to read from connection")
        ok = bin.read(connection)
        if not (ok):
            print("Failed to read input")
            return False
        print("Received [%s]" % bin.toString())
        bout.addString("Received:")
        bout.append(bin)
        print("Sending [%s]" % bout.toString())
        writer = connection.getWriter()
        if writer is None:
            print("No one to reply to")
            return True
        return bout.write(writer)


p = yarp.Port()
r = DataProcessor()
p.setReader(r)
p.open("/python")

yarp.Time.delay(100)
print("Test program timer finished")

yarp.Network.fini()
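
A minimal sketch (not part of the original example) of a client that exercises the "/python" port opened above, using Port.write() with a command and a reply Bottle; the client port name is an assumption.

import yarp

yarp.Network.init()

client = yarp.Port()
client.open("/python_client")
yarp.Network.connect("/python_client", "/python")

cmd = yarp.Bottle()
reply = yarp.Bottle()
cmd.addString("hello")
# Send the message and wait for the reply produced by DataProcessor.read().
client.write(cmd, reply)
print("Reply was [%s]" % reply.toString())

client.close()
yarp.Network.fini()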
Example #3
print("Starting system...")

print("")
print("Loading Sentiment Analysis engine...")

print("")
print("Initializing YARP network...")

# Init YARP Network
yarp.Network.init()

print("")
print("Opening data input port with name /sentimentAnalysis/data:i ...")

# Open input data port
sentimentAnalysis_inputPort = yarp.Port()
sentimentAnalysis_inputPortName = '/sentimentAnalysis/data:i'
sentimentAnalysis_inputPort.open(sentimentAnalysis_inputPortName)

# Create input data bottle
inputBottle = yarp.Bottle()

print("")
print("Opening data output port with name /sentimentAnalysis/data:o ...")

# Open output data port
sentimentAnalysis_outputPort = yarp.Port()
sentimentAnalysis_outputPortName = '/sentimentAnalysis/data:o'
sentimentAnalysis_outputPort.open(sentimentAnalysis_outputPortName)

# Create output data bottle
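
The excerpt ends at the output-bottle comment. A sketch of the read/reply loop that typically follows the setup above; the analyze_sentiment() helper is a placeholder for the engine loaded earlier, not part of the original module.

outputBottle = yarp.Bottle()


def analyze_sentiment(text):
    # Placeholder for the real sentiment engine.
    return "neutral"


while True:
    inputBottle.clear()
    sentimentAnalysis_inputPort.read(inputBottle)      # blocking read
    text = inputBottle.toString()

    outputBottle.clear()
    outputBottle.addString(analyze_sentiment(text))
    sentimentAnalysis_outputPort.write(outputBottle)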
Example #4
# Init YARP Network
yarp.Network.init()

print("")
print("[INFO] Opening image input port with name /facialAnalysisLiteDetection2D/img:i ...")

# Open input image port
facialAnalysisLiteDetection2D_portIn = yarp.BufferedPortImageRgb()
facialAnalysisLiteDetection2D_portNameIn = '/facialAnalysisLiteDetection2D/img:i'
facialAnalysisLiteDetection2D_portIn.open(facialAnalysisLiteDetection2D_portNameIn)

print("")
print("[INFO] Opening image output port with name /facialAnalysisLiteDetection2D/img:o ...")

# Open output image port
facialAnalysisLiteDetection2D_portOut = yarp.Port()
facialAnalysisLiteDetection2D_portNameOut = '/facialAnalysisLiteDetection2D/img:o'
facialAnalysisLiteDetection2D_portOut.open(facialAnalysisLiteDetection2D_portNameOut)

print("")
print("[INFO] Opening data output port with name /facialAnalysisLiteDetection2D/data:o ...")

# Open output data port
facialAnalysisLiteDetection2D_portOutDet = yarp.Port()
facialAnalysisLiteDetection2D_portNameOutDet = '/facialAnalysisLiteDetection2D/data:o'
facialAnalysisLiteDetection2D_portOutDet.open(facialAnalysisLiteDetection2D_portNameOutDet)

# Create data bottle
outputBottleFacialAnalysisLiteDetection2D = yarp.Bottle()

# Image size
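
The excerpt ends at the image-size comment. A sketch of the image plumbing that typically follows; the 640x480 size and the pass-through write are assumptions, not taken from the original module.

import numpy as np

image_w, image_h = 640, 480

# Wrap a numpy array so received pixels are directly accessible from Python.
in_buf_array = np.ones((image_h, image_w, 3), dtype=np.uint8)
in_buf_image = yarp.ImageRgb()
in_buf_image.resize(image_w, image_h)
in_buf_image.setExternal(in_buf_array, image_w, image_h)

frame = facialAnalysisLiteDetection2D_portIn.read(True)   # blocking read
if frame is not None:
    in_buf_image.copy(frame)                               # pixels land in in_buf_array
    facialAnalysisLiteDetection2D_portOut.write(in_buf_image)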
Example #5
	def __init__(self, size_buffer):
		yarp.BottleCallback.__init__(self)
		self.port = yarp.Port()
		self.buffer = []
		self.size_buffer = size_buffer
Example #6
 def __init__(self):
     yarp.BottleCallback.__init__(self)
     self.port = yarp.Port()
     self.flag_init = 0
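
Both excerpts above only show __init__ methods. A sketch of a complete yarp.BottleCallback built around the first one; the class name, port name, and the two-argument onRead signature follow common published examples and are assumptions here.

import yarp


class BufferedCallback(yarp.BottleCallback):
    def __init__(self, size_buffer):
        yarp.BottleCallback.__init__(self)
        self.port = yarp.Port()
        self.buffer = []
        self.size_buffer = size_buffer

    def onRead(self, bottle, reader):
        # Called by YARP whenever a Bottle arrives on the associated port.
        self.buffer.append(bottle.toString())
        if len(self.buffer) > self.size_buffer:
            self.buffer.pop(0)


yarp.Network.init()
port = yarp.BufferedPortBottle()
port.open("/buffered_callback:i")
cb = BufferedCallback(size_buffer=10)   # keep a reference so the callback stays alive
port.useCallback(cb)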
Example #7
                command_recognizer.run_CR_dnn(current_file, index)


if __name__ == '__main__':
    # Initializing parameters
    parser = argparse.ArgumentParser(description='Command Recognition System.')
    parser.add_argument(
        '-r',
        '--rejection_threshold',
        type=float,
        default=2,
        help='rejection threshold for the commands based on the acoustic cost')
    args = parser.parse_args()

    yarp.Network.init()
    cmd_output_port = yarp.Port()
    cmd_output_port.open("/cmd_writer:o")
    command_recognizer = CR_wrapper(
        CR_path=CR_path_DNN,
        output_folder=output_folder,
        input_folder=input_folder,
        scp_files_folder=scp_files_folder,
        FE_path=FE_path,
        cmd_output_port=cmd_output_port,
        rejection_threshold=args.rejection_threshold)
    p = yarp.BufferedPortBottle()
    updater = CR_pipeline_updater()
    p.open("/file_reader:i")
    yarp.Network.connect("/file_writer:o", "/file_reader:i")
    p.useCallback(updater)
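
CR_pipeline_updater itself is not shown in this excerpt. An illustrative stand-in with the callback hook that useCallback() expects might look like this (the onRead signature and the Bottle layout are assumptions):

class FileReaderUpdater(yarp.BottleCallback):
    def onRead(self, bottle, reader):
        # Each Bottle is assumed to carry the path of a newly written audio file.
        current_file = bottle.get(0).asString()
        print("New file announced:", current_file)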
Example #8
print(
    "**************************************************************************"
)
print("")
print("Initializing YARP network ...")

# Init YARP Network
yarp.Network.init()

print("")
print(
    "[INFO] Opening data input port with name /voskSpeechRecognition/data:i ..."
)

# Open input voskSpeechRecognition port
voskSpeechRecognition_inputPort = yarp.Port()
voskSpeechRecognition_inputPortName = '/voskSpeechRecognition/data:i'
voskSpeechRecognition_inputPort.open(voskSpeechRecognition_inputPortName)

# Create input data bottle
inputBottle = yarp.Bottle()

print("")
print(
    "[INFO] Opening data output port with name /voskSpeechRecognition/data:o ..."
)

# Open output voskSpeechRecognition port
voskSpeechRecognition_outputPort = yarp.Port()
voskSpeechRecognition_outputPortName = '/voskSpeechRecognition/data:o'
voskSpeechRecognition_outputPort.open(voskSpeechRecognition_outputPortName)
Example #9
class Detector (yarp.RFModule):
    def __init__(self,
                in_port_name,
                out_det_img_port_name,
                out_det_port_name,
                out_img_port_name,
                classes,
                image_w,
                image_h,
                deeplabmodel,
                cpu_mode,
                gpu_id):


        if tf.__version__ < '1.5.0':
            raise ImportError('Please upgrade your tensorflow installation to v1.5.0 or newer!')

        self.LABEL_NAMES = np.asarray([
            'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
            'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
            'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
            'train', 'tv'
        ])

        self.FULL_LABEL_MAP = np.arange(len(self.LABEL_NAMES)).reshape(len(self.LABEL_NAMES), 1)
        self.FULL_COLOR_MAP = get_dataset_colormap.label_to_color_image(self.FULL_LABEL_MAP)

        self.deeplabmodel = deeplabmodel
        #self._TARBALL_NAME = 'deeplab_model.tar.gz'
        print(self.LABEL_NAMES)

        self.model = DeepLabModel(self.deeplabmodel)


        # Images port initialization
        ## Prepare ports

        self._in_port = yarp.BufferedPortImageRgb()
        #  self._in_port = yarp.Port()
        self._in_port_name = in_port_name
        self._in_port.open(self._in_port_name)

        self._out_det_port = yarp.BufferedPortBottle()
        self._out_det_port_name = out_det_port_name
        self._out_det_port.open(self._out_det_port_name)

        self._out_det_img_port = yarp.Port()
        self._out_det_img_port_name = out_det_img_port_name
        self._out_det_img_port.open(self._out_det_img_port_name)

        self._out_img_port = yarp.Port()
        self._out_img_port_name = out_img_port_name
        self._out_img_port.open(self._out_img_port_name)

        ## Prepare image buffers
        ### Input
        print 'prepare input image'

        self._in_buf_array = np.ones((image_h, image_w, 3), dtype=np.uint8)

        self._in_buf_image = yarp.ImageRgb()
        self._in_buf_image.resize(image_w, image_h)
        self._in_buf_image.setExternal(self._in_buf_array, self._in_buf_array.shape[1], self._in_buf_array.shape[0])

        ### Output
        print 'prepare output image'
        self._out_buf_image = yarp.ImageRgb()
        self._out_buf_image.resize(image_w, image_h)
        self._out_buf_array = np.zeros((image_h, image_w, 3), dtype=np.uint8)

        self._out_buf_image.setExternal(self._out_buf_array, self._out_buf_array.shape[1], self._out_buf_array.shape[0])
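
A sketch of a per-frame update using the ports prepared above; the method body is illustrative, as the original updateModule is not part of this excerpt.

    def updateModule(self):
        received = self._in_port.read(False)        # non-blocking read
        if received is not None:
            self._in_buf_image.copy(received)        # pixels land in self._in_buf_array

            bottle = self._out_det_port.prepare()
            bottle.clear()
            bottle.addString("frame received")       # placeholder detection payload
            self._out_det_port.write()

            self._out_img_port.write(self._in_buf_image)
        return True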
Example #10
    def configure(self, rf):
        # # --------------------------------Testing---------------------------------
        # self.list_of_actions = ["left_arm_gun_pt1", "fast_long_wave", "left_arm_outstretched",
        #                         "hand_up_5_fingers_palm_outward", "left_arm_kill", "left_arm_outstretched_you_got_it",
        #                         "hand_up_5_fingers_palm_inward", "left_arm_outstretched_thumbs_up", "left_arm_kill"]
        # self.num_actions = len(self.list_of_actions)
        #
        # self.list_of_emotions = ["neutral", "talking", "happy", "sad", "surprised", "evil", "angry", "shy", "cunning"]
        # self.num_emotions = len(self.list_of_emotions)
        #
        # for n in self.list_of_actions:
        #     self.prepared_action_dict[n] = self.prepare_movement(n)
        #
        # r = self.get_chatbot_reply("This is a sentence")
        # self.parse_chatbot_reply(r)
        # # ------------------------------------------------------------------------

        self.withProactive = rf.find(
            'withProactive').toString_c().lower() == "true"

        # Open BBC rpc port
        self.portsList["rpc"] = yarp.Port()
        self.portsList["rpc"].open("/BBC_Demo/rpc:i")
        self.attach(self.portsList["rpc"])
        yarp.Network.connect("/sentence_tokenizer/audio:o",
                             self.portsList["rpc"].getName())
        # yarp.Network.connect("/deepSpeechToText/text:o", self.portsList["rpc"].getName())

        # Open Body control port
        self.portsList["body_control"] = yarp.RpcClient()
        self.portsList["body_control"].open("/BBC_Demo/body_control/rpc:o")
        yarp.Network.connect(self.portsList["body_control"].getName(),
                             "/body_control/rpc:i")

        # Open speech dev control port
        self.portsList["speech_control"] = yarp.RpcClient()
        self.portsList["speech_control"].open("/BBC_Demo/speech_control/rpc:o")
        yarp.Network.connect(self.portsList["speech_control"].getName(),
                             "/icub/speech:rpc")

        # Open tokenizer control port
        self.portsList["tokenizer_control"] = yarp.RpcClient()
        self.portsList["tokenizer_control"].open(
            "/BBC_Demo/sentence_tokenizer/rpc:o")
        yarp.Network.connect(self.portsList["tokenizer_control"].getName(),
                             "/sentence_tokenizer/rpc:i")

        self.list_of_actions = [
            "left_arm_gun_pt1", "fast_long_wave", "left_arm_outstretched",
            "hand_up_5_fingers_palm_outward", "left_arm_kill",
            "left_arm_outstretched_you_got_it",
            "hand_up_5_fingers_palm_inward", "left_arm_outstretched_thumbs_up",
            "left_arm_kill"
        ]
        self.num_actions = len(self.list_of_actions)

        self.list_of_emotions = [
            "neutral", "talking", "happy", "sad", "surprised", "evil", "angry",
            "shy", "cunning"
        ]
        self.num_emotions = len(self.list_of_emotions)

        rep = yarp.Bottle()
        for n in self.list_of_actions:
            rep.clear()
            self.prepared_action_dict[n] = self.prepare_movement(n)
            cmd = self.prepare_movement(n, duration=True)
            self.portsList["body_control"].write(cmd, rep)
            self.duration_action_dict[n] = rep.get(1).asDouble()

        if self.withProactive:
            self.portsList["toHomeo"] = yarp.Port()
            self.portsList["toHomeo"].open(self.homeoPortName)
            yarp.Network.connect(self.homeoPortName, self.homeoRPC)

        # Setup icub client
        self.iCub = icubclient.ICubClient("BBC_Demo", "icubClient",
                                          "BBC_demo.ini")
        if not self.iCub.connect():
            print "iCub not connected"
            # return False

        if not self.iCub.getSpeechClient():
            print "Speech not connected"
            # return False
        else:
            self.speech_client = self.iCub.getSpeechClient()
            self.speech_client.Connect()

        if not self.iCub.getEmotionClient():
            print "Emotion not connected"
            # return False
        else:
            self.emotion_client = self.iCub.getEmotionClient()
            self.emotion_client.Connect()

        # if not self.iCub.getSAMClient():
        #     print "SAM not connected"
        # return False
        # else:
        #     self.sam_client = self.iCub.getSAMClient()
        #     self.sam_client.Connect()
        # Setting up speech if available

        # Setting up Rasa Grammar
        if self.grammar_mode == "rasa":
            search_dir = join(self.rasa_root_dir, self.rasa_model_dir)
            model_dirs = [
                join(search_dir, d) for d in os.listdir(search_dir)
                if os.path.isdir(join(search_dir, d))
            ]
            self.rasa_model_dir = max(model_dirs, key=os.path.getmtime)

            self.rasa_config = RasaNLUConfig(
                join(self.rasa_root_dir, self.rasa_config_file))
            self.rasa_interpreter = Interpreter.load(
                join(self.rasa_root_dir, self.rasa_model_dir),
                self.rasa_config)
        else:
            with open(self.TalkML_grammar_file) as f:
                content = f.readlines()
            # you may also want to remove whitespace characters like `\n` at the end of each line
            content = [x.strip() for x in content]
            for j in content:
                parts = j.split('.*')
                key = parts[0].replace('\t', '')
                vals = parts[1][1:-1].split('|')
                for v in vals:
                    if "*" in v:
                        try:
                            self.grammars_mult[key].append(v.split("*"))
                        except:
                            self.grammars_mult[key] = [v.split("*")]
                    else:
                        try:
                            self.grammars[key].append(v)
                        except:
                            self.grammars[key] = [v]

            # Setting up chat interface
        if self.chat_interface == "TalkML":
            # Reading in tkml file
            with open(self.TalkML_tkml_file, "r") as tkml_file:
                self.tkml_def = tkml_file.read().replace("\n", " ")

            # Setting up upload request body
            startup_request = \
                {"version": "1.0",
                 "action": self.TKML_Actions.upload.value,
                 "tkml": self.tkml_def}

            # Sending startup request
            startup_reply = self.TalkML_Send(startup_request)
            if str(startup_reply) == "<Response [200]>":
                print "Upload successful"
                # Sending start
                start_request = \
                    {
                        "action": "start",
                        "version": "1.0"
                    }
                start_reply = self.TalkML_Send(start_request)
                self.TalkML_enabled = self.parse_chatbot_reply(start_reply)
                if self.TalkML_enabled:
                    print "Start successful"
                else:
                    print "TalkML reply: ", start_reply
                    print "Start unsuccessful"
            else:
                print "TalkML reply: ", startup_reply
                if startup_reply is not None:
                    print startup_reply._content
                print "TalkML upload unsuccessful"
                return False
        elif self.chat_interface == "testing":
            r, _ = self.get_chatbot_reply("Hello")
            self.parse_chatbot_reply(r)

        return True
Example #11
    def configure(self, rf):

        for attr in ["xpos", "ypos", "width", "height"]:
            a = rf.find(attr)
            val = a.asInt() if not a.isNull() else default_geometry[attr]
            setattr(self, attr, val)
        self.win_size = 600

        self.module_name = "allostatic_plot"
        self.setName(self.module_name)
        self.homeo_rpc = yarp.Port()
        self.homeo_rpc.open("/" + self.module_name + "/to_homeo_rpc")
        yarp.Network.connect(self.homeo_rpc.getName(), "/homeostasis/rpc")
        self.behaviorManager_rpc = yarp.Port()
        self.behaviorManager_rpc.open("/" + self.module_name +
                                      "/to_behaviorManager_rpc")
        yarp.Network.connect(self.behaviorManager_rpc.getName(),
                             "/BehaviorManager/trigger:i")

        request = yarp.Bottle()
        rep = yarp.Bottle()

        # Retrieve drive names from the homeostasis module
        request.addString("names")
        self.homeo_rpc.write(request, rep)
        self.drives = []
        names = rep.get(0).asList()
        if not names:
            return False
        for i in range(names.size()):
            self.drives.append(names.get(i).asString())

        # Retrieve behavior names from the behaviorManager module
        request.clear()
        rep.clear()
        request.addString("names")
        self.behaviorManager_rpc.write(request, rep)
        self.behaviors = []
        self.behavior_ports = []
        names = rep.get(0).asList()
        if not names:
            return False
        for i in range(names.size()):
            self.behaviors.append(names.get(i).asString())
            self.behavior_ports.append(yarp.BufferedPortBottle())
            self.behavior_ports[-1].open("/" + self.module_name +
                                         "/behaviors/" + self.behaviors[-1] +
                                         ":i")
            yarp.Network.connect(
                "/BehaviorManager/" + self.behaviors[-1] + "/start_stop:o",
                self.behavior_ports[-1].getName())

        # Retrieve homeostasis boundaries
        self.homeo_mins = []
        self.homeo_maxs = []
        for d in self.drives:
            request.clear()
            request.addString("ask")
            request.addString(d)
            request.addString("min")
            rep.clear()
            self.homeo_rpc.write(request, rep)
            self.homeo_mins.append(rep.get(0).asDouble())

            request.clear()
            request.addString("ask")
            request.addString(d)
            request.addString("max")
            rep.clear()
            self.homeo_rpc.write(request, rep)
            self.homeo_maxs.append(rep.get(0).asDouble())

        # Connect to the homeostasis module to get drive values
        self.drive_value_ports = [
            yarp.BufferedPortBottle() for _ in self.drives
        ]
        for i, d in enumerate(self.drives):
            self.drive_value_ports[i].open("/" + self.module_name + "/" + d +
                                           ":i")
            yarp.Network.connect("/homeostasis/" + d + "/max:o",
                                 self.drive_value_ports[i].getName())

        # Init matplotlib stuff
        min_val = min(self.homeo_mins)
        max_val = max(self.homeo_maxs)
        center_val = (max_val - min_val) / 2.
        self.y_min = -0.1
        self.y_max = 1.1
        self.fig = plt.figure()
        thismanager = get_current_fig_manager()
        thismanager.window.setGeometry(self.xpos, self.ypos, self.width,
                                       self.height)
        self.ax = plt.axes(xlim=(0, self.win_size),
                           ylim=(self.y_min, self.y_max))
        self.colors = plt.cm.viridis(linspace(0, 1, len(self.drives) + 2))
        self.value_lines = [
            self.ax.plot([], [],
                         linestyle='-',
                         color=self.colors[i + 1],
                         lw=2,
                         label=d) for i, d in enumerate(self.drives)
        ]
        self.homeo_min_lines = [
            self.ax.plot([], [],
                         linestyle='--',
                         color=self.colors[i + 1],
                         lw=1) for i, d in enumerate(self.drives)
        ]
        self.homeo_max_lines = [
            self.ax.plot([], [],
                         linestyle='--',
                         color=self.colors[i + 1],
                         lw=1) for i, d in enumerate(self.drives)
        ]
        plt.legend(change_drive_names(self.drives))
        self.ax.set_xlim(0 - self.win_size, 0)

        self.drive_values = [[0.] * self.win_size for _ in self.drives]

        self.behaviors_to_plot = defaultdict(list)

        self.t = 0

        return True
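
A sketch of the periodic update that typically accompanies the configure() above: poll each drive-value port and shift the value into the plotted window. The body is illustrative; the original updateModule is not shown in this excerpt.

    def updateModule(self):
        for i, port in enumerate(self.drive_value_ports):
            bottle = port.read(False)                # non-blocking read
            if bottle is not None:
                self.drive_values[i].append(bottle.get(0).asDouble())
                self.drive_values[i].pop(0)          # keep a fixed-size window
        self.t += 1
        return True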
Example #12
                        help='0.1 for sound player, 0.7 for mic, 0.5 DEFAULT')
    args = parser.parse_args()
    min_num_frame_speech_recognizer = args.min_num_frame_speech_recognizer
    print('min_num_frame_speech_recognizer:', min_num_frame_speech_recognizer)
    speech_recognizer_threshold = args.speech_recognizer_threshold
    print('speech_recognizer_threshold:', speech_recognizer_threshold)

    yarp.Network.init()
    p = yarp.BufferedPortSound()
    p.setStrict()
    updater = DisplayUpdater()
    p.useCallback(updater)
    p.open("/reader:i")

    # open yarp port to notify the CR_pipeline when a new file is create
    file_output_port = yarp.Port()
    file_output_port.open("/file_writer:o")

    thread_VAD = Thread(target=blackBoxVAD)
    thread_VAD.start()

    thread_smoother = Thread(target=smoother)
    thread_smoother.start()

    # Test code-----------------#
    output_timer = 0
    muted = False
    while not SignalHandler.should_stop:
        time.sleep(1)
        if output_timer % output_delay == 0:
            print("audio:", audio_buffer.qsize())
Example #13
    def configure (self, rf):
        '''
        Configure the module internal variables and ports according to resource finder
        '''

        self._rf = rf

        #   Input
        #   Image port initialization
        self._port_in = yarp.BufferedPortImageRgb()
        self._port_in.open('/' +  self._module_name + '/RGBimage:i')

        #   Input buffer initialization
        self._input_buf_image = yarp.ImageRgb()
        self._input_buf_image.resize(self._input_img_width, self._input_img_height)
        self._input_buf_array = np.ones((self._input_img_height, self._input_img_width, 3), dtype = np.uint8)
        self._input_buf_image.setExternal(self._input_buf_array,
                                          self._input_buf_array.shape[1],
                                          self._input_buf_array.shape[0])

        print('Input image buffer configured')

        #   Output
        #   Output image port initialization
        self._port_out = yarp.Port()
        self._port_out.open('/' + self._module_name + '/RGBimage:o')

        #   Output blobs port initialization
        self._port_out_bboxes = yarp.Port()
        self._port_out_bboxes.open('/' + self._module_name + '/bboxes:o')

        #   Output detection info port initialization
        self._port_out_info = yarp.Port()
        self._port_out_info.open('/' + self._module_name + '/detectionInfo:o')

        #   Output buffer initialization
        self._output_buf_image = yarp.ImageRgb()
        self._output_buf_image.resize(self._input_img_width, self._input_img_height)
        self._output_buf_array = np.zeros((self._input_img_height, self._input_img_width, 3), dtype = np.uint8)
        self._output_buf_image.setExternal(self._output_buf_array,
                                           self._output_buf_array.shape[1],
                                           self._output_buf_array.shape[0])

        print('Output image buffer configured')

        #   Output mask port initialization
        self._port_out_mask = yarp.Port()
        self._port_out_mask.open('/' + self._module_name + '/maskImage:o')

        #   Output mask buffer initialization
        self._output_mask_buf_image = yarp.ImageMono()
        self._output_mask_buf_image.resize(self._input_img_width, self._input_img_height)
        print('Output mask buffer configured')


        #   RPC port initialization
        self._port_rpc = yarp.RpcServer()
        self._port_rpc.open('/' + self._module_name + '/rpc')
        self.attach_rpc_server(self._port_rpc)

        #   Inference model setup
        #   Configure some parameters for inference
        config = tabletop.YCBVideoConfigInference()
        config.POST_NMS_ROIS_INFERENCE = 300
        config.PRE_NMS_LIMIT = 1000
        config.DETECTION_MAX_INSTANCES = 10
        config.DETECTION_MIN_CONFIDENCE = 0.75
        
        config.display()

        self._model = modellib.MaskRCNN(mode='inference',
                                  model_dir=MODEL_DIR,
                                  config=config)

        self._detection_results = None

        print('Inference model configured')

        #   Load class names
        dataset_root = os.path.join(ROOT_DIR, "datasets", "YCB_Video_Dataset")

        # Automatically discriminate the dataset according to the config file
        if isinstance(config, tabletop.TabletopConfigInference):
            # Load the validation dataset
            self._dataset = tabletop.TabletopDataset()
        elif isinstance(config, tabletop.YCBVideoConfigInference):
            self._dataset = tabletop.YCBVideoDataset()

        # No need to load the whole dataset, just the class names will be ok
        self._dataset.load_class_names(dataset_root)

        # Create a dict for assigning colors to each class
        random_class_colors = tabletop.random_colors(len(self._dataset.class_names))
        self._class_colors = {class_id: color for (color, class_id) in zip(random_class_colors, self._dataset.class_names)}

        #   Load model weights
        try:
            assert os.path.exists(self._model_weights_path)
        except AssertionError as error:
            print("Model weights path invalid: file does not exist")
            print(error)
            return False

        self._model.load_weights(self._model_weights_path, by_name=True)

        print("Model weights loaded")

        return True
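
A sketch of how the output ports configured above might be used once inference has run; the method and the Bottle layout are illustrative, not taken from the original module.

    def write_outputs(self, num_detections):
        info = yarp.Bottle()
        info.addInt32(num_detections)    # addInt32 on YARP 3.x bindings; older bindings expose addInt
        self._port_out_info.write(info)

        # self._output_buf_array is assumed to hold the annotated frame already.
        self._port_out.write(self._output_buf_image)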
Example #14
    labels_file = 'data/iCubWorld28/synsets.txt'
elif TEST_TYPE == 'ICUBWORLD28_7_objects':
    dataset = "iCubWorld28"
    model = "bvlc_reference_caffenet_iCubWorld28"
    trained = "caffenet_7_12120.caffemodel"
    mean_file = "iCubWorld28_mean.npy"
    labels_file = 'data/iCubWorld28/synsets.txt'
else:
    print "Error: Unknown TEST_TYPE!"

# Initialise YARP
yarp.Network.init()

## Ports
# Create a port and connect it to the iCub simulator virtual camera
input_port = yarp.Port()
input_port.close()
input_port.open("/python-image-port")
yarp.Network.connect("/cropped_image/out", "/python-image-port")

output_port = yarp.Port()
output_port.close()
output_port.open("/python-features-out")
yarp.Network.connect("/python-features-out", "/matlab/read_features")

# Create numpy array to receive the image and the YARP image wrapped around it
img_array = np.zeros((256, 256, 3), dtype=np.uint8)
yarp_image = yarp.ImageRgb()
yarp_image.resize(256, 256)
yarp_image.setExternal(img_array, img_array.shape[1], img_array.shape[0])
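
A sketch of the acquisition step that usually follows the setup above: read one frame into the wrapped array and forward a feature Bottle (the feature payload is a placeholder, not part of the original script).

input_port.read(yarp_image)        # blocking read; pixels land in img_array

features = yarp.Bottle()
features.addString("features_placeholder")
output_port.write(features)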
Example #15
)
print("YARP configuration:")
print(
    "**************************************************************************"
)
print("")
print("Initializing YARP network...")

# Init YARP Network
yarp.Network.init()

print("")
print("Opening data input port with name /wikipediaKnowledge/data:i ...")

# Open input data port
wikipediaKnowledge_inputPort = yarp.Port()
wikipediaKnowledge_inputPortName = '/wikipediaKnowledge/data:i'
wikipediaKnowledge_inputPort.open(wikipediaKnowledge_inputPortName)

# Create input data bottle
inputBottle = yarp.Bottle()

print("")
print("Opening data output port with name /wikipediaKnowledge/data:o ...")

# Open output data port
wikipediaKnowledge_outputPort = yarp.Port()
wikipediaKnowledge_outputPortName = '/wikipediaKnowledge/data:o'
wikipediaKnowledge_outputPort.open(wikipediaKnowledge_outputPortName)

# Create output data bottle
Example #16
optionsSN.put('device', 'remote_controlboard'
              )  # we add a name-value pair that indicates the YARP device
optionsSN.put('remote',
              robot + '/softNeck')  # we add info on to whom we will connect
optionsSN.put(
    'local', '/demo' + robot + '/softNeck'
)  # we add NSNinfo on how we will call ourselves on the YARP network
ddSN = yarp.PolyDriver(
    optionsSN)  # create a YARP multi-use driver with the given options
posSN = ddSN.viewIPositionControl(
)  # make a position controller object we call 'pos'
encSN = ddSN.viewIEncoders()  # encoders
axesSN = posSN.getAxes()  # retrieve number of joints

#create a new input port and open it
in_port = yarp.Port()
in_port.open("/softimu/in")
#connect up the output port to our input port
yarp.Network.connect("/softimu/out", "/softimu/in")

# Configure acceleration
for joint in range(0, axesTR):
    posTR.setRefAcceleration(joint, 10)  # manual por rpc --> set accs (20 20)

# Configure speed
sp = yarp.DVector(axesTR, 10)
posTR.setRefSpeeds(sp)

# writing CSV
start = yarp.now()
imu = yarp.Vector(2)
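
A sketch of the logging loop that might follow: poll the IMU port opened above and append elapsed time plus the two IMU values to a CSV file (the file name and iteration count are assumptions).

import csv

with open("imu_log.csv", "w") as f:
    writer = csv.writer(f)
    for _ in range(100):
        in_port.read(imu)          # blocking read into the yarp.Vector(2)
        writer.writerow([yarp.now() - start, imu.get(0), imu.get(1)])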
Example #17
print("Starting system...")

print("")
print("Loading GPS Tracker engine...")

print("")
print("Initializing YARP network...")

# Init YARP Network
yarp.Network.init()

print("")
print("Opening data input port with name /gpsTracker/data:i ...")

# Open input data port
gpstracker_inputPort = yarp.Port()
gpstracker_inputPortName = '/gpsTracker/data:i'
gpstracker_inputPort.open(gpstracker_inputPortName)

# Create input data bottle
inputBottle = yarp.Bottle()

print("")
print("Opening data output port with name /gpsTracker/data:o ...")

# Open output data port
gpstracker_outputPort = yarp.Port()
gpstracker_outputPortName = '/gpsTracker/data:o'
gpstracker_outputPort.open(gpstracker_outputPortName)

# Create output data bottle
Example #18
    def configure(self, rf):
        """
         Configure interactionSAMModel yarp module

        Args:
            rf: Yarp RF context input
            argv_1 : String containing data path.
            argv_2 : String containing model path.
            argv_3 : String containing config file path (from `config_path` parameter of samSupervisor config file).
            argv_4 : String driver name corresponding to a valid driver present in SAM_Drivers folder.
            argv_5 : String `'True'` or `'False'` to switch formatting of logging depending on whether interaction is logging to a separate window or to the stdout of another process.
        Returns:
            Boolean indicating success or no success in initialising the yarp module
        """
        stringCommand = 'from SAM.SAM_Drivers import ' + sys.argv[
            4] + ' as Driver'
        exec stringCommand

        self.mm = [Driver()]
        self.dataPath = sys.argv[1]
        self.modelPath = sys.argv[2]
        self.driverName = sys.argv[4]
        self.configPath = sys.argv[3]
        self.windowedMode = sys.argv[5] == 'True'
        self.modelRoot = self.dataPath.split('/')[-1]

        file_i = 0
        loggerFName = join(self.dataPath,
                           self.baseLogFileName + '_' + str(file_i) + '.log')

        # check if file exists
        while os.path.isfile(loggerFName) and os.path.getsize(loggerFName) > 0:
            loggerFName = join(
                self.dataPath,
                self.baseLogFileName + '_' + str(file_i) + '.log')
            file_i += 1

        if self.windowedMode:
            logFormatter = logging.Formatter("[%(levelname)s]  %(message)s")
        else:
            logFormatter = logging.Formatter(
                "\033[31m%(asctime)s [%(name)-33s] [%(levelname)8s]  %(message)s\033[0m"
            )

        rootLogger = logging.getLogger('interaction ' + self.driverName)
        rootLogger.setLevel(logging.DEBUG)

        fileHandler = logging.FileHandler(loggerFName)
        fileHandler.setFormatter(logFormatter)
        rootLogger.addHandler(fileHandler)

        consoleHandler = logging.StreamHandler()
        consoleHandler.setFormatter(logFormatter)
        rootLogger.addHandler(consoleHandler)
        logging.root = rootLogger

        off = 17
        logging.info('Arguments: ' + str(sys.argv))
        logging.info(stringCommand)
        logging.info('Using log' + str(loggerFName))
        logging.info('-------------------')
        logging.info('Interaction Settings:')
        logging.info('Data Path: '.ljust(off) + str(self.dataPath))
        logging.info('Model Path: '.ljust(off) + str(self.modelPath))
        logging.info('Config Path: '.ljust(off) + str(self.configPath))
        logging.info('Driver: '.ljust(off) + str(self.driverName))
        logging.info('-------------------')
        logging.info('Configuring Interaction...')
        logging.info('')

        # parse settings from config file
        parser2 = SafeConfigParser()
        parser2.read(self.configPath)
        proposedBuffer = 5
        if self.modelRoot in parser2.sections():
            self.portNameList = parser2.items(self.dataPath.split('/')[-1])
            logging.info(str(self.portNameList))
            self.portsList = []
            for j in range(len(self.portNameList)):
                if self.portNameList[j][0] == 'rpcbase':
                    self.portsList.append(yarp.Port())
                    self.portsList[j].open(self.portNameList[j][1] + ":i")
                    self.svPort = j
                    self.attach(self.portsList[j])
                elif self.portNameList[j][0] == 'visualise':
                    if self.portNameList[j][1] == "True":
                        self.drawLatent = True
                elif self.portNameList[j][0] == 'callsign':
                    # should check for repeated call signs by getting list from samSupervisor
                    self.callSignList = self.portNameList[j][1].split(',')
                elif self.portNameList[j][0] == 'latentmodelport':
                    self.portsList.append(yarp.BufferedPortImageRgb())
                    self.latentPort = j
                    ports = self.portNameList[j][1].split(',')
                    self.portsList[j].open(ports[0])
                    yarp.Network.connect(ports[0], ports[1])
                elif self.portNameList[j][0] == 'collectionmethod':
                    self.collectionMethod = self.portNameList[j][1].split(
                        ' ')[0]
                    try:
                        proposedBuffer = int(
                            self.portNameList[j][1].split(' ')[1])
                    except ValueError:
                        logging.error(
                            'collectionMethod bufferSize is not an integer')
                        logging.error(
                            'Should be e.g: collectionMethod = buffered 3')
                        return False

                    if self.collectionMethod not in [
                            'buffered', 'continuous', 'future_buffered'
                    ]:
                        logging.error(
                            'collectionMethod should be set to buffered / continuous / future_buffered'
                        )
                        return False
                elif self.portNameList[j][0] == 'recency':
                    try:
                        self.recency = int(self.portNameList[j][1])
                    except ValueError:
                        logging.error('Recency value for ' +
                                      str(self.driverName) +
                                      ' is not an integer')
                        self.recency = 5
                else:
                    parts = self.portNameList[j][1].split(' ')
                    logging.info(parts)

                    if parts[1].lower() == 'imagergb':
                        self.portsList.append(yarp.BufferedPortImageRgb())
                        # self.portsList[j].open(parts[0])

                    elif parts[1].lower() == 'imagemono':
                        self.portsList.append(yarp.BufferedPortImageMono())
                        # self.portsList[j].open(parts[0])

                    elif parts[1].lower() == 'bottle':
                        self.portsList.append(yarp.BufferedPortBottle())
                        # self.portsList[j].open(parts[0])

                    else:
                        logging.error('Data type ' + str(parts[1]) + ' for ' +
                                      str(self.portNameList[j][0]) +
                                      ' unsupported')
                        return False
                    # mrd models with label/instance training will always have:
                    # 1 an input data line which is used when a label is requested
                    # 2 an output data line which is used when a generated instance is required
                    if parts[0][-1] == 'i':
                        self.labelPort = j
                        self.labelPortName = parts[0]
                    elif parts[0][-1] == 'o':
                        self.instancePort = j
                        self.instancePortName = parts[0]

            if self.collectionMethod == 'continuous':
                self.portsList.append(yarp.BufferedPortBottle())
                self.eventPort = len(self.portsList) - 1
                self.eventPortName = '/'.join(
                    self.labelPortName.split('/')[:3]) + '/event'
                self.portsList[self.eventPort].open(self.eventPortName)
                self.classTimestamps = []
                if self.recency is None:
                    logging.warning('No recency value specified for ' +
                                    self.driverName)
                    logging.warning('Setting value to default of 5 seconds')
                    self.recency = 5

            if self.svPort is None or self.labelPort is None or self.instancePort is None:
                logging.warning(
                    'Config file properties incorrect. Should look like this:')
                logging.warning('[Actions]')
                logging.warning('dataIn = /sam/actions/actionData:i Bottle')
                logging.warning('dataOut = /sam/actions/actionData:o Bottle')
                logging.warning('rpcBase = /sam/actions/rpc')
                logging.warning(
                    'callSign = ask_action_label, ask_action_instance')
                logging.warning('collectionMethod = buffered 3')

            # self.mm[0].configInteraction(self)
            self.inputType = self.portNameList[self.labelPort][1].split(
                ' ')[1].lower()
            self.outputType = self.portNameList[self.labelPort][1].split(
                ' ')[1].lower()
            self.dataList = []
            self.classificationList = []
            self.probClassList = []
            self.classTimestamps = []
            yarp.Network.init()

            self.mm = initialiseModels(
                [self.dataPath, self.modelPath, self.driverName],
                'update',
                'interaction',
                drawLatent=self.drawLatent)
            self.modelLoaded = True
            if self.drawLatent:
                self.latentPlots = dict()
                self.latentPlots['ax'], self.latentPlots['dims'] = self.mm[
                    0].SAMObject.visualise(plot_scales=True)
                self.sendLatent(self.latentPlots['ax'])

            if self.mm[0].model_mode != 'temporal':
                self.bufferSize = proposedBuffer
            elif self.mm[0].model_mode == 'temporal':
                self.bufferSize = self.mm[0].temporalModelWindowSize

            self.portsList[self.labelPort].open(self.labelPortName)
            self.portsList[self.instancePort].open(self.instancePortName)
            # self.test()

            return True
        else:
            logging.error('Section ' + str(self.modelRoot) + ' not found in ' +
                          str(self.configPath))
            return False
Example #19
    def configure(self, rf):
        self.lock = threading.Lock()

        self.config = yarp.Property()
        self.config.fromConfigFile(
            '/code/icub_perceptual_optimisation/yarp/config.ini')
        self.width = self.config.findGroup('CAMERA').find(
            'yarp_width').asInt32()
        self.height = self.config.findGroup('CAMERA').find(
            'yarp_height').asInt32()
        self.target_width = self.config.findGroup('CAMERA').find(
            'target_width').asInt32()
        self.target_height = self.config.findGroup('CAMERA').find(
            'target_height').asInt32()
        self.max_dataset_size = self.config.findGroup('GENERAL').find(
            'max_dataset_size').asInt32()

        if self.config.findGroup('GENERAL').find('show_images').asBool():
            import matplotlib
            matplotlib.use('TKAgg')
            self.ax_left = plt.subplot(1, 2, 1)
            self.ax_right = plt.subplot(1, 2, 2)

        # Create a port and connect it to the iCub simulator virtual camera
        self.input_port_cam = yarp.Port()
        self.input_port_cam.open("/dataCollector/camera_left")
        yarp.Network.connect("/icubSim/cam/left", "/dataCollector/camera_left")

        self.input_port_skin_left_hand = yarp.Port()
        self.input_port_skin_left_hand.open(
            "/dataCollector/skin/left_hand_comp")  #
        yarp.Network.connect("/icubSim/skin/left_hand_comp",
                             "/dataCollector/skin/left_hand_comp")

        self.input_port_skin_left_forearm = yarp.Port()
        self.input_port_skin_left_forearm.open(
            "/dataCollector/skin/left_forearm_comp")  #
        yarp.Network.connect("/icubSim/skin/left_forearm_comp",
                             "/dataCollector/skin/left_forearm_comp")

        self.input_port_command = yarp.Port()
        self.input_port_command.open("/dataCollector/command")  #
        yarp.Network.connect("/client_babbling/left_arm/command",
                             "/dataCollector/command")

        #self.input_port_skin.read(False);    #  clean the buffer
        #self.input_port_skin.read(False);    #  clean the buffer

        # prepare image
        self.yarp_img_in = yarp.ImageRgb()
        self.yarp_img_in.resize(self.width, self.height)
        self.img_array = np.ones((self.height, self.width, 3), dtype=np.uint8)
        # yarp image will be available in self.img_array
        self.yarp_img_in.setExternal(self.img_array.data, self.width,
                                     self.height)

        # prepare motor driver
        self.head_motors = 'head'
        self.head_motorprops = yarp.Property()
        self.head_motorprops.put("device", "remote_controlboard")
        self.head_motorprops.put("local",
                                 "/client_datacollector/" + self.head_motors)
        self.head_motorprops.put("remote", "/icubSim/" + self.head_motors)

        self.left_motors = 'left_arm'
        self.left_motorprops = yarp.Property()
        self.left_motorprops.put("device", "remote_controlboard")
        self.left_motorprops.put("local",
                                 "/client_datacollector/" + self.left_motors)
        self.left_motorprops.put("remote", "/icubSim/" + self.left_motors)

        #self.right_motors = 'right_arm'
        #self.right_motorprops = yarp.Property()
        #self.right_motorprops.put("device", "remote_controlboard")
        #self.right_motorprops.put("local", "/client_datacollector/" + self.right_motors)
        #self.right_motorprops.put("remote", "/icubSim/" + self.right_motors)

        # create remote driver
        self.head_driver = yarp.PolyDriver(self.head_motorprops)
        self.head_iEnc = self.head_driver.viewIEncoders()
        self.head_iPos = self.head_driver.viewIPositionControl()

        self.left_armDriver = yarp.PolyDriver(self.left_motorprops)
        self.left_iEnc = self.left_armDriver.viewIEncoders()
        self.left_iPos = self.left_armDriver.viewIPositionControl()

        #self.right_armDriver = yarp.PolyDriver(self.right_motorprops)
        #self.right_iEnc = self.right_armDriver.viewIEncoders()
        #self.right_iPos = self.right_armDriver.viewIPositionControl()

        #  number of joints
        self.num_joints = self.left_iPos.getAxes()
        self.head_num_joints = self.head_iPos.getAxes()

        moduleName = rf.check("name",
                              yarp.Value("DataCollectorModule")).asString()
        self.setName(moduleName)
        print('module name: ', moduleName)

        self.skin_left_hand_input = yarp.Bottle()
        self.skin_left_forearm_input = yarp.Bottle()
        #self.left_command_input = yarp.Bottle()
        self.left_command_input = yarp.Vector(self.num_joints)

        self.dataset_timestamps = []
        self.dataset_images = []
        self.dataset_skin_values = []
        self.dataset_joint_encoders = []
        self.dataset_motor_commands = []

        print('starting now')
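
A sketch of the per-cycle acquisition that typically follows the configure() above; the method body is illustrative, as the original updateModule is not shown in this excerpt.

    def updateModule(self):
        # Camera frame: pixels become available in self.img_array.
        self.input_port_cam.read(self.yarp_img_in)

        # Compensated skin readings arrive as Bottles.
        self.input_port_skin_left_hand.read(self.skin_left_hand_input)
        self.input_port_skin_left_forearm.read(self.skin_left_forearm_input)

        # Left-arm joint encoders, read through the remote_controlboard driver.
        encoders = yarp.Vector(self.num_joints)
        self.left_iEnc.getEncoders(encoders.data())
        return True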
Example #20
def main():
    # The state machine has the following states:
    # [VOID, INIT, FIND, SHOW, KEY, ICUB, WHICH, QUIT]
    #
    # VOID: Empty state, to use for test
    # INIT: It is called only once for the initialisation
    # FIND: use the hardware face and landmark detection libraries
    # SHOW: Print the image on screen using OpenCV
    # KEY: Check which key is pressed
    # ICUB: Pressing the (h) button makes the robot look in front of itself
    # WHICH: Pressing the (w) button asks the robot to look at an object on the table
    # QUIT: Pressing (q) unsubscribes and closes the script

    #Configuration Variables, adjust to taste
    #ICUB_IP = "192.168.0.100"
    #ICUB_PORT = 9559
    RECORD_VIDEO = True #If True record a video from the ICUB camera
    USE_FESTIVAL_TTS = True #To use the Festival Text To Speech
    #If you want to use Acapela TTS you have to fill the
    #following variable with the correct values
    #USE_ACAPELA_TTS = False
    #ACCOUNT_LOGIN = '******'
    #APPLICATION_LOGIN = '******'
    #APPLICATION_PASSWORD = '******'
    #SERVICE_URL = 'http://vaas.acapela-group.com/Services/Synthesizer'
    #The initial state
    STATE = "VOID" 

    while(True):

        #Empty STATE, to be used for test
        if(STATE == "VOID"):            
            STATE = "INIT"

        # The zero state is an init phase
        # In this state all the proxies are
        # created and the iCub subscribes to the services
        #
        elif(STATE=="INIT"):
            #Init some generic variables
            #This counter allows continuing the program flow
            #without calling a sleep
            which_counter = 0 #Counter increased when WHICH is called
            which_counter_limit = 30 #Limit for the which counter

            #Init YARP
            print("[STATE " + str(STATE) + "] " + "YARP init" + "\n")
            yarp.Network.init()
            
            #Camera connection
            try:
                print("[STATE " + str(STATE) + "] " + "Waiting for '/icubSim/cam/left' ..." + "\n")
                cam_w = 320 #640
                cam_h = 240 #480
                input_port = yarp.Port()
                input_port.open("/pyera-image-port")
                yarp.Network.connect("/icubSim/cam/left", "/pyera-image-port")
                img_array = np.zeros((cam_h, cam_w, 3), dtype=np.uint8)
                yarp_image = yarp.ImageRgb()
                yarp_image.resize(cam_w, cam_h)
                yarp_image.setExternal(img_array, img_array.shape[1], img_array.shape[0])
            except BaseException, err:
                print("[ERROR] connect To Camera catching error " + str(err))
                return

            #Head connection and Reset
            print("[STATE " + str(STATE) + "] " + "Waiting for '/icubSim/head/rpc:i' ..." + "\n")
            rpc_client = yarp.RpcClient()
            rpc_client.addOutput("/icubSim/head/rpc:i")
            print("[STATE " + str(STATE) + "] " + "Reset the head position..." + "\n")
            set_icub_head_pose(rpc_client, roll=0, pitch=0, yaw=0)

            #Initialise the OpenCV video recorder
            if(RECORD_VIDEO == True):
                print("[STATE " + str(STATE) + "] " + "Starting the video recorder..." + "\n")
                fourcc = cv2.cv.CV_FOURCC(*'XVID')
                video_out = cv2.VideoWriter("./output.avi", fourcc, 20.0, (cam_w,cam_h))

            #Init dlib Face detector
            #my_face_detector = dlib.get_frontal_face_detector()

            #Init the Deepgaze face detector
            my_cascade = haarCascade("./haarcascade_frontalface_alt.xml", "./haarcascade_profileface.xml")

            #Talking            
            if(USE_FESTIVAL_TTS == True): 
                print("[STATE " + str(STATE) + "] " + "Trying the TTS Festival..." + "\n")
                say_something("Hello World, I'm ready!")

            #if(USE_ACAPELA_TTS == True):                
                #print("[ACAPELA] Downloading the mp3 file...")
                #tts_acapela = acapela.Acapela(account_login=ACCOUNT_LOGIN, application_login=APPLICATION_LOGIN, application_password=APPLICATION_PASSWORD, 
                                              #service_url=SERVICE_URL, quality='22k', directory='/tmp/')    
                #tts_acapela.prepare(text="Hello world, I'm ready!", lang='US', gender='M', intonation='NORMAL')
                #output_filename = tts_acapela.run()
                #subprocess.Popen(["play","-q","/tmp/" + str(output_filename)])
                #print "[ACAPELA] Recorded TTS to %s" % output_filename           

            #Switch to the next state
            print("[STATE " + str(STATE) + "] " + "Switching to next state" + "\n")
            time.sleep(2)
            STATE = "FIND"

        # Get data from landmark detection and 
        # face detection.
        #
        elif(STATE=="FIND"):

            #Get Data for Face Detection
            #if(face_data and isinstance(face_data, list) and len(face_data) > 0):
                #print("[ICUB] Face detected!")
            #else:
                 #print("No face detected...")
                 #is_face_detected = False
            input_port.read(yarp_image)

            '''
            faces_array = my_face_detector(img_array, 1)
            print("Total Faces: " + str(len(faces_array)))
            for i, pos in enumerate(faces_array):

                face_x1 = pos.left()
                face_y1 = pos.top()
                face_x2 = pos.right()
                face_y2 = pos.bottom()
                text_x1 = face_x1
                text_y1 = face_y1 - 3

                cv2.putText(img_array, "FACE " + str(i+1), (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
                cv2.rectangle(img_array, 
                         (face_x1, face_y1), 
                         (face_x2, face_y2), 
                         (0, 255, 0), 
                          2)
            '''
            gray = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
            # Return code: 1=Frontal, 2=FrontRotLeft, 3=FronRotRight,
            #              4=ProfileLeft, 5=ProfileRight.
            my_cascade.findFace(gray, runFrontal=True, runFrontalRotated=False, runLeft=True, runRight=True, 
                                frontalScaleFactor=1.18, rotatedFrontalScaleFactor=1.18, leftScaleFactor=1.18, 
                                rightScaleFactor=1.18, minSizeX=70, minSizeY=70, rotationAngleCCW=30, 
                                rotationAngleCW=-30, lastFaceType=my_cascade.face_type)   

            face_x1 = my_cascade.face_x 
            face_y1 = my_cascade.face_y 
            face_x2 = my_cascade.face_x + my_cascade.face_w 
            face_y2 = my_cascade.face_y + my_cascade.face_h
            text_x1 = face_x1
            text_y1 = face_y1 - 3
            if(my_cascade.face_type == 1 or my_cascade.face_type == 2 or my_cascade.face_type == 3): cv2.putText(img_array, "FRONTAL", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
            elif(my_cascade.face_type == 4): cv2.putText(img_array, "LEFT", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
            elif(my_cascade.face_type == 5): cv2.putText(img_array, "RIGHT", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
            cv2.rectangle(img_array, 
                             (face_x1, face_y1), 
                             (face_x2, face_y2), 
                             (0, 255, 0),
                              2)

            is_face_detected = False
            STATE = "SHOW"
Example #21
def tactile_prcptn_yarp():
    # Open and connect YARP-Port to read right upper arm skin sensor data
    input_port_skin_rarm = yarp.Port()
    if not input_port_skin_rarm.open("/" + CLIENT_PREFIX + "/skin_read/rarm"):
        print("[ERROR] Could not open skin arm port")
    if not yarp.Network.connect("/" + ROBOT_PREFIX + "/skin/right_arm_comp",
                                input_port_skin_rarm.getName()):
        print("[ERROR] Could not connect skin arm port!")
    # Open and connect YARP-Port to read right forearm skin sensor data
    input_port_skin_rforearm = yarp.Port()
    if not input_port_skin_rforearm.open("/" + CLIENT_PREFIX +
                                         "/skin_read/rforearm"):
        print("[ERROR] Could not open skin forearm port!")
    if not yarp.Network.connect(
            "/" + ROBOT_PREFIX + "/skin/right_forearm_comp",
            input_port_skin_rforearm.getName()):
        print("[ERROR] Could not connect skin forearm port!")
    # Open and connect YARP-Port to read right hand skin sensor data
    input_port_skin_rhand = yarp.Port()
    if not input_port_skin_rhand.open("/" + CLIENT_PREFIX +
                                      "/skin_read/rhand"):
        print("[ERROR] Could not open skin hand port!")
    if not yarp.Network.connect("/" + ROBOT_PREFIX + "/skin/right_hand_comp",
                                input_port_skin_rhand.getName()):
        print("[ERROR] Could not connect skin hand port!")
    ######################################################################
    ####################### Read skin sensor data ########################
    ######################################################################
    for i in range(10):
        tactile_rarm = yarp.Vector(768)
        while (not input_port_skin_rarm.read(tactile_rarm)):
            yarp.delay(0.001)
        tactile_rforearm = yarp.Vector(384)
        while (not input_port_skin_rforearm.read(tactile_rforearm)):
            yarp.delay(0.001)

        tactile_rhand = yarp.Vector(192)
        while (not input_port_skin_rhand.read(tactile_rhand)):
            yarp.delay(0.001)

        data_rarm = []
        data_rforearm = []
        data_rhand = []
        for j in range(len(skin_idx_r_arm)):
            if skin_idx_r_arm[j] > 0:
                data_rarm.append(tactile_rarm.get(j))
        for j in range(len(skin_idx_r_forearm)):
            if skin_idx_r_forearm[j] > 0:
                data_rforearm.append(tactile_rforearm.get(j))
        for j in range(len(skin_idx_r_hand)):
            if skin_idx_r_hand[j] > 0:
                data_rhand.append(tactile_rhand.get(j))
        print("Data arm:")
        print(data_rarm)
        print("Data forearm:")
        print(data_rforearm)
        print("Data hand:")
        print(data_rhand)
        time.sleep(2.0)
    ######################################################################
    ######################## Closing the program: ########################
    #### Delete objects/models and close ports, network, motor control ####
    print('----- Close opened ports -----')
    # disconnect the ports
    if not yarp.Network.disconnect("/" + ROBOT_PREFIX + "/skin/right_arm_comp",
                                   input_port_skin_rarm.getName()):
        print("[ERROR] Could not disconnect skin arm port!")
    if not yarp.Network.disconnect(
            "/" + ROBOT_PREFIX + "/skin/right_forearm_comp",
            input_port_skin_rforearm.getName()):
        print("[ERROR] Could not disconnect skin forearm port!")
    if not yarp.Network.disconnect(
            "/" + ROBOT_PREFIX + "/skin/right_hand_comp",
            input_port_skin_rhand.getName()):
        print("[ERROR] Could not disconnect skin hand port!")
    # close the ports
    input_port_skin_rarm.close()
    input_port_skin_rforearm.close()
    input_port_skin_rhand.close()
    yarp.Network.fini()
Example #22
def main():

    # module parameters
    expID = 39

    maxIterations = [    77,    14,   134,    66,    34,    81,    52,    31,     48,    66]

    proximalJointStartPos = 40
    distalJointStartPos = 0
    joint1StartPos = 18
    #                    0               1   2   3   4   5   6   7   8   9  10  11  12                    13                  14  15
    startingPosEncs = [-44, joint1StartPos, -4, 39,-14,  2,  2, 18, 10,  0,163,  0,  0,proximalJointStartPos,distalJointStartPos,  0]   
    #                        0   1   2   3   4   5
    headStartingPosEncs = [-29,  0, 18,  0,  0,  0]
    actionEnabled = True

    rolloutsNumFirst = 30
    rolloutsNumStd = 10

    finger = 1
    proximalJoint = 13
    distalJoint = 14
    proximalJointEnc = 6
    distalJointEnc_1 = 7
    distalJointEnc_2 = 8

    resetProbability = 0.02

    actionDuration = 0.25
    pauseDuration = 0.0

    maxFbAngle = math.pi
    minFbAngle = -math.pi
    maxFbAngleDifference = math.pi/3.0
    fbAngleRange = maxFbAngle - minFbAngle

    normalizedMaxVoltageY = 1.0
    maxVoltageProxJointY = 250.0
    maxVoltageDistJointY = 800.0
    slopeAtMaxVoltageY = 1.0

    waitTimeForFingersRepositioning = 7.0

    dataDumperPortName = "/gpc/log:i"
    iCubIconfigFileName = "iCubInterface.txt"
    inputFilePath = "./"
    initInputFileName = "controller_init_roll_fing.txt"
    standardInputFileName = "controller_input.txt"
    outputFilePath = "./"
    outputFileName = "controller_output.txt"
    dataPath = "./data/experiments/"
  
    jointsToActuate = [proximalJoint,distalJoint]
    
    fileNameIterID = "iterationID.txt"
    fileNameExperimentID = "experimentID.txt"
    fileNameExpParams = "parameters.txt"

    isNewExperiment = False
    if len(sys.argv) > 1:
        if sys.argv[1] == 'new':
            isNewExperiment = True 
    
    expID = readValueFromFile(fileNameExperimentID)        
    if isNewExperiment:
        expID = expID + 1
        writeIntoFile(fileNameExperimentID,str(expID))

    # create output folder name
    experimentFolderName = dataPath + "exp_" + str(expID) + "/"  # could be extended with more information about the experiment
    print expID,isNewExperiment
    if os.path.exists(experimentFolderName):
        # get iteration ID
        iterID = readValueFromFile(fileNameIterID)        
        writeIntoFile(fileNameIterID,str(iterID+1))    
        inputFileFullName = inputFilePath + standardInputFileName
        rolloutsNum = rolloutsNumStd
    else:
        # create directory, create an experiment description file and reset iteration ID
        os.mkdir(experimentFolderName)
        descriptionData = ""
        descriptionData = descriptionData + "proximalJointMaxVoltage " + str(maxVoltageProxJointY) + "\n"
        descriptionData = descriptionData + "distalJointMaxVoltage " + str(maxVoltageDistJointY) + "\n"
        descriptionData = descriptionData + "slopeAtMaxVoltage " + str(slopeAtMaxVoltageY) + "\n"
        descriptionData = descriptionData + "actionDuration " + str(actionDuration) + "\n"
        descriptionData = descriptionData + "pauseDuration " + str(pauseDuration) + "\n"
        descriptionData = descriptionData + "finger " + str(finger) + "\n"
        descriptionData = descriptionData + "jointActuated " + str(proximalJoint) + " " + str(distalJoint) + "\n"
        descriptionData = descriptionData + "jointStartingPositions " + str(proximalJointStartPos) + " " + str(distalJointStartPos) + "\n"
        descriptionData = descriptionData + "resetProbabilty " + str(resetProbability) + "\n"
        descriptionData = descriptionData + "additionaNotes " + "" + "\n"
        writeIntoFile(experimentFolderName + fileNameExpParams,descriptionData)
        iterID = 0
        writeIntoFile(fileNameIterID,"1")
        inputFileFullName = inputFilePath + initInputFileName
        rolloutsNum = rolloutsNumFirst

    outputInputFileSuffix = str(expID) + "_" + str(iterID)
    backupOutputFileFullName = experimentFolderName + "contr_out_" + outputInputFileSuffix + ".txt"
    backupInputFileFullName = experimentFolderName + "contr_in_" + outputInputFileSuffix + ".txt"
    outputFileFullName = outputFilePath + outputFileName

    # calculate voltageX-voltageY mapping parameters (voltageY = k*(voltageX^(1/3)))
    k = pow(3*slopeAtMaxVoltageY*(pow(normalizedMaxVoltageY,2)),(1/3.0))

    maxVoltageX = pow(normalizedMaxVoltageY/k,3)
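
    # A small sanity-check sketch (not part of the original controller, added for
    # illustration): with voltageY = k*voltageX**(1/3), this choice of k makes the
    # normalized output reach normalizedMaxVoltageY exactly at maxVoltageX, with
    # slope slopeAtMaxVoltageY at that point.
    def normalizedMapping(voltage_x):
        # cube-root mapping in normalized units, same form as used below
        return k * pow(abs(voltage_x), 1 / 3.0) * np.sign(voltage_x)

    assert abs(normalizedMapping(maxVoltageX) - normalizedMaxVoltageY) < 1e-9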

    # load gaussian process controller
    gp = gpc.GPController(inputFileFullName)
    gp.load_controller()

    # load iCub interface
    iCubI = iCubInterface.ICubInterface(dataDumperPortName,iCubIconfigFileName)
    iCubI.loadInterfaces()

    # cameras port
    cameraPort = yarp.Port()
    cameraPortName = "/gpc/leftEye"
    cameraPort.open(cameraPortName)
    yarp.Network.connect("/icub/cam/left",cameraPortName)

    # image settings
    width = 640
    height = 480 
    # Create numpy array to receive the image and the YARP image wrapped around it
    img_array = np.zeros((height, width, 3), dtype=np.uint8)
    yarp_image = yarp.ImageRgb()
    yarp_image.resize(width, height)
    yarp_image.setExternal(img_array, img_array.shape[1], img_array.shape[0])
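
    # 'readImage' is called below but not defined in this snippet; a minimal
    # sketch of what it is assumed to do (blocking read into the wrapped image,
    # plus a check that YARP did not reallocate the external buffer). The name
    # readImageSketch is hypothetical, not the original implementation.
    def readImageSketch(port, image):
        port.read(image)
        if image.getRawImage().__int__() != img_array.__array_interface__['data'][0]:
            print "warning: read() reallocated the external image buffer"
        return image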

    # set start position
    if actionEnabled:
        iCubI.setArmPosition(startingPosEncs)
        iCubI.setHeadPosition(headStartingPosEncs)
        #iCubI.setRefVelocity(jointsToActuate,100)

    # wait for the user
    raw_input("- press enter to start the controller -")

    fd = open(outputFileFullName,"w")
    fd.write("nrollouts: ")
    fd.write(str(rolloutsNum))
    fd.write("\n")
    fd.close()
    
    # initialize velocity mode
    if actionEnabled:
        iCubI.setOpenLoopMode(jointsToActuate)

    rolloutsCounter = 0
    while rolloutsCounter < rolloutsNum:

        print "starting iteration n. ",rolloutsCounter + 1
        fd = open(outputFileFullName,"a")
        fd.write("# HEADER ")
        fd.write(str(rolloutsCounter + 1))
        fd.write("\n")

        iterCounter = 0
        exit = False
        voltage = [0,0]
        oldVoltage = [0,0]
        realVoltage = [0,0]
        readImage(cameraPort,yarp_image)
#        currentFbAngle = getFeedbackAngle(yarp_image,img_array)
        # main loop
        while iterCounter < maxIterations[rolloutsCounter%10] and not exit:

            # read tactile data
            fullTactileData = iCubI.readTactileData()
            tactileData = []              
            for j in range(12):
                tactileData.append(fullTactileData.get(12*finger+j).asDouble())
            #print np.sum(tactileData[0:12])
            # read encoders data from port
            fullEncodersData = iCubI.readEncodersDataFromPort()
            encodersData = []
            encodersData.append(fullEncodersData.get(proximalJointEnc).asDouble())
            encodersData.append(fullEncodersData.get(distalJointEnc_1).asDouble())
            encodersData.append(fullEncodersData.get(distalJointEnc_2).asDouble())

            state = [tactileData,encodersData,voltage]

            # store image to be processed while action is applied
            readImage(cameraPort,yarp_image)

            # choose action
            action = gp.get_control(state)


            # update and cut voltage
            oldVoltage[0] = voltage[0]
            oldVoltage[1] = voltage[1]
            voltage[0] = action[0] #voltage[0] + action[0];
            voltage[1] = action[1] #voltage[1] + action[1];
            #if abs(voltage[0]) > maxVoltageX:
            #    voltage[0] = maxVoltageX*np.sign(voltage[0])
            #if abs(voltage[1]) > maxVoltageX:
            #    voltage[1] = maxVoltageX*np.sign(voltage[1])

            # calculate real applied voltage
            realVoltage[0] = maxVoltageProxJointY*k*pow(abs(voltage[0]),1/3.0)*np.sign(voltage[0])
            realVoltage[1] = maxVoltageDistJointY*k*pow(abs(voltage[1]),1/3.0)*np.sign(voltage[1])

            # voltage safety check (it should never happen!)
            if abs(realVoltage[0]) > maxVoltageProxJointY:
                realVoltage[0] = maxVoltageProxJointY*np.sign(realVoltage[0])
                print 'warning, voltage out of bounds!'
            if abs(realVoltage[1]) > maxVoltageDistJointY:
                realVoltage[1] = maxVoltageDistJointY*np.sign(realVoltage[1])
                print 'warning, voltage out of bounds!'


            # apply action
            if actionEnabled:
                iCubI.openLoopCommand(proximalJoint,realVoltage[0])        
                iCubI.openLoopCommand(distalJoint,realVoltage[1])

            # get feedback angle
#            previousFbAngle = currentFbAngle
            beforeTS = time.time()
           # if rolloutsCounter == 0 and iterCounter < 50:
           # matplotlib.image.imsave('images/test_'+ str(rolloutsCounter) + '_' + str(iterCounter) +'.tiff', img_array, format='tiff')
#            currentFbAngle = getFeedbackAngle(yarp_image,img_array)
#            fbAngleDifference = calculateFeedbackAngleDifference(previousFbAngle,currentFbAngle,fbAngleRange)
#            if abs(fbAngleDifference) > maxFbAngleDifference:
#                currentFbAngle = previousFbAngle
#                fbAngleDifference = 0.0
#            print fbAngleDifference
            afterTS = time.time()
            timeToSleep = max(actionDuration-(afterTS-beforeTS),0)
            time.sleep(timeToSleep)


            #print "curr ",previousFbAngle*180/3.1415,"diff ",fbAngleDifference*180/3.1415,afterTS - beforeTS,timeToSleep


            # wait for stabilization
            time.sleep(pauseDuration)
 
            # log data
            iCubI.logData(tactileData + encodersData + oldVoltage + voltage)#[action[0],action[1]])
            logArray(tactileData,fd)
            logArray(encodersData,fd)
            logArray(oldVoltage,fd)
            logArray(action,fd)
            fbAngleDifference = 0  # TODO: remove
            logArray([fbAngleDifference],fd)
            fd.write("\n")

            #print 'prev ',previousFbAngle*100/3.1415,'curr ',currentFbAngle*100/3.1415,'diff ',fbAngleDifference*100/3.1415


            iterCounter = iterCounter + 1
            exit = False #exitModule(resetProbability)

        fd.close()

        if actionEnabled:
            print "finger ripositioning..."
            # finger repositioning
            iCubI.setPositionMode(jointsToActuate)
            iCubI.setJointPosition(1,joint1StartPos + 12)
            time.sleep(1)
            iCubI.setJointPosition(proximalJoint,proximalJointStartPos)
            iCubI.setJointPosition(distalJoint,distalJointStartPos)
            time.sleep(3)
            iCubI.setJointPosition(1,joint1StartPos)
            time.sleep(2)
            iCubI.setOpenLoopMode(jointsToActuate)


#            iCubI.setPositionMode(jointsToActuate)
#            iCubI.setJointPosition(proximalJoint,0.0)
#            iCubI.setJointPosition(distalJoint,0.0)
#            time.sleep(waitTimeForFingersRepositioning)
#            iCubI.setJointPosition(proximalJoint,proximalJointStartPos)
#            iCubI.setJointPosition(distalJoint,distalJointStartPos)
#            time.sleep(waitTimeForFingersRepositioning)
#            iCubI.setOpenLoopMode(jointsToActuate)
            print "...done"


        rolloutsCounter = rolloutsCounter + 1
            
    os.system("cp " + inputFileFullName + " " + backupInputFileFullName)
    os.system("cp " + outputFileFullName + " " + backupOutputFileFullName)

    # copy input and output file
    # restore position mode and close iCubInterface
    if actionEnabled:
        iCubI.setPositionMode(jointsToActuate)
    cameraPort.close()
    iCubI.closeInterface()
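
# The file helpers used above (readValueFromFile, writeIntoFile, logArray) are
# not shown in this snippet; minimal sketches are given below under the
# assumption that they wrap plain text files and space-separated log lines.
# The *Sketch names are placeholders, not the original functions.
def readValueFromFileSketch(fileName):
    # read a single integer stored in a text file
    with open(fileName, "r") as f:
        return int(f.read().strip())

def writeIntoFileSketch(fileName, data):
    # overwrite the file with the given string
    with open(fileName, "w") as f:
        f.write(data)

def logArraySketch(values, fd):
    # append the values to an already opened log file, space separated
    for v in values:
        fd.write(str(v) + " ")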
示例#23
0
	def __init__(self):
		yarp.RFModule.__init__(self)
		self.handlerPort = yarp.Port()
示例#24
0
def visual_input_yarp():
    ######################################################################
    ######################### Init YARP network ##########################

    print('----- Init network -----')
    # network initialization and check
    yarp.Network.init()
    if not yarp.Network.checkNetwork():
        print('[ERROR] Please try running yarp server')

    print('----- Open ports -----')
    # Initialization of all needed ports
    # Port for right eye image
    input_port_right_eye = yarp.Port()
    if not input_port_right_eye.open("/" + CLIENT_PREFIX + "/eyes/right"):
        print("[ERROR] Could not open right eye port")
    if not yarp.Network.connect("/" + ROBOT_PREFIX + "/cam/right",
                                "/" + CLIENT_PREFIX + "/eyes/right"):
        print("[ERROR] Could not connect input_port_right_eye")

    # Port for left eye image
    input_port_left_eye = yarp.Port()
    if not input_port_left_eye.open("/" + CLIENT_PREFIX + "/eyes/left"):
        print("[ERROR] Could not open left eye port")
    if not yarp.Network.connect("/" + ROBOT_PREFIX + "/cam/left",
                                "/" + CLIENT_PREFIX + "/eyes/left"):
        print("[ERROR] Could not connect input_port_left_eye")

    ######################################################################
    ################## Initialization of both eye images #################

    print('----- Init image array structures -----')
    # Create numpy array to receive the image and the YARP image wrapped around it
    left_eye_img_array = np.ones((240, 320, 3), np.uint8)
    left_eye_yarp_image = yarp.ImageRgb()
    left_eye_yarp_image.resize(320, 240)

    right_eye_img_array = np.ones((240, 320, 3), np.uint8)
    right_eye_yarp_image = yarp.ImageRgb()
    right_eye_yarp_image.resize(320, 240)

    left_eye_yarp_image.setExternal(left_eye_img_array.data,
                                    left_eye_img_array.shape[1],
                                    left_eye_img_array.shape[0])
    right_eye_yarp_image.setExternal(right_eye_img_array.data,
                                     right_eye_img_array.shape[1],
                                     right_eye_img_array.shape[0])

    ######################################################################
    ################### Read camera images from robot ####################
    print('----- Read images from robot cameras -----')
    for i in range(5):
        print("Image:", i)
        # Read the images from the robot cameras
        input_port_left_eye.read(left_eye_yarp_image)
        input_port_left_eye.read(left_eye_yarp_image)
        input_port_right_eye.read(right_eye_yarp_image)
        input_port_right_eye.read(right_eye_yarp_image)

        if left_eye_yarp_image.getRawImage().__int__(
        ) != left_eye_img_array.__array_interface__['data'][0]:
            print("read() reallocated my left_eye_yarp_image!")
        if right_eye_yarp_image.getRawImage().__int__(
        ) != right_eye_img_array.__array_interface__['data'][0]:
            print("read() reallocated my right_eye_yarp_image!")

        # show images
        plt.figure(figsize=(10, 5))
        plt.tight_layout()
        plt.subplot(121)
        plt.title("Left camera image")
        plt.imshow(left_eye_img_array)

        plt.subplot(122)
        plt.title("Right camera image")
        plt.imshow(right_eye_img_array)
        plt.show()

    ######################################################################
    ######################## Closing the program: ########################
    #### Delete objects/models and close ports, network, motor control ####
    print('----- Close opened ports -----')

    # disconnect the ports
    if not yarp.Network.disconnect("/" + ROBOT_PREFIX + "/cam/right",
                                   input_port_right_eye.getName()):
        print("[ERROR] Could not disconnect input_port_right_eye")

    if not yarp.Network.disconnect("/" + ROBOT_PREFIX + "/cam/left",
                                   input_port_left_eye.getName()):
        print("[ERROR] Could not disconnect input_port_left_eye")

    # close the ports
    input_port_right_eye.close()
    input_port_left_eye.close()

    # close the yarp network
    yarp.Network.fini()
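
# A possible refactoring sketch (assumption, not part of the original function):
# the double read plus reallocation check done for each eye above could be
# wrapped in one helper; the name grab_frame is hypothetical.
def grab_frame(port, yarp_image, img_array):
    """Read twice (as the original loop does), then verify that YARP still
    writes into the externally wrapped numpy buffer."""
    port.read(yarp_image)
    port.read(yarp_image)
    if yarp_image.getRawImage().__int__() != img_array.__array_interface__['data'][0]:
        print("[WARNING] read() reallocated the wrapped image buffer")
    return img_array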
示例#25
0
    def __init__(
        self,
        robot,
        roi=roi,
    ):
        if robot == 'icub':
            self.input_port = yarp.Port()
            self.input_port.open("/python-image-port")
            yarp.Network.connect("/icubSim/cam/left", "/python-image-port")
        elif robot == "nao":
            IP = "127.0.0.1"  # Replace here with your NAOqi's IP address.
            PORT = 9559
            self.camProxy = ALProxy("ALVideoDevice", IP, PORT)

            resolution = vision_definitions.kQVGA
            colorSpace = vision_definitions.kYUVColorSpace
            fps = 30

            # subscribe top camera
            AL_kTopCamera = 0
            AL_kQVGA = 1  # 320x240
            AL_kBGRColorSpace = 13
            self.nameId = self.camProxy.subscribeCamera(
                "test", AL_kTopCamera, AL_kQVGA, AL_kBGRColorSpace, 10)
            # self.nameId = self.camProxy.subscribe("python_GVM", resolution, colorSpace, fps)
            print self.nameId
            # Create proxy on ALVideoDevice

            print "Creating ALVideoDevice proxy to ", IP
            self.width = 320
            self.height = 240
            self.image = numpy.zeros((self.height, self.width, 3), numpy.uint8)

        self.roi = roi
        # while(True):
        #
        #
        #
        #     self.yarp_to_python()
        #     if cv2.waitKey(1) & 0xFF == ord('q'):
        #         break
        self.pub = rospy.Publisher('chatter', String, queue_size=10)
        rospy.init_node('talker', anonymous=True)
        rate = rospy.Rate(10)  # 10hz
        self.list = []
        self.max_len = 10
        while not rospy.is_shutdown():
            if robot == "icub":
                self.yarp_to_python()
            elif robot == "nao":
                self.naoqi_to_python()
            green = self.list.count(1)
            not_green = self.list.count(0)
            green_perc = 0
            if green + not_green > 0:
                green_perc = float(green) / (green + not_green)

            if green_perc > 0.7:
                self.pub.publish('green')
            else:
                self.pub.publish('')

            # hello_str = "hello world %s" % rospy.get_time()

            rate.sleep()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
示例#26
0
#!/usr/bin/env python

import yarp
import time

#Open network
yarp.Network.init()

# Open the RPC port
toHomeo = yarp.Port()
homeoPortName="/manager/toHomeostasis/rpc"+":o"#"/NNsound:i"    
toHomeo.open(homeoPortName)

toAllo = yarp.Port()
alloPortName="/manager/toAllostasis/rpc"+":o"#"/NNsound:i"    
toAllo.open(alloPortName)

toBM = yarp.Port()
BMPortName="/manager/BehaviorManager/rpc"+":o"#"/NNsound:i"    
toBM.open(BMPortName)

toPlanner = yarp.Port()
plannerPortName="/manager/toPlanner/rpc:o"
toPlanner.open(plannerPortName)

homeoRPC = "/homeostasis/rpc"
alloRPC = "/AllostaticController/rpc"
BMRPC = "/BehaviorManager/rpc"
plannerRPC = "/planner/rpc"

print yarp.Network.connect(homeoPortName,homeoRPC)
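
# A minimal usage sketch (assumption, not part of the original script): once a
# connection is established, RPC commands are sent as Bottles and the reply is
# read back through the same port. The "help" verb is only an illustrative
# placeholder, not a documented command of the homeostasis module.
cmd = yarp.Bottle()
reply = yarp.Bottle()
cmd.addString("help")
toHomeo.write(cmd, reply)
print reply.toString()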
示例#27
0
    def configure(self, rf):
        yarp.Network.init()
        self.SIGNALS_TO_NAMES_DICT = dict((getattr(signal, n), n) \
            for n in dir(signal) if n.startswith('SIG') and '_' not in n )

        self.terminal = 'xterm'

        rootPath = rf.check("root_path")
        interactionConfPath = rf.check("config_path")

        if (interactionConfPath == False and rootPath == False):
            print "Cannot find .ini settings"
            return False
        else:
            self.rootPath = rf.find("root_path").asString()
            self.interactionConfPath = rf.find("config_path").asString()
            persistence = rf.check("persistence",
                                   yarp.Value("False")).asString()
            windowed = rf.check("windowed", yarp.Value("True")).asString()
            verbose = rf.check("verbose", yarp.Value("True")).asString()

            self.persistence = True if (persistence == "True") else False
            self.windowed = True if (windowed == "True") else False
            self.verbose = True if (verbose == "True") else False

            print 'Root supervisor path:     \t', self.rootPath
            print 'Model configuration file: \t', self.interactionConfPath
            print 'Bash Persistence set to:  \t', self.persistence
            print 'Windowed set to:          \t', self.windowed
            print 'Verbose set to:           \t', self.verbose

            self.modelPath = self.rootPath + '/Models'
            self.dataPath = self.rootPath + '/Data'
            #OLD
            # self.trainingFunctionsPath = os.environ.get("WYSIWYD_DIR")+"/bin"
            #NEW
            self.trainingFunctionsPath = SAM.SAM_Drivers.__path__
            self.trainingListHandles = dict(
            )  #make this a dict to have a label attached to each subprocess
            self.loadedListHandles = dict()
            self.iter = 0
            self.rpcConnections = []
            self.inputBottle = yarp.Bottle()
            self.sendingBottle = yarp.Bottle()
            self.responseBottle = yarp.Bottle()
            self.outputBottle = yarp.Bottle()

            if (not self.windowed): self.devnull = open('/dev/null', 'w')

            out = yarp.Bottle()
            self.checkAvailabilities(out)
            if (self.verbose): print out.toString()

            self.supervisorPort = yarp.Port()
            self.supervisorPort.open('/sam/rpc:i')
            self.attach(self.supervisorPort)

            cmd = 'ipcluster start -n 4'
            command = "bash -c \"" + cmd + "\""

            if self.windowed:
                c = subprocess.Popen([self.terminal, '-e', command],
                                     shell=False)
            else:
                c = subprocess.Popen([cmd],
                                     shell=True,
                                     stdout=self.devnull,
                                     stderr=self.devnull)

            self.trainingListHandles['Cluster'] = c

            if len(self.uptodateModels) + len(self.updateModels) > 0:
                if self.verbose:
                    print "Loading models according to " + self.interactionConfPath
                # start loading model configuration according to interactionConfPath file

                rfModel = yarp.ResourceFinder()
                rfModel.setVerbose(self.verbose)
                rfModel.setDefaultContext("samSupervisor")
                self.interactionConfFile = rfModel.findFile(
                    self.interactionConfPath)

                # Iterate over all sections within the interactionConfPath,
                # create a list and check against the available models
                # warn if model specified in interactionConfPath not loadable
                self.interactionParser = SafeConfigParser()
                self.interactionParser.read(self.interactionConfFile)
                self.interactionSectionList = self.interactionParser.sections()
                if self.verbose:
                    print
                    print self.dataPath
                    print self.interactionSectionList
                    print
                for j in self.interactionSectionList:
                    command = yarp.Bottle()
                    command.addString("load")
                    command.addString(j)
                    if self.verbose:
                        print command.toString()
                    reply = yarp.Bottle()

                    self.loadModel(reply, command)
                    if self.verbose:
                        print reply.toString()
                        print "-----------------------------------------------"
                        print

            elif len(self.noModels) > 0:
                if self.verbose:
                    print "Models available for training."
                # Train a model according to interactionConfPath file
            else:
                if self.verbose:
                    print "No available models to load or train"
                # wait for a training command

            return True
示例#28
0
        try:
            if icub_root.find("Sim") > -1:
                print(
                    "[ICUB] Simulation Mode, connecting grabber to texture/screen "
                )
                # yarp connect /grabber /icubSim/texture/screen
                yarp.Network.connect("/grabber", icub_root + "/texture/screen")
        except BaseException, err:
            print(
                "[ICUB][ERROR] connecting /grabber to /texture/screen catching error "
                + str(err))
            return

        try:
            self.port_ikin_mono = yarp.Port()
            self.port_ikin_mono.open("/pyera-ikin-mono")
            yarp.Network.connect("/pyera-ikin-mono", "/iKinGazeCtrl/mono:i")
        except BaseException, err:
            print(
                "[ICUB][ERROR] connect To iKinGazeCtrl/mono catching error " +
                str(err))
            return

        try:
            self.port_ikin_stereo = yarp.Port()
            self.port_ikin_stereo.open("/pyera-ikin-stereo")
            yarp.Network.connect("/pyera-ikin-stereo",
                                 "/iKinGazeCtrl/stereo:i")
        except BaseException, err:
            print(
                "[ICUB][ERROR] connect To iKinGazeCtrl/stereo catching error " +
                str(err))
            return
#! /usr/bin/env python

import numpy
import yarp
import matplotlib.pylab
import time

yarp.Network.init()

if not yarp.Network.checkNetwork():
    print "[error] Please try running yarp server"
    quit()

input_port = yarp.Port()
input_port.open("/python/ecroSim/depthImage:i")
if not yarp.Network.connect("/ecroSim/depthImage:o",
                            "/python/ecroSim/depthImage:i"):
    print "[error] Could not connect"
    quit()

# Just once to get measurements
yarp_tmp_image = yarp.ImageFloat()
input_port.read(yarp_tmp_image)
height = yarp_tmp_image.height()
width = yarp_tmp_image.width()

# Create numpy array to receive the image and the YARP image wrapped around it
img_array = numpy.zeros((height, width), dtype=numpy.float32)
yarp_image = yarp.ImageFloat()
yarp_image.resize(width, height)
yarp_image.setExternal(img_array, img_array.shape[1], img_array.shape[0])
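
# A short continuation sketch (assumption, not in the original snippet): read the
# next depth frame into the wrapped numpy array and display it with the
# matplotlib.pylab module imported above.
input_port.read(yarp_image)
if yarp_image.getRawImage().__int__() != img_array.__array_interface__['data'][0]:
    print "[warning] read() reallocated the wrapped image buffer"
matplotlib.pylab.imshow(img_array, cmap='gray')
matplotlib.pylab.colorbar()
matplotlib.pylab.show()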
示例#30
0
print(
    "[INFO] Opening image input port with name /humanActivityDetection2D/img:i ..."
)

# Open input image port
humanActivityDetection2D_portIn = yarp.BufferedPortImageRgb()
humanActivityDetection2D_portNameIn = '/humanActivityDetection2D/img:i'
humanActivityDetection2D_portIn.open(humanActivityDetection2D_portNameIn)

print("")
print(
    "[INFO] Opening image output port with name /humanActivityDetection2D/img:o ..."
)

# Open output image port
humanActivityDetection2D_portOut = yarp.Port()
humanActivityDetection2D_portNameOut = '/humanActivityDetection2D/img:o'
humanActivityDetection2D_portOut.open(humanActivityDetection2D_portNameOut)

print("")
print(
    "[INFO] Opening data output port with name /humanActivityDetection2D/data:o ..."
)

# Open output data port
humanActivityDetection2D_portOutDet = yarp.Port()
humanActivityDetection2D_portNameOutDet = '/humanActivityDetection2D/data:o'
humanActivityDetection2D_portOutDet.open(
    humanActivityDetection2D_portNameOutDet)

# Create data bottle