Example #1
def CameraIntrinsicsProtoFromColorCameraProto(input_message):
    # Copy the pinhole and distortion models of a ColorCameraProto into a
    # standalone CameraIntrinsicsProto, preserving both timestamps.
    output_message = Message.create_message_builder('CameraIntrinsicsProto')
    output_message.proto.pinhole = input_message.proto.pinhole
    output_message.proto.distortion = input_message.proto.distortion
    output_message.acqtime = input_message.acqtime
    output_message.pubtime = input_message.pubtime
    return output_message
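# A usage sketch, assuming an Application 'app' wired like the later examples
# (node, component, and channel names are assumptions): convert a received
# ColorCameraProto message and republish just the intrinsics.
color_msg = app.receive('camera', 'ledger', 'color')
if color_msg is not None:
    app.publish('camera', 'ledger', 'intrinsics',
                CameraIntrinsicsProtoFromColorCameraProto(color_msg))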
Example #2
    def test_send_message_proto(self):
        send_msg = Message.create_message_builder('PingProto')
        send_msg.proto.message = 'payload'
        self.app.publish('node', 'ledger', 'in', send_msg)

        recv_msg = self.app.receive('node', 'ledger', 'in')
        self.assertNotEqual(recv_msg.type_id, 0)
        self.assertEqual(recv_msg.proto.message, 'payload')
Example #3
    def test_send_message_has_metadata(self):
        send_msg = Message.create_message_builder('PingProto')
        send_msg.proto.message = 'payload'
        self.app.publish('node', 'ledger', 'in', send_msg)

        recv_msg = self.app.receive('node', 'ledger', 'in')
        self.assertGreater(recv_msg.pubtime, 0)
        self.assertNotEqual(recv_msg.uuid, "")
Example #4
    def test_send_message_with_acqtime(self):
        send_msg = Message.create_message_builder('PingProto')
        send_msg.proto.message = 'payload'
        send_msg.acqtime = 10
        self.app.publish('node', 'ledger', 'in', send_msg)

        recv_msg = self.app.receive('node', 'ledger', 'in')
        self.assertEqual(recv_msg.proto.message, 'payload')
        self.assertEqual(recv_msg.acqtime, 10)
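    # A minimal sketch of the fixture the tests above assume (node and
    # component names are assumptions): one node carrying a MessageLedger
    # component named 'ledger'.
    def setUp(self):
        self.app = Application()
        node = self.app.add('node')
        node.add(self.app.registry.isaac.alice.MessageLedger, 'ledger')
        self.app.start()

    def tearDown(self):
        self.app.stop()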
Example #5
def ImageProtoFromDepthCameraProto(input_message):
    # Copy the nested depth-image fields of a DepthCameraProto into a
    # standalone ImageProto, carrying the data buffers and timestamps along.
    output_message = Message.create_message_builder('ImageProto')
    output_message.proto.elementType = input_message.proto.depthImage.elementType
    output_message.proto.rows = input_message.proto.depthImage.rows
    output_message.proto.cols = input_message.proto.depthImage.cols
    output_message.proto.channels = input_message.proto.depthImage.channels
    output_message.proto.dataBufferIndex = input_message.proto.depthImage.dataBufferIndex
    output_message.buffers = input_message.buffers
    output_message.acqtime = input_message.acqtime
    output_message.pubtime = input_message.pubtime
    return output_message
Example #6
    def test_send_message_with_buffer(self):
        send_msg = Message.create_message_builder('PingProto')
        send_msg.proto.message = 'payload'
        buffer = np.empty(3, dtype=np.dtype('B'))
        buffer[0] = 1
        buffer[1] = 11
        buffer[2] = 111
        send_msg.buffers = [buffer]
        self.app.publish('node', 'ledger', 'in', send_msg)

        recv_msg = self.app.receive('node', 'ledger', 'in')
        self.assertEqual(recv_msg.buffers[0][0], 1)
        self.assertEqual(recv_msg.buffers[0][1], 11)
        self.assertEqual(recv_msg.buffers[0][2], 111)
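        # The same bytes can also be checked through a zero-copy numpy view
        # (a sketch; Example #11 below uses this np.frombuffer pattern for
        # float32 tensors).
        view = np.frombuffer(recv_msg.buffers[0], dtype=np.uint8)
        self.assertEqual(view.tolist(), [1, 11, 111])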
Example #7
    def start_mission(self, mission: Mission):
        # Generate the message
        builder = im.MessageBuilder()
        builder.proto = im.CAPNP_DICT["MissionProto"].from_dict({
            "uuid": mission.uuid_dict,
            "config": {
                "serialized": json.dumps(mission.config)
            }
        })

        # Send the mission
        self._connection.send_message(builder, mission.channel)

        # Set the mission state to STARTED
        mission.status = Mission.Status.STARTED

        self._current_mission = mission
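    # mission.uuid_dict is not defined in this excerpt. Isaac's UuidProto
    # stores a 128-bit UUID as two 64-bit halves, so a plausible sketch (the
    # helper name and the "lower"/"upper" field names are assumptions) is:
    @staticmethod
    def _uuid_to_dict(u):
        # Mask off the low 64 bits; shift down the high 64 bits.
        # Assumes "import uuid" and a uuid.UUID instance u.
        return {"lower": u.int & ((1 << 64) - 1), "upper": u.int >> 64}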
Example #8
    def _process_input_buffer(self):
        '''
        Parses the incoming text buffer, and if any complete messages have been received, converts
        them to capnp messages
        '''
        split_text = self._incoming_text.split("\n")
        new_messages = split_text[:-1]
        self._incoming_text = split_text[-1]

        # For each message, build a capnp message
        for message_text in new_messages:
            # Get the header/payload from the json text
            json_message = json.loads(message_text)
            header = json_message['header']
            payload = json_message['payload']

            # Convert fields that may be stored as strings to ints
            header["proto"] = int(header["proto"])
            header["acqtime"] = int(header["acqtime"])
            header["pubtime"] = int(header["pubtime"])

            # The proto id is transmitted as an int64_t; if it is negative, it
            # needs to be converted to a uint64_t
            header["proto"] = _signed_to_unsigned(header["proto"])

            # Create a capnp message from the payload
            payload_message = _dict_to_proto(payload, header["proto"])

            # Create an ISAAC style message
            builder = im.MessageBuilder()
            builder.proto = payload_message
            builder.acqtime = header["acqtime"]
            builder.pubtime = header["pubtime"]

            # Put the message into the appropriate message queue based on channel
            channel = header["channel"]
            if channel not in self._messages:
                self._messages[channel] = deque([], MESSAGE_QUEUE_SIZE)
            self._messages[channel].append(builder)

            if self._message_callback is not None:
                self._message_callback(builder, channel)
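# Neither helper used above is part of this excerpt. _dict_to_proto presumably
# builds the capnp struct matching the given proto id from the payload dict,
# and _signed_to_unsigned reinterprets a two's-complement int64 as a uint64,
# which in Python is a single mask (a sketch):
def _signed_to_unsigned(value):
    # Negative int64 values wrap around into the upper half of the
    # 64-bit unsigned range.
    return value & 0xFFFFFFFFFFFFFFFF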
Example #9
    args = parser.parse_args()

    # Create and start an application that has a node with a MessageLedger
    app = Application()
    app.add('node')
    app.start()
    node = app.nodes['node']
    component = node['MessageLedger']

    # Create cask objects for input and output
    input_cask = Cask(args.input_cask_path)
    output_cask = Cask(args.output_cask_path, writable=True)
    # For each input channel, either convert or pass through the messages
    for input_channel in input_cask.channels:
        series = input_cask[input_channel.name]
        if series[0].type_id == Message.create_message_builder(
                'ColorCameraProto').proto.schema.node.id:
            image_channel = output_cask.open_channel(component,
                                                     input_channel.name)
            intrinsics_channel = output_cask.open_channel(
                component, "{}_intrinsics".format(input_channel.name))
            for input_message in tqdm(
                    series,
                    '> Splitting ColorCameraProto type messages of input channel "{0}" to write '
                    'ImageProto and CameraIntrinsicsProto type messages to channels named "{0}" '
                    'and "{0}_intrinsics"'.format(input_channel.name)):
                # Write ImageProto
                image_channel.write_message(
                    ImageProtoFromColorCameraProto(input_message))
                # Write CameraIntrinsicsProto
                intrinsics_channel.write_message(
                    CameraIntrinsicsProtoFromColorCameraProto(input_message))
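# The loop above also calls ImageProtoFromColorCameraProto, which is not part
# of this excerpt. A plausible sketch, mirroring the DepthCameraProto
# converter from Example #5 (the "image" field name is an assumption):
def ImageProtoFromColorCameraProto(input_message):
    output_message = Message.create_message_builder('ImageProto')
    output_message.proto.elementType = input_message.proto.image.elementType
    output_message.proto.rows = input_message.proto.image.rows
    output_message.proto.cols = input_message.proto.image.cols
    output_message.proto.channels = input_message.proto.image.channels
    output_message.proto.dataBufferIndex = input_message.proto.image.dataBufferIndex
    output_message.buffers = input_message.buffers
    output_message.acqtime = input_message.acqtime
    output_message.pubtime = input_message.pubtime
    return output_message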
Example #10
'''
Copyright (c) 2020-21, NVIDIA CORPORATION. All rights reserved.

NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
from isaac import Application, Message
import argparse
import random
import sys

if __name__ == '__main__':
    # Load local message definitions and those from the Isaac SDK
    Message.load_message_definitions(
        "external/com_nvidia_isaac_sdk/messages/*.capnp")
    Message.load_message_definitions("apps/foo.capnp")

    # Samples of using the loaded message definitions
    send_msg = Message.create_message_builder('PingProto')
    foo_msg = Message.create_message_builder('FooProto')
    bar_msg = Message.create_message_builder('BarProto')

    # Load a JSON graph from the Isaac repo and run it
    app = Application(name="sub")
    app.home_workspace_name = 'velodyne'
    app.load(
        "@com_nvidia_isaac_sdk//packages/navsim/apps/navsim_tcp.subgraph.json",
        prefix="sub")
    app.load_module("@com_nvidia_isaac_sdk//packages/sight")
    comp_sight = app.nodes["websight"]["WebsightServer"]
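    # A plausible continuation, not shown in this excerpt: run the loaded
    # graph for a short time, then shut down (the sleep duration is an
    # assumption; assumes "import time" alongside the imports above).
    app.start()
    time.sleep(1.0)
    app.stop()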
Example #11
    def main(self):
        """
        Runs the training loop while performing inference from the reinforcement learning policy
        """
        # Store intra episodic rewards for each agent
        agent_reward = np.zeros(self.num_agents)
        # Store the age (number of timesteps) for each agent
        agent_life = np.zeros(self.num_agents, dtype=int)

        step_count = 0

        # Main training loop
        while True:
            # Receive data for inference from the batching codelet
            state_tensor = self.app.receive("temporal_batching",
                                            "TemporalBatching",
                                            "temporal_tensor")
            if state_tensor is None:
                # If no new messages are received
                time.sleep(0.05)
                continue

            # Extract data from the received state message
            state_buffer = np.frombuffer(state_tensor.buffers[0],
                                         dtype=np.float32)
            state_buffer = np.reshape(
                state_buffer,
                (self.num_agents, self.look_back * self.state_dim_per_agent))

            # Extract the message from the reward tensor
            reward_tensor = self.app.receive("state_machine_gym_flow",
                                             "StateMachineGymFlow",
                                             "transition_reward")
            if reward_tensor is None:
                raise ValueError(
                    'Reward tensor not received for the current transition')
            reward_buffer = np.frombuffer(reward_tensor.buffers[0],
                                          dtype=np.float32)
            reward_buffer = np.reshape(reward_buffer, (self.num_agents,))

            # Extract the dead tensor from Gym
            dead_tensor = self.app.receive("state_machine_gym_flow",
                                           "StateMachineGymFlow",
                                           "transition_dead")
            if dead_tensor is None:
                raise ValueError(
                    'Dead tensor not received for the current transition')
            dead_buffer = np.frombuffer(dead_tensor.buffers[0],
                                        dtype=np.float32)
            dead_buffer = np.reshape(dead_buffer, (self.num_agents,))

            # Extract the age tensor from Gym
            aux_tensor = self.app.receive("state_machine_gym_flow",
                                          "StateMachineGymFlow",
                                          "transition_aux")
            if aux_tensor is None:
                raise ValueError(
                    'Aux tensor not received for the current transition')
            aux_buffer = np.frombuffer(aux_tensor.buffers[0], dtype=np.float32)
            aux_buffer = np.reshape(aux_buffer, (self.num_agents, -1))
            age_buffer = aux_buffer[:, self.aux_end_of_episode_flag]

            # Increment step counter
            step_count += 1
            # Create the output array to store actions to perform
            action_array = np.zeros(
                (self.num_agents, self.action_dim_per_agent), dtype=np.float32)

            # For each agent, pass the state to the neural network and store the inferred output
            for i in range(self.num_agents):
                # Extract tuple for each agent
                agent_tuple = state_buffer[i]
                # Extract the latest state observed by the agent
                observations = agent_tuple.reshape(1, -1)
                # Extract the reward received by the agent
                reward = reward_buffer[i]
                # Extract the flag indicating if the agent is dead or alive
                dead = dead_buffer[i]
                # Extract flag indicating if the robot has reached its maximum age
                age = age_buffer[i]
                # Get action to be performed from the policy
                action_array[i] = self.get_action(observations, step_count)
                # Append current reward with the episodic reward
                agent_reward[i] += reward
                # Increment age of agent by 1
                agent_life[i] += 1
                # If the agent is dead or has reached its maximum age.
                # Note: both flags need to be checked; the age flag is reset
                # to avoid an undue penalty while training.
                if dead == 1.0 or age == 1.0:
                    # Since episode has completed, append episode reward
                    self.sum_episode_reward += agent_reward[i]
                    # Append final age of agent before dying to the sum
                    self.sum_agent_life += agent_life[i]
                    self.num_episodes += 1
                    # Reset the counters
                    agent_reward[i] = 0.0
                    agent_life[i] = 0
            # Send the actions inferred from the self.policy
            send_msg = Message.create_message_builder("TensorProto")
            send_msg.proto.elementType = "float32"
            send_msg.proto.sizes = [self.num_agents, self.action_dim_per_agent]
            send_msg.proto.dataBufferIndex = 0
            send_msg.buffers = [action_array]
            self.app.publish("state_machine_gym_flow", "StateMachineGymFlow",
                             "action", send_msg)
            # Start training after self.max_episode_length is reached
            if step_count % self.max_episode_length == 0 and self.mode == "train":
                self.train()
                step_count = 0
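    def get_action(self, observations, step_count):
        # Referenced in main() but not part of this excerpt; this
        # random-policy stand-in (an assumption) only illustrates the
        # expected interface. A real implementation would run the
        # observations through the learned policy network.
        return np.random.uniform(-1.0, 1.0, self.action_dim_per_agent)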