Example #1
0
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Actor-critic MLP for 30-d inputs and a single 1-d action.

        Deliberately disabled: the unconditional raise below makes the rest
        of this constructor unreachable until the recorded-6d vs produced-1d
        action mismatch is resolved.
        """
        raise NotImplementedError(
            'Currently this should not work as the actions recorded are 6d '
            'but this network only return 1d')
        # NOTE(review): everything below is dead code because of the raise.
        super().__init__(config=config, quiet=True)
        self.input_size = (30, )
        self.output_size = (1, )
        self.action_min = -1
        self.action_max = +1
        # Policy head: tanh output keeps the action mean in [-1, 1].
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=nn.Tanh())

        # Value head: unbounded scalar state value.
        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        # Learnable log standard deviation of the Gaussian policy;
        # -0.5 is the fallback when the config keeps the 'default' marker.
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(
            torch.ones(self.output_size, dtype=torch.float32) * log_std,
            requires_grad=True)
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint(f'Started.', self._logger)
            self.initialize_architecture()
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Actor-critic MLP for 3-d observations with actions in [-2, 2]."""
        # NOTE(review): unlike sibling constructors this does not pass
        # quiet=True to super() — confirm the base-class logging behaviour.
        super().__init__(config=config)
        self.input_size = (3, )
        # NOTE(review): output_size is (1, 1) while the actor head below is
        # sized from output_size[0] == 1 — presumably (1, ) was intended;
        # verify against consumers of output_size.
        self.output_size = (1, 1)
        self.action_min = -2
        self.action_max = 2

        # Policy head: unbounded mean of the Gaussian policy.
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=None)

        # Value head: scalar state value.
        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        # Learnable log standard deviation; -0.5 when config says 'default'.
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(
            torch.ones(self.output_size, dtype=torch.float32) * log_std,
            requires_grad=True)
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint(f'Started.', self._logger)
            self.initialize_architecture()
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Discrete 3-way actor-critic over 30-d inputs, mapped to 6-d commands."""
        super().__init__(config=config, quiet=True)
        self.input_size = (30, )
        self.output_size = (3, )
        # Policy head: one logit per discrete action defined below.
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=None)

        # Value head: scalar state value.
        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        # NOTE(review): initialization happens unconditionally here, whereas
        # sibling classes run it under `if not quiet` — confirm intended.
        self.initialize_architecture()
        # Discrete index -> 6-d command; presumably [linear xyz, angular xyz]
        # twist with fixed forward speed and left/straight/right turn — confirm.
        self.discrete_action_mapper = DiscreteActionMapper([
            torch.as_tensor([0.2, 0.0, 0.0, 0.0, 0.0, -0.2]),
            torch.as_tensor([0.2, 0.0, 0.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([0.2, 0.0, 0.0, 0.0, 0.0, 0.2]),
        ])
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint(f'Started.', self._logger)
Example #4
0
 def __init__(self, config: ArchitectureConfig, quiet: bool = False):
     """Conv encoder + MLP decoder mapping 3x128x128 images to 6 outputs."""
     super().__init__(config=config, quiet=True)
     # Logger is created unconditionally; `quiet` only suppresses the
     # start-up message below.
     self._logger = get_logger(
         name=get_filename_without_extension(__file__),
         output_path=config.output_path,
         quiet=False)
     if not quiet:
         cprint(f'Started.', self._logger)
     self.input_size = (3, 128, 128)
     self.output_size = (6, )
     self.discrete = False
     # Optional dropout, only when a concrete rate is configured.
     self.dropout = nn.Dropout(
         p=config.dropout) if config.dropout != 'default' else None
     # Four stride-2 convolutions; the decoder sizing below implies the
     # final feature map is 256 x 6 x 6.
     self.encoder = nn.Sequential(
         nn.Conv2d(3, 32, 4, stride=2),
         nn.ReLU(),
         nn.Conv2d(32, 64, 4, stride=2),
         nn.ReLU(),
         nn.Conv2d(64, 128, 4, stride=2),
         nn.ReLU(),
         nn.Conv2d(128, 256, 4, stride=2),
         nn.ReLU(),
     )
     # Flattened features -> 6 outputs squashed to [-1, 1] by tanh.
     self.decoder = mlp_creator(
         sizes=[256 * 6 * 6, 128, 128, self.output_size[0]],
         activation=nn.ReLU(),
         output_activation=nn.Tanh(),
         bias_in_last_layer=False)
     self.initialize_architecture()
Example #5
0
    def __init__(self):
        """ROS waypoint-indicator node: tracks waypoints, publishes the current one.

        Blocks up to 60 s waiting for /robot/position_sensor to appear on
        the parameter server before reading configuration.
        """
        start_time = time.time()
        max_duration = 60
        while not rospy.has_param('/robot/position_sensor') and time.time(
        ) < start_time + max_duration:
            time.sleep(0.1)
        self._output_path = get_output_path()
        self._logger = get_logger(get_filename_without_extension(__file__),
                                  self._output_path)

        # fields
        self._current_waypoint_index = 0
        self._waypoints = rospy.get_param('/world/waypoints', [])
        self._waypoint_reached_distance = rospy.get_param(
            '/world/waypoint_reached_distance', 0.3)

        # publishers
        self._publisher = rospy.Publisher(
            '/waypoint_indicator/current_waypoint',
            Float32MultiArray,
            queue_size=10)

        # subscribe
        # Resolve the odometry callback by name (e.g. Odometry ->
        # _odometry_callback); the assert guarantees the handler exists.
        odometry_type = rospy.get_param('/robot/position_sensor/type')
        callback = f'_{camelcase_to_snake_format(odometry_type)}_callback'
        assert callback in self.__dir__()
        # SECURITY NOTE(review): eval() on parameter-server strings executes
        # arbitrary code if the server is compromised — consider a lookup map.
        rospy.Subscriber(name=rospy.get_param('/robot/position_sensor/topic'),
                         data_class=eval(odometry_type),
                         callback=eval(f'self.{callback}'))
        rospy.Subscriber(name='/fsm/reset',
                         data_class=Empty,
                         callback=self.reset)
        # NOTE(review): init_node is called after creating subscribers —
        # confirm this ordering is accepted by the rospy version in use.
        rospy.init_node('waypoint_indicator')
        self.reset()
    def __init__(self):
        """ROS expert actor: reads specs from the parameter server, publishes cmd_vel."""
        self.count = 0
        rospy.init_node('ros_expert')
        stime = time.time()
        max_duration = 60
        # Wait up to 60 s for the specs parameter to appear.
        while not rospy.has_param('/actor/ros_expert/specs'
                                  ) and time.time() < stime + max_duration:
            time.sleep(0.01)
        self._specs = rospy.get_param('/actor/ros_expert/specs')
        super().__init__(
            config=ActorConfig(name='ros_expert', specs=self._specs))
        self._output_path = get_output_path()
        self._logger = get_logger(get_filename_without_extension(__file__),
                                  self._output_path)
        cprint(f'ros specifications: {self._specs}', self._logger)
        # Persist the specs next to the run outputs for reproducibility.
        with open(os.path.join(self._output_path, 'ros_expert_specs.yml'),
                  'w') as f:
            yaml.dump(self._specs, f)
        self._reference_height = rospy.get_param('/world/starting_height', 1)
        # Control adjustments, presumably updated by subscriber callbacks —
        # confirm in _subscribe's handlers.
        self._adjust_height = 0
        self._adjust_yaw_collision_avoidance = 0
        self._adjust_yaw_waypoint_following = 0
        self._rate_fps = self._specs[
            'rate_fps'] if 'rate_fps' in self._specs.keys() else 10
        self._next_waypoint = []
        noise_config = self._specs['noise'] if 'noise' in self._specs.keys(
        ) else {}
        # SECURITY NOTE(review): eval() instantiates the noise model from
        # config text — acceptable for trusted configs only.
        self._noise = eval(f"{noise_config['name']}(**noise_config['args'])"
                           ) if noise_config else None

        self._publisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self._subscribe()
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Small actor-critic over 4-d inputs with 5 discrete 4-d commands."""
        super().__init__(config=config, quiet=True)
        self.input_size = (4,)
        self.output_size = (5,)
        # NOTE(review): discrete is False although a discrete action mapper
        # is built below — confirm which path consumers actually use.
        self.discrete = False

        # Learnable log standard deviation; -0.5 when config says 'default'.
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(torch.ones(self.output_size, dtype=torch.float32) * log_std,
                                          requires_grad=True)

        # Policy head: 5 outputs, one per discrete action defined below.
        self._actor = mlp_creator(sizes=[self.input_size[0], 10, self.output_size[0]],
                                  activation=nn.Tanh(),
                                  output_activation=None)

        # Value head: scalar state value.
        self._critic = mlp_creator(sizes=[self.input_size[0], 10, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)

        self.initialize_architecture()

        # Discrete index -> 4-d command: no-op, then -/+ unit steps on the
        # first two axes.
        self.discrete_action_mapper = DiscreteActionMapper([
            torch.as_tensor([0.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([-1.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([1.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([0.0, -1.0, 0.0, 0.0]),
            torch.as_tensor([0.0, 1.0, 0.0, 0.0]),
        ])
        if not quiet:
            self._logger = get_logger(name=get_filename_without_extension(__file__),
                                      output_path=config.output_path,
                                      quiet=False)

            cprint(f'Started.', self._logger)
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """One-layer conv auto-encoder architecture with optional VAE head.

        Args:
            config: architecture configuration; dropout, batch_normalisation,
                latent_dim, vae and output_path are read here.
            quiet: when True, skip logger creation and the start-up message.
        """
        super().__init__(config=config, quiet=True)
        self.input_scope = 'default'
        self.discrete = False
        # Optional dropout, only when a float rate is configured.
        self.dropout = nn.Dropout(
            p=config.dropout) if isinstance(config.dropout, float) else None
        self._config.batch_normalisation = config.batch_normalisation if isinstance(config.batch_normalisation, bool) \
            else False
        # Latent width h; the encoder doubles it for a VAE (mean + log-var).
        self.h = self._config.latent_dim if isinstance(config.latent_dim,
                                                       int) else 32
        self.vae = self._config.vae if isinstance(config.vae, bool) else False

        # BUG FIX: encoder/decoder construction and weight initialization used
        # to sit inside the `if not quiet` branch, so a quiet instance had no
        # layers at all.  Every sibling architecture in this file guards only
        # *logging* with `quiet`; build the network unconditionally.
        self.encoder = nn.Sequential(
            nn.Conv2d(1, self.h * (2 if self.vae else 1), 1, stride=1),
            *[
                nn.BatchNorm2d(self.h * (2 if self.vae else 1)),
                nn.LeakyReLU()
            ] if self._config.batch_normalisation else [nn.LeakyReLU()],
        )
        self.decoder = nn.Sequential(nn.Conv2d(self.h, 1, 1, stride=1))
        self.initialize_architecture()

        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint(f'Started.', self._logger)
Example #9
0
    def __init__(self,
                 config: EvaluatorConfig,
                 network: BaseNet,
                 quiet: bool = False):
        """Evaluator: loads the dataset and prepares criterion/device for `network`."""
        self._config = config
        self._net = network
        self.data_loader = DataLoader(config=self._config.data_loader_config)

        if not quiet:
            # Only the concrete Evaluator class owns a logger here; the
            # type() check leaves it None for subclasses.
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False) if type(self) == Evaluator else None
            cprint(f'Started.', self._logger)

        # Fall back to CPU when no CUDA device is available.
        self._device = torch.device(
            "cuda" if self._config.device in ['gpu', 'cuda']
            and torch.cuda.is_available() else "cpu")
        # SECURITY NOTE(review): the criterion is built with eval() from
        # config strings — trusted configuration only.
        self._criterion = eval(
            f'{self._config.criterion}(reduction=\'none\', {self._config.criterion_args_str})'
        )
        self._criterion.to(self._device)
        self._lowest_validation_loss = None
        self.data_loader.load_dataset()

        self._minimum_error = float(10**6)
        # Remember the network's original device so it can be restored later.
        self._original_model_device = self._net.get_device(
        ) if self._net is not None else None
Example #10
0
    def __init__(self):
        """ROS robot-display node: renders camera frames with projected overlays."""
        rospy.init_node('robot_display')
        stime = time.time()
        max_duration = 60
        # Wait up to 60 s for the output path parameter to appear.
        while not rospy.has_param(
                '/output_path') and time.time() < stime + max_duration:
            time.sleep(0.01)
        self._output_path = get_output_path()
        self._rate_fps = 10
        self._border_width = 400
        # Frame skipping: drop the first 30 frames, then keep 1 in 4.
        self._counter = 0
        self._skip_first_n = 30
        self._skip_every_n = 4
        # Camera matrices
        # Pinhole intrinsics: fx = fy = 100, principal point (100, 100).
        self._intrinsic_matrix = np.asarray([[100, 0.0, 100], [0.0, 100, 100],
                                             [0.0, 0.0, 1.0]])
        self._cam_to_opt_rotation = R.from_quat([0.5, -0.5, 0.5,
                                                 0.5]).as_matrix()
        # Base-to-camera rotations keyed by 0/-13/-90 — presumably camera
        # pitch angles in degrees; confirm against the publisher of this key.
        self._base_to_cam_dict = {
            0: R.from_quat([0, 0, 0, 1]).as_matrix(),
            -13: R.from_quat([0, -0.113, 0, 0.994]).as_matrix(),
            -90: R.from_quat([0, 0.7071068, 0, 0.7071068]).as_matrix(),
        }
        self._base_to_cam_translation = np.asarray([0.1, 0, 0])
        self._reference_image_location = None
        # Rolling timestamps used to estimate the actual update rate.
        self._update_rate_time_tags = []
        self._update_rate = None

        self._logger = get_logger(get_filename_without_extension(__file__),
                                  self._output_path)
        self._subscribe()
        self._add_control_specs()
Example #11
0
    def __init__(self,
                 config: TrainerConfig,
                 network: BaseNet,
                 quiet: bool = False):
        """Deeply-supervised trainer with a separate discriminator optimizer."""
        super().__init__(config, network, quiet=True)

        # NOTE(review): optimizers and scheduler are only created when quiet
        # is False — confirm quiet instances are never actually trained.
        if not quiet:
            self._optimizer = eval(f'torch.optim.{self._config.optimizer}')(
                params=self._net.deeply_supervised_parameters(),
                lr=self._config.learning_rate,
                weight_decay=self._config.weight_decay)
            # Linear LR decay from 1 to 0 over the configured epoch count.
            lambda_function = lambda f: 1 - f / self._config.scheduler_config.number_of_epochs

            self._scheduler = torch.optim.lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda_function) \
                if self._config.scheduler_config is not None else None

            # Discriminator optimizer; falls back to the main learning rate
            # when critic_learning_rate is unset (-1).
            self._discriminator_optimizer = eval(
                f'torch.optim.{self._config.optimizer}')(
                    params=self._net.discriminator_parameters(),
                    lr=self._config.critic_learning_rate
                    if self._config.critic_learning_rate != -1 else
                    self._config.learning_rate,
                    weight_decay=self._config.weight_decay)

            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)
            cprint(f'Started.', self._logger)
    def __init__(self):
        """ROS node that renders modified robot states as image frames."""
        stime = time.time()
        max_duration = 60
        # Wait up to 60 s for the publisher-mode parameter to appear.
        while not rospy.has_param('/modified_state_publisher/mode'
                                  ) and time.time() < stime + max_duration:
            time.sleep(0.01)

        self._output_path = get_output_path()
        self._logger = get_logger(get_filename_without_extension(__file__),
                                  self._output_path)

        # SECURITY NOTE(review): eval() maps the configured type name to a
        # message class — trusted parameter server only.
        rospy.Subscriber(
            rospy.get_param('/robot/modified_state_sensor/topic',
                            '/modified_state'),
            eval(
                rospy.get_param('/robot/modified_state_sensor/type',
                                'CombinedGlobalPoses')),
            self._process_state_and_publish_frame)
        self._publisher = rospy.Publisher('/modified_state_frame',
                                          Image,
                                          queue_size=10)
        cprint(
            f"subscribe to {rospy.get_param('/robot/modified_state_sensor/topic', '/modified_state')}",
            self._logger)
        # NOTE(review): init_node is called after subscribers/publishers are
        # created — confirm this ordering works with this rospy version.
        rospy.init_node('modified_state_frame_visualizer')
 def get_action(self, inputs, train: bool = False) -> Action:
     """Sample a policy action for `inputs`, clipped to the action bounds."""
     processed = self.process_inputs(inputs)  # added line 15/10/2020
     sampled = self.sample(processed, train=train)
     # added line 23/10/2020: clip into [action_min, action_max]
     clipped = sampled.clamp(min=self.action_min, max=self.action_max)
     return Action(actor_name=get_filename_without_extension(__file__),
                   value=clipped)
Example #14
0
    def __init__(self):
        """DNN actor ROS node: loads a trained model and publishes commands."""
        rospy.init_node('dnn_actor_ros')
        start_time = time.time()
        max_duration = 60
        # Wait up to 60 s for the output path parameter to appear.
        while not rospy.has_param('/output_path') and time.time() < start_time + max_duration:
            time.sleep(0.1)
        self._specs = rospy.get_param('/actor/dnn_actor/specs')
        super().__init__(
            config=ActorConfig(
                name='dnn_actor',
                specs=self._specs
            )
        )
        self._output_path = get_output_path()
        self._logger = get_logger(get_filename_without_extension(__file__), self._output_path)
        cprint(f'&&&&&&&&&&&&&&&&&& \n {self._specs} \n &&&&&&&&&&&&&&&&&', self._logger)
        # Persist the specs next to the run outputs for reproducibility.
        with open(os.path.join(self._output_path, 'dnn_actor_specs.yml'), 'w') as f:
            yaml.dump(self._specs, f)
        self._rate_fps = self._specs['rate_fps'] if 'rate_fps' in self._specs.keys() else 20
        self._rate = rospy.Rate(self._rate_fps)
        # Build the model from the spec's model_config, redirected to the
        # current output path.
        config_dict = self._specs['model_config']
        config_dict['output_path'] = self._output_path
        cprint(f'loading model...', self._logger)
        self._model = Model(config=ModelConfig().create(config_dict=config_dict))
        self._input_sizes = self._model.get_input_sizes()

        self._publisher = rospy.Publisher(self._specs['command_topic'], Twist, queue_size=10)
        self._subscribe()
        cprint(f'ready', self._logger)
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Variant that ties residual blocks 2-4 to block 1's conv parameters.

        After these assignments all four residual blocks reference the same
        weight/bias Parameter objects, so updates apply to all of them.
        """
        super().__init__(config=config, quiet=True)

        self.residual_2.residual_net.conv_0.weight = self.residual_1.residual_net.conv_0.weight
        self.residual_2.residual_net.conv_0.bias = self.residual_1.residual_net.conv_0.bias
        self.residual_2.residual_net.conv_1.weight = self.residual_1.residual_net.conv_1.weight
        self.residual_2.residual_net.conv_1.bias = self.residual_1.residual_net.conv_1.bias

        self.residual_3.residual_net.conv_0.weight = self.residual_1.residual_net.conv_0.weight
        self.residual_3.residual_net.conv_0.bias = self.residual_1.residual_net.conv_0.bias
        self.residual_3.residual_net.conv_1.weight = self.residual_1.residual_net.conv_1.weight
        self.residual_3.residual_net.conv_1.bias = self.residual_1.residual_net.conv_1.bias

        self.residual_4.residual_net.conv_0.weight = self.residual_1.residual_net.conv_0.weight
        self.residual_4.residual_net.conv_0.bias = self.residual_1.residual_net.conv_0.bias
        self.residual_4.residual_net.conv_1.weight = self.residual_1.residual_net.conv_1.weight
        self.residual_4.residual_net.conv_1.bias = self.residual_1.residual_net.conv_1.bias

        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            # NOTE(review): initialize_architecture runs after the tying
            # above and only when not quiet — confirm it preserves sharing.
            self.initialize_architecture()
            cprint(f'Started.', self._logger)
    def test_cprint(self):
        """cprint should append the message to the logger's log file."""
        logger = get_logger(name=get_filename_without_extension(__file__),
                            output_path=self.TEST_DIR,
                            quiet=True)
        cprint('HELP', logger)

        log_file = glob(os.path.join(self.TEST_DIR, 'log_files', '*'))[0]
        with open(log_file, 'r') as file_handle:
            first_line = file_handle.readlines()[0].strip()
        self.assertTrue('HELP' in first_line)
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Discrete-policy architecture stub: only flags the policy as discrete."""
        super().__init__(config=config, quiet=True)
        self.discrete = True
        if quiet:
            return
        self._logger = get_logger(
            name=get_filename_without_extension(__file__),
            output_path=config.output_path,
            quiet=False)
        cprint(f'Started.', self._logger)
        self.initialize_architecture()
Example #18
0
 def __init__(self, config: DataLoaderConfig):
     """Data loader: owns a Dataset and per-run sampling probabilities."""
     self._config = config
     self._logger = get_logger(
         name=get_filename_without_extension(__file__),
         output_path=config.output_path,
         quiet=False)
     cprint(f'Started.', self._logger)
     self._dataset = Dataset()
     self._num_runs = 0
     # Sampling probabilities; empty until a dataset is loaded.
     self._probabilities: List = []
     self.seed()
     # -1 marks that no hdf5 file has been opened yet.
     self._hdf5_file_index = -1
 def get_action(self, inputs, train: bool = False) -> Action:
     """Sample and clip a policy action, then append slow-run components.

     The network output is concatenated with get_slow_run(...) values into
     a single flat action vector.
     """
     inputs = self.process_inputs(inputs)
     output = self.sample(inputs, train=train)
     # Clip into the configured [action_min, action_max] range.
     output = output.clamp(min=self.action_min, max=self.action_max)
     return Action(
         actor_name=get_filename_without_extension(
             __file__),  # assume output [1, 2] so no batch!
         value=np.stack([
             *output.data.cpu().numpy().squeeze(),
             *get_slow_run(inputs.squeeze())
         ],
                        axis=-1))
    def test_normal_usage(self):
        """Each of the four log levels should write exactly one log line."""
        logger = get_logger(name=get_filename_without_extension(__file__),
                            output_path=self.TEST_DIR)
        for level in ('debug', 'info', 'warning', 'error'):
            getattr(logger, level)(f'{level} message')

        log_file = glob(os.path.join(self.TEST_DIR, 'log_files', '*'))[0]
        with open(log_file, 'r') as file_handle:
            self.assertEqual(len(file_handle.readlines()), 4)
Example #21
0
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Residual CNN mapping 1x200x200 inputs to 6 outputs in [-1, 1]."""
        super().__init__(config=config, quiet=True)
        # Logger is created unconditionally; `quiet` only mutes the message.
        self._logger = get_logger(name=get_filename_without_extension(__file__),
                                  output_path=config.output_path,
                                  quiet=False)
        if not quiet:
            cprint(f'Started.', self._logger)

        self.input_size = (1, 200, 200)
        self.output_size = (6,)
        self.discrete = False
        # Optional dropout, only when a concrete rate is configured.
        self.dropout = nn.Dropout(p=config.dropout) if config.dropout != 'default' else None

        # Stem: strided conv + max-pool downsampling.
        self.conv2d_1 = nn.Conv2d(in_channels=self.input_size[0], out_channels=32,
                                  kernel_size=5, stride=2, padding=1, bias=True)
        self.maxpool_1 = nn.MaxPool2d(kernel_size=3, stride=2)

        # First residual block
        # conv2d_4 is the 1x1 stride-2 shortcut projection for this block.
        self.batch_normalization_1 = nn.BatchNorm2d(32)
        self.conv2d_2 = nn.Conv2d(in_channels=32, out_channels=32,
                                  kernel_size=3, stride=2, padding=1, bias=True)
        self.batch_normalization_2 = nn.BatchNorm2d(32)
        self.conv2d_3 = nn.Conv2d(in_channels=32, out_channels=32,
                                  kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2d_4 = nn.Conv2d(in_channels=32, out_channels=32,
                                  kernel_size=1, stride=2, padding=0, bias=True)
        # Second residual block
        # conv2d_7 is the 1x1 stride-2 shortcut projection (32 -> 64).
        self.batch_normalization_3 = nn.BatchNorm2d(32)
        self.conv2d_5 = nn.Conv2d(in_channels=32, out_channels=64,
                                  kernel_size=3, stride=2, padding=1, bias=True)
        self.batch_normalization_4 = nn.BatchNorm2d(64)
        self.conv2d_6 = nn.Conv2d(in_channels=64, out_channels=64,
                                  kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2d_7 = nn.Conv2d(in_channels=32, out_channels=64,
                                  kernel_size=1, stride=2, padding=0, bias=True)
        # Third residual block
        # conv2d_10 is the 1x1 stride-2 shortcut projection (64 -> 128).
        self.batch_normalization_5 = nn.BatchNorm2d(64)
        self.conv2d_8 = nn.Conv2d(in_channels=64, out_channels=128,
                                  kernel_size=3, stride=2, padding=1, bias=True)
        self.batch_normalization_6 = nn.BatchNorm2d(128)
        self.conv2d_9 = nn.Conv2d(in_channels=128, out_channels=128,
                                  kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2d_10 = nn.Conv2d(in_channels=64, out_channels=128,
                                   kernel_size=1, stride=2, padding=0, bias=True)

        #self.dense_1 = nn.Linear(6272, 1)
        #self.dense_2 = nn.Linear(6272, 1)

        # Flattened 6272 features -> 6 outputs squashed by tanh.
        self.decoder = mlp_creator(sizes=[6272, 2056, self.output_size[0]],
                                   activation=nn.ReLU(),
                                   output_activation=nn.Tanh(),
                                   bias_in_last_layer=False)
 def __init__(self, config: EpisodeRunnerConfig, data_saver: DataSaver = None,
              environment: Environment = None, net: BaseNet = None, writer=None):
     """Episode runner: couples environment, network, data saver and writer."""
     self._config = config
     self._logger = get_logger(name=get_filename_without_extension(__file__),
                               output_path=config.output_path,
                               quiet=False)
     self._data_saver = data_saver
     self._environment = environment
     self._net = net
     self._writer = writer
     # Best/worst mean returns observed so far; None until the first run.
     self._max_mean_return = None
     self._min_mean_return = None
     self._reset()
Example #23
0
 def __init__(self,
              name: str = '',
              grep_str: str = '',
              output_path: str = ''):
     """Wrapper around an external process located via a grep pattern."""
     # Presumably a wait in seconds before escalating shutdown — confirm
     # against the termination logic.
     self._grace_period = 1
     self._name = name if name else 'default'
     self._state = ProcessState.Initializing
     # Pattern used to find the process; defaults to the instance name.
     self._grep_str = grep_str if grep_str else self._name
     self._control_str = ''
     self._process_popen = None
     self._logger = get_logger(name=get_filename_without_extension(__file__),
                               output_path=output_path,
                               quiet=False)
     cprint(f'initiate', self._logger)
Example #24
0
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Adversarial sideways-flight actor-critic plus a tiny-YOLOv3 detector."""
        super().__init__(config=config, quiet=True)
        self.input_size_net = (2, )
        self.output_size = (8, )
        self.action_max = 0.5
        self.starting_height = -1
        self.previous_input = torch.Tensor([0, 0])
        # Presumably the square image side used by the YOLO detector — confirm.
        self.sz = 416

        # NOTE(review): sizes below read self.input_size, not input_size_net —
        # confirm the base class defines input_size, else this is a typo.
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 8,
                   2],  # for now actors can only fly sideways
            layer_bias=True,
            activation=nn.Tanh(),
            output_activation=nn.Tanh())
        # Learnable log standard deviation; -0.5 when config says 'default'.
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                          requires_grad=True)

        self._critic = mlp_creator(sizes=[self.input_size[0], 8, 1],
                                   layer_bias=True,
                                   activation=nn.Tanh(),
                                   output_activation=nn.Tanh())

        # Separate adversarial actor/critic pair with its own log std.
        self._adversarial_actor = mlp_creator(sizes=[self.input_size[0], 8, 2],
                                              layer_bias=True,
                                              activation=nn.Tanh(),
                                              output_activation=nn.Tanh())
        self.adversarial_log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                                      requires_grad=True)

        self._adversarial_critic = mlp_creator(
            sizes=[self.input_size[0], 8, 1],
            layer_bias=True,
            activation=nn.Tanh(),
            output_activation=nn.Tanh())

        # Pretrained COCO tiny-YOLOv3 weights loaded from the repo path.
        self.yolov3_tiny = Yolov3Tiny(num_classes=80)
        self.yolov3_tiny.load_state_dict(
            torch.load('src/ai/yolov3/yolov3_files/yolov3_tiny_coco_01.h5'))

        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)
            cprint(f'Started.', self._logger)
            self.initialize_architecture()
Example #25
0
    def __init__(self, config: EnvironmentConfig):
        """Environment base: sets up logging and optional observation/reward filters."""
        self._config = config
        self._logger = get_logger(name=get_filename_without_extension(__file__),
                                  output_path=self._config.output_path,
                                  quiet=False)

        # Filters are only created when configured; attribute may be absent
        # otherwise — callers must check the config flags first.
        if self._config.normalize_observations:
            self._observation_filter = NormalizationFilter(clip=self._config.observation_clipping)

        if self._config.normalize_rewards:
            self._reward_filter = ReturnFilter(clip=self._config.reward_clipping,
                                               discount=0.99)

        cprint('initiated', self._logger)
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        """Adversarial sideways-flight actor-critic over a scalar observation."""
        super().__init__(config=config, quiet=True)
        self._playfield_size = (0, 1, 0)
        self.input_size = (1, )
        self.output_size = (8, )
        self.action_min = -0.5
        self.action_max = 0.5
        self.starting_height = -1
        self.previous_input = 0

        self.waypoint = get_waypoint(self._playfield_size)

        self._actor = mlp_creator(
            sizes=[self.input_size[0], 4,
                   1],  # for now actors can only fly sideways
            layer_bias=False,
            activation=nn.Tanh(),
            output_activation=None)
        # Learnable log standard deviation; -0.5 when config says 'default'.
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                          requires_grad=True)

        # Value head: scalar state value.
        self._critic = mlp_creator(sizes=[self.input_size[0], 4, 1],
                                   layer_bias=False,
                                   activation=nn.Tanh(),
                                   output_activation=None)

        # Separate adversarial actor/critic pair with its own log std.
        self._adversarial_actor = mlp_creator(sizes=[self.input_size[0], 4, 1],
                                              layer_bias=False,
                                              activation=nn.Tanh(),
                                              output_activation=None)
        self.adversarial_log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                                      requires_grad=True)

        self._adversarial_critic = mlp_creator(
            sizes=[self.input_size[0], 4, 1],
            layer_bias=False,
            activation=nn.Tanh(),
            output_activation=None)

        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)
            cprint(f'Started.', self._logger)
            self.initialize_architecture()
Example #27
0
    def __init__(self):
        """Joystick teleop node: builds commands from the 'teleop' ROS parameter.

        Raises:
            JoyTeleopException: when no 'teleop' configuration is present.
        """
        if not rospy.has_param("teleop"):
            rospy.logfatal("no configuration was found, taking node down")
            raise JoyTeleopException("no config")

        # Registries filled while parsing the teleop configuration below.
        self.publishers = {}
        self.al_clients = {}
        self.srv_clients = {}
        self.service_types = {}
        self.message_types = {}
        self.command_list = {}
        # Actions/services that were unavailable at start-up; retried later.
        self.offline_actions = []
        self.offline_services = []

        self.old_buttons = []

        teleop_cfg = rospy.get_param("teleop")

        # Register every configured command under its declared type.
        for i in teleop_cfg:
            if i in self.command_list:
                rospy.logerr("command {} was duplicated".format(i))
                continue
            action_type = teleop_cfg[i]['type']
            self.add_command(i, teleop_cfg[i])
            if action_type == 'topic':
                self.register_topic(i, teleop_cfg[i])
            elif action_type == 'action':
                self.register_action(i, teleop_cfg[i])
            elif action_type == 'service':
                self.register_service(i, teleop_cfg[i])
            elif action_type == 'method':
                self.register_method(i, teleop_cfg[i])
            else:
                rospy.logerr("unknown type '%s' for command '%s'", action_type,
                             i)

        # Don't subscribe until everything has been initialized.
        rospy.Subscriber('joy', sensor_msgs.msg.Joy, self.joy_callback)

        # Run a low-freq action updater
        rospy.Timer(rospy.Duration(2.0), self.update_actions)

        self._logger = get_logger(get_filename_without_extension(__file__),
                                  get_output_path())
        # Dump the parsed command table to the log for debugging.
        for c in self.command_list:
            cprint(f'{c}: ', self._logger)
            for k, e in self.command_list[c].items():
                cprint(f'{k}: {e}', self._logger)
 def __init__(self):
     """ROS robot-display node (simple variant without camera matrices)."""
     rospy.init_node('robot_display')
     stime = time.time()
     max_duration = 60
     # Wait up to 60 s for the output path parameter to appear.
     while not rospy.has_param(
             '/output_path') and time.time() < stime + max_duration:
         time.sleep(0.01)
     self._output_path = get_output_path()
     self._rate_fps = 10
     self._border_width = 300
     # Frame skipping: drop the first 30 frames, then keep 1 in 4.
     self._counter = 0
     self._skip_first_n = 30
     self._skip_every_n = 4
     self._logger = get_logger(get_filename_without_extension(__file__),
                               self._output_path)
     self._subscribe()
     self._add_control_specs()
    def __init__(self, config: TrainerConfig, quiet=False, network: BaseNet = None):
        """PPO-style trainer: resolves 'default' hyperparameters, sets up logging.

        Args:
            config: trainer configuration; 'default' placeholders for epsilon,
                kl_target and the two iteration caps are replaced here.
            network: network whose actor/critic parameters are trained.
            quiet: when True, skip logger creation and start-up messages.
        """
        super().__init__(config, network, quiet=True)

        # Replace 'default' placeholders with concrete hyperparameter values.
        self._config.epsilon = 0.2 if self._config.epsilon == "default" else self._config.epsilon
        self._config.kl_target = 0.01 if self._config.kl_target == "default" else self._config.kl_target
        if self._config.max_actor_training_iterations == "default":
            self._config.max_actor_training_iterations = 10
        if self._config.max_critic_training_iterations == "default":
            self._config.max_critic_training_iterations = 10

        if not quiet:
            # NOTE(review): quiet=True silences this logger's console output,
            # unlike sibling trainers which pass quiet=False — confirm intent.
            self._logger = get_logger(name=get_filename_without_extension(__file__),
                                      output_path=config.output_path,
                                      quiet=True)
            cprint(f'Started.', self._logger)
            # FIX: route the checksum messages through the logger — every
            # other cprint call in this file passes one; these two dropped it.
            cprint(f'actor checksum {get_checksum_network_parameters(self._net.get_actor_parameters())}',
                   self._logger)
            cprint(f'critic checksum {get_checksum_network_parameters(self._net.get_critic_parameters())}',
                   self._logger)
Example #30
0
    def __init__(self):
        """April-tag detector node: publishes a reference pose from tag detections."""
        start_time = time.time()
        max_duration = 60
        # Wait up to 60 s for the position sensor parameter to appear.
        while not rospy.has_param('/robot/position_sensor') and time.time(
        ) < start_time + max_duration:
            time.sleep(0.1)
        self._output_path = get_output_path()
        self._logger = get_logger(get_filename_without_extension(__file__),
                                  self._output_path)

        # fields
        # Static optical-to-base transform per camera topic — presumably
        # translation in metres and quaternion [x, y, z, w]; confirm against
        # the robot's TF tree.
        self._optical_to_base_transformation_dict = {
            '/forward/image_raw': {
                'translation': [0, -0.086, -0.071],
                'rotation': [0.553, -0.553, 0.440, 0.440]
            },
            '/down/image_raw': {
                'translation': [-0.000, 0.050, -0.100],
                'rotation': [0.707, -0.707, 0.000, 0.000]
            },
            '/bebop/image_raw': {
                'translation': [0., 0.022, -0.097],
                'rotation': [0.553, -0.553, 0.44, 0.44]
            },
        }
        # Select the transform matching the configured camera topic.
        self._optical_to_base_transformation = self._optical_to_base_transformation_dict[
            rospy.get_param('/robot/camera_sensor/topic')]
        self._tag_transforms = {}
        self._waypoint_reached_distance = rospy.get_param(
            '/world/waypoint_reached_distance', 0.3)
        self._calculating = False  # Don't update tag detections while calculating closest
        # publishers
        self._publisher = rospy.Publisher('/reference_pose',
                                          PointStamped,
                                          queue_size=10)

        # subscribe
        rospy.Subscriber(name='/tag_detections',
                         data_class=AprilTagDetectionArray,
                         callback=self._set_tag_transforms)
        rospy.Subscriber(name='/bebop/camera_control',
                         data_class=Twist,
                         callback=self._adjust_for_camera_twist)
        # start node
        # NOTE(review): init_node is called after the subscribers above —
        # confirm this ordering works with the rospy version in use.
        rospy.init_node('april_tag_detector')
        cprint(f"Initialised", logger=self._logger)