def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config)
        self.input_size = (3, )
        self.output_size = (1, )
        self.action_min = -2
        self.action_max = 2

        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=None)

        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(
            torch.ones(self.output_size, dtype=torch.float32) * log_std,
            requires_grad=True)
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint('Started.', self._logger)
            self.initialize_architecture()
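
# A minimal sketch (not part of the original snippet) of how an actor network
# with a learned log_std, as above, is typically turned into a Gaussian policy.
# The names `model` and `observation` are hypothetical placeholders.
import torch
from torch.distributions import Normal

def sample_action(model, observation: torch.Tensor) -> torch.Tensor:
    mean = model._actor(observation)
    distribution = Normal(mean, torch.exp(model.log_std))
    action = distribution.sample()
    # Clip to the environment's action bounds defined in the constructor.
    return action.clamp(model.action_min, model.action_max)
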
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config, quiet=True)
        self.input_size = (30, )
        self.output_size = (3, )
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=None)

        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        self.initialize_architecture()
        self.discrete_action_mapper = DiscreteActionMapper([
            torch.as_tensor([0.2, 0.0, 0.0, 0.0, 0.0, -0.2]),
            torch.as_tensor([0.2, 0.0, 0.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([0.2, 0.0, 0.0, 0.0, 0.0, 0.2]),
        ])
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint('Started.', self._logger)
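
# A minimal sketch of what the DiscreteActionMapper above is assumed to do:
# translate a discrete policy index into one of the fixed continuous command
# tensors supplied at construction. The real class may offer more methods.
import torch
from typing import List

class DiscreteActionMapperSketch:
    def __init__(self, actions: List[torch.Tensor]):
        self._actions = actions

    def index_to_tensor(self, index: int) -> torch.Tensor:
        return self._actions[index]

    def tensor_to_index(self, tensor: torch.Tensor) -> int:
        for index, action in enumerate(self._actions):
            if torch.allclose(action, tensor):
                return index
        raise ValueError('Unknown action tensor.')
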
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config, quiet=True)
        self.input_size = (4,)
        self.output_size = (5,)
        self.discrete = False

        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(torch.ones(self.output_size, dtype=torch.float32) * log_std,
                                          requires_grad=True)

        self._actor = mlp_creator(sizes=[self.input_size[0], 10, self.output_size[0]],
                                  activation=nn.Tanh(),
                                  output_activation=None)

        self._critic = mlp_creator(sizes=[self.input_size[0], 10, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)

        self.initialize_architecture()

        self.discrete_action_mapper = DiscreteActionMapper([
            torch.as_tensor([0.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([-1.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([1.0, 0.0, 0.0, 0.0]),
            torch.as_tensor([0.0, -1.0, 0.0, 0.0]),
            torch.as_tensor([0.0, 1.0, 0.0, 0.0]),
        ])
        if not quiet:
            self._logger = get_logger(name=get_filename_without_extension(__file__),
                                      output_path=config.output_path,
                                      quiet=False)

            cprint('Started.', self._logger)
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        raise NotImplementedError(
            'Currently this should not work as the actions recorded are 6d '
            'but this network only returns 1d.')
        # NOTE: the code below is unreachable until the action-dimension mismatch is resolved.
        super().__init__(config=config, quiet=True)
        self.input_size = (30, )
        self.output_size = (1, )
        self.action_min = -1
        self.action_max = +1
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=nn.Tanh())

        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(
            torch.ones(self.output_size, dtype=torch.float32) * log_std,
            requires_grad=True)
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint('Started.', self._logger)
            self.initialize_architecture()
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config, quiet=True)
        self.input_size = (2, )
        self.output_size = (8, )
        self.action_max = 0.5
        self.starting_height = -1
        self.previous_input = torch.Tensor([0, 0])
        self.sz = 416

        # For now, actors can only fly sideways, hence the two output dimensions.
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 8, 2],
            layer_bias=True,
            activation=nn.Tanh(),
            output_activation=nn.Tanh())
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                          requires_grad=True)

        self._critic = mlp_creator(sizes=[self.input_size[0], 8, 1],
                                   layer_bias=True,
                                   activation=nn.Tanh(),
                                   output_activation=nn.Tanh())

        self._adversarial_actor = mlp_creator(sizes=[self.input_size[0], 8, 2],
                                              layer_bias=True,
                                              activation=nn.Tanh(),
                                              output_activation=nn.Tanh())
        self.adversarial_log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                                      requires_grad=True)

        self._adversarial_critic = mlp_creator(
            sizes=[self.input_size[0], 8, 1],
            layer_bias=True,
            activation=nn.Tanh(),
            output_activation=nn.Tanh())

        self.yolov3_tiny = Yolov3Tiny(num_classes=80)
        self.yolov3_tiny.load_state_dict(
            torch.load('src/ai/yolov3/yolov3_files/yolov3_tiny_coco_01.h5'))

        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)
            cprint('Started.', self._logger)
            self.initialize_architecture()
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config, quiet=True)
        self._playfield_size = (0, 1, 0)
        self.input_size = (1, )
        self.output_size = (8, )
        self.action_min = -0.5
        self.action_max = 0.5
        self.starting_height = -1
        self.previous_input = 0

        self.waypoint = get_waypoint(self._playfield_size)

        # For now, actors can only fly sideways, hence the single output dimension.
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 4, 1],
            layer_bias=False,
            activation=nn.Tanh(),
            output_activation=None)
        log_std = self._config.log_std if self._config.log_std != 'default' else -0.5
        self.log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                          requires_grad=True)

        self._critic = mlp_creator(sizes=[self.input_size[0], 4, 1],
                                   layer_bias=False,
                                   activation=nn.Tanh(),
                                   output_activation=None)

        self._adversarial_actor = mlp_creator(sizes=[self.input_size[0], 4, 1],
                                              layer_bias=False,
                                              activation=nn.Tanh(),
                                              output_activation=None)
        self.adversarial_log_std = torch.nn.Parameter(torch.ones(
            (1, ), dtype=torch.float32) * log_std,
                                                      requires_grad=True)

        self._adversarial_critic = mlp_creator(
            sizes=[self.input_size[0], 4, 1],
            layer_bias=False,
            activation=nn.Tanh(),
            output_activation=None)

        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)
            cprint('Started.', self._logger)
            self.initialize_architecture()
 def __init__(self, config: ArchitectureConfig, quiet: bool = False):
     super().__init__(config=config, quiet=True)
     self._logger = get_logger(
         name=get_filename_without_extension(__file__),
         output_path=config.output_path,
         quiet=False)
     if not quiet:
         cprint('Started.', self._logger)
     self.input_size = (3, 128, 128)
     self.output_size = (6, )
     self.discrete = False
     self.dropout = nn.Dropout(
         p=config.dropout) if config.dropout != 'default' else None
     self.encoder = nn.Sequential(
         nn.Conv2d(3, 32, 4, stride=2),
         nn.ReLU(),
         nn.Conv2d(32, 64, 4, stride=2),
         nn.ReLU(),
         nn.Conv2d(64, 128, 4, stride=2),
         nn.ReLU(),
         nn.Conv2d(128, 256, 4, stride=2),
         nn.ReLU(),
     )
     self.decoder = mlp_creator(
         sizes=[256 * 6 * 6, 128, 128, self.output_size[0]],
         activation=nn.ReLU(),
         output_activation=nn.Tanh(),
         bias_in_last_layer=False)
     self.initialize_architecture()
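
# A standalone sanity check (sketch, reproducing the encoder above): four
# stride-2 convolutions reduce a 3x128x128 input to a 256x6x6 feature map,
# which matches the 256 * 6 * 6 input size assumed by the decoder.
import torch
import torch.nn as nn

encoder = nn.Sequential(
    nn.Conv2d(3, 32, 4, stride=2), nn.ReLU(),
    nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),
    nn.Conv2d(64, 128, 4, stride=2), nn.ReLU(),
    nn.Conv2d(128, 256, 4, stride=2), nn.ReLU(),
)
with torch.no_grad():
    features = encoder(torch.zeros(1, 3, 128, 128))
assert features.shape == (1, 256, 6, 6)  # flattens to 256 * 6 * 6 = 9216
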
 def test_mlp_creator(self):
     network = mlp_creator(sizes=[4, 10, 10, 1],
                           activation=nn.ReLU(),
                           output_activation=None,
                           bias_in_last_layer=False)
     # Three Linear layers interleaved with two ReLU modules -> 5 modules.
     self.assertEqual(len(network), 5)
     count = 0
     for p in network.parameters():
         count += np.prod(p.shape)
     # (4*10 + 10) + (10*10 + 10) + (10*1, bias-free last layer) = 170.
     self.assertEqual(count, 170)
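
# A minimal sketch of an mlp_creator consistent with the assertions above:
# alternating Linear and activation modules, with an optional bias-free final
# layer. The real helper may expose more options (e.g. layer_bias).
import torch.nn as nn
from typing import List, Optional

def mlp_creator_sketch(sizes: List[int],
                       activation: Optional[nn.Module] = None,
                       output_activation: Optional[nn.Module] = None,
                       bias_in_last_layer: bool = True) -> nn.Sequential:
    layers = []
    for index in range(len(sizes) - 1):
        is_last = index == len(sizes) - 2
        layers.append(nn.Linear(sizes[index], sizes[index + 1],
                                bias=bias_in_last_layer if is_last else True))
        if not is_last and activation is not None:
            layers.append(activation)
        if is_last and output_activation is not None:
            layers.append(output_activation)
    return nn.Sequential(*layers)

# mlp_creator_sketch(sizes=[4, 10, 10, 1], activation=nn.ReLU(),
#                    bias_in_last_layer=False) -> 5 modules, 170 parameters.
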
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config, quiet=True)
        self.input_size = (4, )
        self.output_size = (2, )
        self._actor = mlp_creator(
            sizes=[self.input_size[0], 64, 64, self.output_size[0]],
            activation=nn.Tanh(),
            output_activation=None)

        self._critic = mlp_creator(sizes=[self.input_size[0], 64, 64, 1],
                                   activation=nn.Tanh(),
                                   output_activation=None)
        if not quiet:
            self._logger = get_logger(
                name=get_filename_without_extension(__file__),
                output_path=config.output_path,
                quiet=False)

            cprint('Started.', self._logger)
            self.initialize_architecture()
    def __init__(self, config: ArchitectureConfig, quiet: bool = False):
        super().__init__(config=config, quiet=True)
        self._logger = get_logger(name=get_filename_without_extension(__file__),
                                  output_path=config.output_path,
                                  quiet=False)
        if not quiet:
            cprint('Started.', self._logger)

        self.input_size = (1, 200, 200)
        self.output_size = (6,)
        self.discrete = False
        self.dropout = nn.Dropout(p=config.dropout) if config.dropout != 'default' else None

        self.conv2d_1 = nn.Conv2d(in_channels=self.input_size[0], out_channels=32,
                                  kernel_size=5, stride=2, padding=1, bias=True)
        self.maxpool_1 = nn.MaxPool2d(kernel_size=3, stride=2)

        # First residual block
        self.batch_normalization_1 = nn.BatchNorm2d(32)
        self.conv2d_2 = nn.Conv2d(in_channels=32, out_channels=32,
                                  kernel_size=3, stride=2, padding=1, bias=True)
        self.batch_normalization_2 = nn.BatchNorm2d(32)
        self.conv2d_3 = nn.Conv2d(in_channels=32, out_channels=32,
                                  kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2d_4 = nn.Conv2d(in_channels=32, out_channels=32,
                                  kernel_size=1, stride=2, padding=0, bias=True)
        # Second residual block
        self.batch_normalization_3 = nn.BatchNorm2d(32)
        self.conv2d_5 = nn.Conv2d(in_channels=32, out_channels=64,
                                  kernel_size=3, stride=2, padding=1, bias=True)
        self.batch_normalization_4 = nn.BatchNorm2d(64)
        self.conv2d_6 = nn.Conv2d(in_channels=64, out_channels=64,
                                  kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2d_7 = nn.Conv2d(in_channels=32, out_channels=64,
                                  kernel_size=1, stride=2, padding=0, bias=True)
        # Third residual block
        self.batch_normalization_5 = nn.BatchNorm2d(64)
        self.conv2d_8 = nn.Conv2d(in_channels=64, out_channels=128,
                                  kernel_size=3, stride=2, padding=1, bias=True)
        self.batch_normalization_6 = nn.BatchNorm2d(128)
        self.conv2d_9 = nn.Conv2d(in_channels=128, out_channels=128,
                                  kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2d_10 = nn.Conv2d(in_channels=64, out_channels=128,
                                   kernel_size=1, stride=2, padding=0, bias=True)

        # self.dense_1 = nn.Linear(6272, 1)
        # self.dense_2 = nn.Linear(6272, 1)

        self.decoder = mlp_creator(sizes=[6272, 2056, self.output_size[0]],
                                   activation=nn.ReLU(),
                                   output_activation=nn.Tanh(),
                                   bias_in_last_layer=False)
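
# A hedged sketch (assumption, not the original forward pass) of how the first
# residual block above is presumably wired: batch normalisation and ReLU before
# each 3x3 convolution, added to a strided 1x1 shortcut. The three strided
# blocks reduce the 200x200 input to 7x7, giving the 128 * 7 * 7 = 6272
# features the decoder expects.
import torch.nn.functional as F

def _first_residual_block(self, x):
    shortcut = self.conv2d_4(x)  # 1x1 convolution, stride 2
    x = self.conv2d_2(F.relu(self.batch_normalization_1(x)))
    x = self.conv2d_3(F.relu(self.batch_normalization_2(x)))
    return x + shortcut
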
 def __init__(self, config: ArchitectureConfig, quiet: bool = False):
     super().__init__(config=config, quiet=True)
     self._logger = get_logger(
         name=get_filename_without_extension(__file__),
         output_path=config.output_path,
         quiet=False)
     if not quiet:
          cprint('Started.', self._logger)
     self.input_size = (1, 240, 428)
     self.output_size = (6, )
     self.discrete = False
     self.dropout = nn.Dropout(
         p=config.dropout) if config.dropout != 'default' else None
     self.batch_normalisation = config.batch_normalisation if isinstance(config.batch_normalisation, bool) \
         else False
     self.encoder = nn.Sequential(
         nn.Conv2d(1, 12, 3, stride=2, padding=2),
         nn.ReLU(),
         nn.Conv2d(12, 12, 3, stride=2, padding=2),
         nn.ReLU(),
         nn.Conv2d(12, 12, 3, stride=2, padding=2),
         nn.ReLU(),
         nn.Conv2d(12, 24, 3, stride=2, padding=2),
         nn.ReLU(),
         nn.Conv2d(24, 24, 3, stride=2, padding=2),
         nn.ReLU(),
         nn.Conv2d(24, 24, 3, stride=2, padding=2),
         nn.ReLU(),
         nn.Conv2d(24, 32, 3, stride=1, padding=1),
         nn.ReLU(),
         nn.Conv2d(32, 64, 3, stride=1, padding=1),
         nn.ReLU(),
         nn.Conv2d(64, 64, 3, stride=1, padding=1),
         nn.ReLU(),
     )
     self.decoder = mlp_creator(sizes=[3456, 128, 64, self.output_size[0]],
                                activation=nn.ReLU(),
                                output_activation=nn.Identity(),
                                bias_in_last_layer=False)
     self.initialize_architecture()
 def __init__(self, config: ArchitectureConfig, quiet: bool = False):
     super().__init__(config=config, quiet=True)
     self._deeply_supervised_parameter_names = [
         name for name, _ in self.named_parameters()
     ]
     self._discriminator = conv_creator(
         channels=[1, 3, 6, 9],
         kernel_sizes=[5, 5, 5],
         strides=[3, 3, 3],
         activation=nn.LeakyReLU(),
         output_activation=nn.LeakyReLU(),
         batch_norm=self._config.batch_normalisation)
     self._discriminator_decision = mlp_creator(
         [9 * 6 * 6, 1],
         output_activation=nn.Sigmoid(),
         bias_in_last_layer=False)
     if not quiet:
         self._logger = get_logger(
             name=get_filename_without_extension(__file__),
             output_path=config.output_path,
             quiet=False)
         self.initialize_architecture()
          cprint('Started.', self._logger)
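
# A minimal sketch (assumption) of how the discriminator stack above might
# score an input: the convolutional features are flattened into the sigmoid
# decision head, yielding one probability per sample.
def discriminate(self, inputs):
    features = self._discriminator(inputs).flatten(start_dim=1)  # -> (B, 9 * 6 * 6)
    return self._discriminator_decision(features)  # probability in (0, 1)
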
    ################################################################################
    print(f'{get_date_time_tag()}: Define network')
    input_size = 8 * 16 * 16
    output_size = 64 * 64

    VisualPriorRepresentation._load_unloaded_nets([feature_type])
    encoder = VisualPriorRepresentation.feature_task_to_net[feature_type]
    fixed_encoder = copy.deepcopy(encoder) if arguments.side_tuning else None

    if arguments.end_to_end or arguments.side_tuning:
        for p in encoder.parameters():
            p.requires_grad = True

    if arguments.mlp:
        decoder = mlp_creator(sizes=[input_size, 2056, 2056, output_size],
                              activation=nn.ReLU(),
                              output_activation=None,
                              bias_in_last_layer=False)
    else:
        decoder = TaskonomyDecoder(out_channels=1,
                                   is_decoder_mlp=False,
                                   apply_tanh=True,
                                   eval_only=False)
        for p in decoder.parameters():
            p.requires_grad = True

    print(f'encoder: {get_checksum_network_parameters(encoder.parameters())}')
    print(f'decoder: {get_checksum_network_parameters(decoder.parameters())}')
    if arguments.side_tuning:
        print(
            f'fixed_encoder: {get_checksum_network_parameters(fixed_encoder.parameters())}'
        )
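
# A hedged sketch of how the side-tuning branch above might combine the frozen
# copy with the trainable encoder (assumption based on the side-tuning
# literature; the actual script may blend the two differently):
def side_tuned_features(x, encoder, fixed_encoder, alpha: float = 0.5):
    return alpha * fixed_encoder(x) + (1 - alpha) * encoder(x)
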