Example #1
File: fcnet.py  Project: qyccc/rllibddpg
    def _init(self, inputs, num_outputs, options):
        assert type(inputs) is int
        hiddens = options.get("fcnet_hiddens", [256, 256])
        fcnet_activation = options.get("fcnet_activation", "tanh")
        activation = None
        if fcnet_activation == "tanh":
            activation = nn.Tanh
        elif fcnet_activation == "relu":
            activation = nn.ReLU
        print("Constructing fcnet {} {}".format(hiddens, activation))

        layers = []
        last_layer_size = inputs
        for size in hiddens:
            layers.append(
                SlimFC(last_layer_size,
                       size,
                       initializer=normc_initializer(1.0),
                       activation_fn=activation))
            last_layer_size = size

        self.hidden_layers = nn.Sequential(*layers)

        self.logits = SlimFC(last_layer_size,
                             num_outputs,
                             initializer=normc_initializer(0.01),
                             activation_fn=None)
        self.probs = nn.Softmax()
        self.value_branch = SlimFC(last_layer_size,
                                   1,
                                   initializer=normc_initializer(1.0),
                                   activation_fn=None)
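
The fcnet examples above lean on two RLlib helpers, SlimFC and normc_initializer. As a rough orientation for readers without the RLlib source at hand, the following is a minimal stand-in sketch, not RLlib's actual implementation: SlimFC is assumed to be a Linear layer plus an optional activation, and normc_initializer is assumed to rescale each output unit's weight vector to a given norm, which matches how both are used above.

import torch
import torch.nn as nn


def normc_initializer(std=1.0):
    # Assumed behavior: sample N(0, 1) weights, then rescale each output
    # unit's incoming weight vector so its L2 norm equals `std`.
    def initializer(weight):
        with torch.no_grad():
            weight.normal_(0, 1)
            weight *= std / torch.sqrt(weight.pow(2).sum(dim=1, keepdim=True))
    return initializer


class SlimFC(nn.Module):
    """Simplified stand-in: fully connected layer with optional activation."""

    def __init__(self, in_size, out_size, initializer=None, activation_fn=None):
        super().__init__()
        linear = nn.Linear(in_size, out_size)
        if initializer is not None:
            initializer(linear.weight)
        nn.init.constant_(linear.bias, 0.0)
        layers = [linear]
        if activation_fn is not None:
            layers.append(activation_fn())
        self._model = nn.Sequential(*layers)

    def forward(self, x):
        return self._model(x)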
Example #2
File: fcnet.py  Project: jamescasbon/ray
    def __init__(self, obs_space, num_outputs, options):
        TorchModel.__init__(self, obs_space, num_outputs, options)
        hiddens = options.get("fcnet_hiddens")
        activation = _get_activation_fn(options.get("fcnet_activation"))
        logger.debug("Constructing fcnet {} {}".format(hiddens, activation))
        layers = []
        last_layer_size = np.product(obs_space.shape)
        for size in hiddens:
            layers.append(
                SlimFC(
                    in_size=last_layer_size,
                    out_size=size,
                    initializer=normc_initializer(1.0),
                    activation_fn=activation))
            last_layer_size = size

        self._hidden_layers = nn.Sequential(*layers)

        self._logits = SlimFC(
            in_size=last_layer_size,
            out_size=num_outputs,
            initializer=normc_initializer(0.01),
            activation_fn=None)
        self._value_branch = SlimFC(
            in_size=last_layer_size,
            out_size=1,
            initializer=normc_initializer(1.0),
            activation_fn=None)
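
Example #2 differs from Example #1 in that it takes a gym observation space and flattens it (np.product(obs_space.shape)) instead of a pre-computed integer size, and it drops the explicit softmax, leaving raw logits plus a separate scalar value branch, the usual actor-critic layout. Below is a hedged usage sketch of that layout in plain PyTorch; the class and variable names are illustrative and not part of the project code.

import torch
import torch.nn as nn


class TinyFCNet(nn.Module):
    # Illustrative wrapper mirroring the hidden / logits / value layout above.
    def __init__(self, obs_dim, num_outputs, hiddens=(256, 256)):
        super().__init__()
        layers = []
        last = obs_dim
        for size in hiddens:
            layers += [nn.Linear(last, size), nn.Tanh()]
            last = size
        self.hidden_layers = nn.Sequential(*layers)
        self.logits = nn.Linear(last, num_outputs)
        self.value_branch = nn.Linear(last, 1)

    def forward(self, obs):
        features = self.hidden_layers(obs.reshape(obs.shape[0], -1))
        return self.logits(features), self.value_branch(features).squeeze(1)


# Dummy batch of 4 flattened observations with 8 features each.
model = TinyFCNet(obs_dim=8, num_outputs=3)
logits, values = model(torch.randn(4, 8))
print(logits.shape, values.shape)  # torch.Size([4, 3]) torch.Size([4])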
Example #3
File: fcnet.py  Project: adgirish/ray
    def _init(self, inputs, num_outputs, options):
        assert type(inputs) is int
        hiddens = options.get("fcnet_hiddens", [256, 256])
        fcnet_activation = options.get("fcnet_activation", "tanh")
        activation = None
        if fcnet_activation == "tanh":
            activation = nn.Tanh
        elif fcnet_activation == "relu":
            activation = nn.ReLU
        print("Constructing fcnet {} {}".format(hiddens, activation))

        layers = []
        last_layer_size = inputs
        for size in hiddens:
            layers.append(SlimFC(
                last_layer_size, size,
                initializer=normc_initializer(1.0),
                activation_fn=activation))
            last_layer_size = size

        self.hidden_layers = nn.Sequential(*layers)

        self.logits = SlimFC(
            last_layer_size, num_outputs,
            initializer=normc_initializer(0.01),
            activation_fn=None)
        self.probs = nn.Softmax()
        self.value_branch = SlimFC(
            last_layer_size, 1,
            initializer=normc_initializer(1.0),
            activation_fn=None)
Example #4
File: fcnet.py  Project: anke522/ray-1
    def __init__(self, obs_space, num_outputs, options):
        TorchModel.__init__(self, obs_space, num_outputs, options)
        hiddens = options.get("fcnet_hiddens")
        activation = _get_activation_fn(options.get("fcnet_activation"))
        logger.debug("Constructing fcnet {} {}".format(hiddens, activation))
        layers = []
        last_layer_size = np.product(obs_space.shape)
        for size in hiddens:
            layers.append(
                SlimFC(in_size=last_layer_size,
                       out_size=size,
                       initializer=normc_initializer(1.0),
                       activation_fn=activation))
            last_layer_size = size

        self._hidden_layers = nn.Sequential(*layers)

        self._logits = SlimFC(in_size=last_layer_size,
                              out_size=num_outputs,
                              initializer=normc_initializer(0.01),
                              activation_fn=None)
        self._value_branch = SlimFC(in_size=last_layer_size,
                                    out_size=1,
                                    initializer=normc_initializer(1.0),
                                    activation_fn=None)
Example #5
    def _init(self, inputs, num_outputs, options):
        """TF visionnet in PyTorch.

        Params:
            inputs (tuple): (channels, rows/height, cols/width)
            num_outputs (int): logits size
        """
        filters = options.get(
            "conv_filters",
            [[16, [8, 8], 4], [32, [4, 4], 2], [512, [10, 10], 1]])
        layers = []
        in_channels, in_size = inputs[0], inputs[1:]

        for out_channels, kernel, stride in filters[:-1]:
            padding, out_size = valid_padding(in_size, kernel,
                                              [stride, stride])
            layers.append(
                SlimConv2d(in_channels, out_channels, kernel, stride, padding))
            in_channels = out_channels
            in_size = out_size

        out_channels, kernel, stride = filters[-1]
        layers.append(
            SlimConv2d(in_channels, out_channels, kernel, stride, None))
        self._convs = nn.Sequential(*layers)

        self.logits = SlimFC(out_channels,
                             num_outputs,
                             initializer=nn.init.xavier_uniform)
        self.probs = nn.Softmax()
        self.value_branch = SlimFC(out_channels,
                                   1,
                                   initializer=normc_initializer())
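
The vision examples use valid_padding to mimic TensorFlow's "same" padding, so that each conv layer downsamples its input by exactly its stride before the final filter collapses the remaining spatial extent. The helper below is a minimal sketch of that arithmetic under the standard "same"-padding formula; it is an assumption about what valid_padding returns, inferred from how it is used above, not the RLlib implementation.

import math


def same_padding(in_size, kernel, stride):
    # in_size, kernel, stride: [height, width] pairs, as in the examples above.
    out_size, padding = [], []
    for dim in range(2):
        out = math.ceil(in_size[dim] / stride[dim])
        pad = max((out - 1) * stride[dim] + kernel[dim] - in_size[dim], 0)
        out_size.append(out)
        # Symmetric per-side padding; odd totals are approximated here,
        # a real "same" padding would pad asymmetrically.
        padding.append(pad // 2)
    return padding, out_size


# 84x84 Atari-style input through the first two default filters:
size = [84, 84]
for _, kernel, stride in [[16, [8, 8], 4], [32, [4, 4], 2]]:
    pad, size = same_padding(size, kernel, [stride, stride])
    print(pad, size)  # [2, 2] [21, 21], then [1, 1] [11, 11]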
Example #6
    def __init__(self, obs_space, num_outputs, options):
        TorchModel.__init__(self, obs_space, num_outputs, options)
        filters = options.get("conv_filters")
        if not filters:
            filters = _get_filter_config(obs_space.shape)
        layers = []

        (w, h, in_channels) = obs_space.shape
        in_size = [w, h]
        for out_channels, kernel, stride in filters[:-1]:
            padding, out_size = valid_padding(in_size, kernel,
                                              [stride, stride])
            layers.append(
                SlimConv2d(in_channels, out_channels, kernel, stride, padding))
            in_channels = out_channels
            in_size = out_size

        out_channels, kernel, stride = filters[-1]
        layers.append(
            SlimConv2d(in_channels, out_channels, kernel, stride, None))
        self._convs = nn.Sequential(*layers)

        self._logits = SlimFC(out_channels,
                              num_outputs,
                              initializer=nn.init.xavier_uniform_)
        self._value_branch = SlimFC(out_channels,
                                    1,
                                    initializer=normc_initializer())
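
In Example #6 the observation space is channels-last, (w, h, in_channels), while nn.Conv2d expects NCHW tensors, so the forward pass (not shown here) has to permute the observation before self._convs; and because the last filter's kernel spans the remaining spatial extent, the conv stack ends in a [batch, out_channels, 1, 1] feature that can be squeezed and fed to the logits and value heads. A small sketch of that shape flow, using a toy stack with explicit padding rather than the project's filter config:

import torch
import torch.nn as nn

# Toy conv stack mirroring the final-filter trick above: the last kernel spans
# the remaining spatial extent, so the output is [batch, channels, 1, 1].
convs = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=8, stride=4, padding=2),   # 84 -> 21
    nn.ReLU(),
    nn.Conv2d(16, 32, kernel_size=4, stride=2, padding=1),  # 21 -> 10
    nn.ReLU(),
    nn.Conv2d(32, 512, kernel_size=10, stride=1),           # 10 -> 1
    nn.ReLU(),
)

obs = torch.rand(4, 84, 84, 3)                # NHWC, as an image obs_space would give
features = convs(obs.permute(0, 3, 1, 2))     # convert to NCHW before the convs
features = features.squeeze(3).squeeze(2)     # [4, 512], ready for logits/value heads
print(features.shape)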
Example #7
File: visionnet.py  Project: adgirish/ray
    def _init(self, inputs, num_outputs, options):
        """TF visionnet in PyTorch.

        Params:
            inputs (tuple): (channels, rows/height, cols/width)
            num_outputs (int): logits size
        """
        filters = options.get("conv_filters", [
            [16, [8, 8], 4],
            [32, [4, 4], 2],
            [512, [10, 10], 1]
        ])
        layers = []
        in_channels, in_size = inputs[0], inputs[1:]

        for out_channels, kernel, stride in filters[:-1]:
            padding, out_size = valid_padding(
                in_size, kernel, [stride, stride])
            layers.append(SlimConv2d(
                in_channels, out_channels, kernel, stride, padding))
            in_channels = out_channels
            in_size = out_size

        out_channels, kernel, stride = filters[-1]
        layers.append(SlimConv2d(
                in_channels, out_channels, kernel, stride, None))
        self._convs = nn.Sequential(*layers)

        self.logits = SlimFC(
            out_channels, num_outputs, initializer=nn.init.xavier_uniform)
        self.probs = nn.Softmax()
        self.value_branch = SlimFC(
            out_channels, 1, initializer=normc_initializer())
Example #8
    def __init__(self, obs_space, num_outputs, options):
        TorchModel.__init__(self, obs_space, num_outputs, options)
        filters = options.get("conv_filters")
        if not filters:
            filters = _get_filter_config(obs_space.shape)
        layers = []

        (w, h, in_channels) = obs_space.shape
        in_size = [w, h]
        for out_channels, kernel, stride in filters[:-1]:
            padding, out_size = valid_padding(in_size, kernel,
                                              [stride, stride])
            layers.append(
                SlimConv2d(in_channels, out_channels, kernel, stride, padding))
            in_channels = out_channels
            in_size = out_size

        out_channels, kernel, stride = filters[-1]
        layers.append(
            SlimConv2d(in_channels, out_channels, kernel, stride, None))
        self._convs = nn.Sequential(*layers)

        self._logits = SlimFC(
            out_channels, num_outputs, initializer=nn.init.xavier_uniform_)
        self._value_branch = SlimFC(
            out_channels, 1, initializer=normc_initializer())
Example #9
    def __init__(self, n_global_obs, n_actions, n_other_actions, options):
        nn.Module.__init__(self)
        hiddens = options.get("fcnet_hiddens")
        activation = _get_activation_fn(options.get("fcnet_activation"))
        logger.debug("Constructing MADDPG Critic {} {}".format(
            hiddens, activation))
        layers = []
        last_layer_size = n_global_obs + n_actions + n_other_actions
        for size in hiddens:
            layers.append(
                SlimFC(in_size=last_layer_size,
                       out_size=size,
                       initializer=normc_initializer(1.0),
                       activation_fn=activation))
            last_layer_size = size

        self._hidden_layers = nn.Sequential(*layers)

        self._value = SlimFC(in_size=last_layer_size,
                             out_size=1,
                             initializer=normc_initializer(0.01),
                             activation_fn=None)
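
Example #9 is a centralized MADDPG critic: its input is the concatenation of the global observation, the agent's own action, and the other agents' actions, and its single output is the Q-value for that joint state-action. A hedged usage sketch of the input wiring, with illustrative sizes and plain linear layers standing in for SlimFC:

import torch
import torch.nn as nn

n_global_obs, n_actions, n_other_actions = 10, 2, 4  # illustrative sizes

critic = nn.Sequential(
    nn.Linear(n_global_obs + n_actions + n_other_actions, 64),
    nn.ReLU(),
    nn.Linear(64, 64),
    nn.ReLU(),
    nn.Linear(64, 1),  # Q(s, a_self, a_others)
)

obs = torch.randn(8, n_global_obs)             # batch of global observations
own_act = torch.randn(8, n_actions)            # this agent's action
other_acts = torch.randn(8, n_other_actions)   # other agents' actions, concatenated
q_values = critic(torch.cat([obs, own_act, other_acts], dim=1))
print(q_values.shape)  # torch.Size([8, 1])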