Example #1
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     if init_values is None:
         # Forward to the shared part's custom initialization, then initialize both heads
         self.shared.init_param(None, **kwargs)
         init_param(self.head_1, **kwargs)
         init_param(self.head_2, **kwargs)
     else:
         self.param_values = init_values
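All of these examples delegate to a module-level init_param helper rather than calling PyTorch initializers directly. A minimal sketch of what such a helper could look like, assuming it dispatches on the layer type (an illustration, not the library's actual implementation):

import torch.nn as nn


def init_param(m: nn.Module, **kwargs):
    # Hypothetical helper: (re-)initialize a module's parameters in place
    if isinstance(m, nn.Linear):
        if kwargs.get("uniform", False):  # assumed kwarg to switch schemes
            nn.init.uniform_(m.weight, -0.1, 0.1)
        else:
            nn.init.xavier_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    else:
        # Recurse into containers such as nn.Sequential
        for child in m.children():
            init_param(child, **kwargs)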
Example #2
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     if init_values is None:
         # Initialize the layers using default initialization
         init_param(self.rnn_layers, **kwargs)
         init_param(self.output_layer, **kwargs)
     else:
         self.param_values = init_values
Example #3
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     # See RNNPolicyBase
     if init_values is None:
         # Initialize the layers using default initialization
         init_param(self.rnn_layers, **kwargs)
         init_param(self.output_layer, **kwargs)
     else:
         cp.vector_to_parameters(init_values, self.parameters())
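Example #3 writes fixed values back with cp.vector_to_parameters, where cp presumably aliases torch.nn.utils.convert_parameters. This PyTorch utility copies a flat vector into a module's parameters in place; a short usage sketch:

import torch as to
from torch.nn.utils import convert_parameters as cp

net = to.nn.Linear(3, 2)
flat = cp.parameters_to_vector(net.parameters())  # flatten all parameters into one vector
cp.vector_to_parameters(to.zeros_like(flat), net.parameters())  # copy values back in place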
Example #4
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     if init_values is None:
         if isinstance(self.net, FNN):
             # Forward to the FNN's custom initialization function (handles dropout)
             self.net.init_param(init_values, **kwargs)
         else:
             # Initialize using default initialization
             init_param(self.net, **kwargs)
     else:
         self.param_values = init_values
Example #5
    def init_param(self, init_values: to.Tensor = None, **kwargs):
        if init_values is None:
            # Initialize common layers
            init_param(self.obs_layer, **kwargs)
            self.resting_level.data = to.randn_like(self.resting_level.data)

            # Initialize time constant and potentials if learnable
            if self.tau_learnable:
                self._log_tau.data = self._log_tau_init
            if self.potential_init_learnable:
                self._potentials_init.data = to.randn(self.hidden_size)

        else:
            self.param_values = init_values
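Examples #5 to #7 reset the time constant via _log_tau rather than tau itself. A plausible reason (an assumption here, the snippets do not state it) is that learning the logarithm guarantees a positive time constant after exponentiation:

import torch as to

log_tau = to.nn.Parameter(to.zeros(1))  # hypothetical learnable log time constant
tau = to.exp(log_tau)  # tau > 0 for any real-valued log_tau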
Example #6
    def init_param(self, init_values: to.Tensor = None, **kwargs):
        super().init_param(init_values, **kwargs)

        if init_values is None:
            # Initialize layers
            init_param(self.prev_act_layer, **kwargs)
            if kwargs.get("sigmoid_nlin", False):
                self.prev_act_layer.weight.data.fill_(-0.5)  # inhibit others
                for i in range(self.prev_act_layer.weight.data.shape[0]):
                    self.prev_act_layer.weight.data[i, i] = 1.0  # excite self
            init_param(self.pot_to_act_layer, **kwargs)

            # Initialize cubic decay and capacity if learnable
            if self.potentials_dot_fcn == pd_cubic and self.kappa_learnable:
                self._log_kappa.data = self._log_kappa_init
            elif self.potentials_dot_fcn in [pd_capacity_21, pd_capacity_21_abs, pd_capacity_32, pd_capacity_32_abs]:
                if self.capacity_learnable:
                    self._log_capacity.data = self._log_capacity_init

        else:
            self.param_values = init_values
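The fill-then-loop pattern in Examples #6 and #7 builds a lateral-inhibition weight matrix: each unit excites itself (diagonal 1.0) and inhibits the others (off-diagonal -0.5). The loop is equivalent to a closed-form expression, as this small check shows:

import torch as to

n = 4
w = to.full((n, n), -0.5)  # inhibit others
w.fill_diagonal_(1.0)  # excite self
assert to.allclose(w, 1.5 * to.eye(n) - 0.5 * to.ones(n, n))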
Example #7
    def init_param(self, init_values: to.Tensor = None, **kwargs):
        if init_values is None:
            # Initialize RNN layers
            init_param(self.obs_layer, **kwargs)
            init_param(self.prev_act_layer, **kwargs)
            if kwargs.get('sigmoid_nlin', False):
                self.prev_act_layer.weight.data.fill_(-0.5)  # inhibit others
                for i in range(self.prev_act_layer.weight.data.shape[0]):
                    self.prev_act_layer.weight.data[i, i] = 1.  # excite self
            init_param(self.nonlin_layer, **kwargs)

            # Initialize time constant if modifiable
            if self.tau_learnable:
                self._log_tau.data = self._log_tau_init
            # Initialize cubic decay if modifiable
            if self.potentials_dot_fcn == pd_cubic:
                if self.kappa_learnable:
                    self._log_kappa.data = self._log_kappa_init
            # Initialize capacity if modifiable
            elif self.potentials_dot_fcn in [
                    pd_capacity_21, pd_capacity_21_abs, pd_capacity_32,
                    pd_capacity_32_abs
            ]:
                if self.capacity_learnable:
                    self._log_capacity.data = self._log_capacity_init

        else:
            self.param_values = init_values
Example #8
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     if init_values is None:
         # Initialize the layers using default initialization
         init_param(self.shared, **kwargs)
         init_param(self.head_1, **kwargs)
         init_param(self.head_2, **kwargs)
     else:
         self.param_values = init_values
Example #9
    def init_param(self, init_values: to.Tensor = None, **kwargs):
        if init_values is None:
            # Initialize RNN layers
            init_param(self.obs_layer, **kwargs)
            # self.obs_layer.weight.data /= 100.
            # self.obs_layer.bias.data /= 100.
            self.resting_level.data = to.randn_like(self.resting_level.data)
            init_param(self.conv_layer, **kwargs)
            # init_param(self.post_conv_layer, **kwargs)
            init_param(self.nonlin_layer, **kwargs)
            init_param(self.act_layer, **kwargs)

            # Initialize time constant if modifiable
            if self.tau_learnable:
                self._log_tau.data = self._log_tau_init

        else:
            self.param_values = init_values
Example #10
    def init_param(self, init_values: to.Tensor = None, **kwargs):
        super().init_param(init_values, **kwargs)

        if init_values is None:
            # Initialize layers
            init_param(self.conv_layer, **kwargs)
            # init_param(self.post_conv_layer, **kwargs)
            init_param(self.pot_to_activ, **kwargs)
            init_param(self.act_layer, **kwargs)

        else:
            self.param_values = init_values
Example #11
    def init_param(self, init_values: to.Tensor = None, **kwargs):
        """
        Initialize the network's parameters. By default the parameters are initialized randomly.

        :param init_values: Tensor of fixed initial network parameter values
        """
        if init_values is None:
            # Initialize hidden layers
            for i, layer in enumerate(self.hidden_layers):
                if self.dropout == 0:
                    # If there is no dropout, initialize weights and biases for every layer
                    init_param(layer, **kwargs)
                elif self.dropout > 0 and i % 2 == 0:
                    # If there is dropout, omit the initialization for the dropout layers
                    init_param(layer, **kwargs)

            # Initialize output layer
            init_param(self.output_layer, **kwargs)

        else:
            self.param_values = init_values
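The i % 2 == 0 test in Example #11 only works if hidden_layers alternates trainable layers (even indices) with dropout layers (odd indices). A minimal sketch of such a layout, with hypothetical sizes since the constructor is not shown:

import torch.nn as nn

hidden_layers = nn.ModuleList()
for n_in, n_out in [(8, 16), (16, 16)]:
    hidden_layers.append(nn.Linear(n_in, n_out))  # even index: has weights to initialize
    hidden_layers.append(nn.Dropout(p=0.2))  # odd index: dropout, nothing to initialize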
Example #12
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     if init_values is None:
         # Initialize the layer using default initialization
         init_param(self.net, **kwargs)
     else:
         self.param_values = init_values  # ignore the IntelliJ warning
Example #13
        print(
            f'ptwise_conv_layer weights shape: {ptwise_conv_layer.weight.shape}'
        )

    else:
        # Standard way
        conv_layer = nn.Conv1d(in_channels,
                               out_channels,
                               kernel_size,
                               stride=1,
                               padding=padding,
                               dilation=1,
                               groups=1,
                               bias=False,
                               padding_mode=padding_mode)
        init_param(conv_layer, bell=use_custom_bell_init)
        print(f'conv_layer weights shape: {conv_layer.weight.shape}')

        # A ramp filter
        if hand_coded_filter:
            conv_layer.weight.data = to.linspace(0, 1, kernel_size).repeat(
                2, 1).unsqueeze(0)

        # Mirrored weights
        elif use_custom_mirr_layer:
            conv_layer = MirrConv1d(in_channels,
                                    out_channels,
                                    kernel_size,
                                    stride=1,
                                    padding=padding,
                                    dilation=1,
                                    groups=1,
                                    bias=False,
                                    padding_mode=padding_mode)
Example #14
 def init_param(self, init_values: to.Tensor = None, **kwargs):
     if init_values is None:
         init_param(self.conds, **kwargs)
     else:
         self.param_values = init_values  # ignore the IntelliJ warning
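All examples share the same calling convention: no argument for random default initialization, or a flat tensor for deterministic initialization. A self-contained sketch of a hypothetical policy following this pattern, with param_values as a property backed by torch.nn.utils.convert_parameters as in Example #3:

import torch as to
import torch.nn as nn
from torch.nn.utils import convert_parameters as cp


class TwoLayerPolicy(nn.Module):
    # Hypothetical stand-in for the policies in the examples above

    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(4, 8), nn.Tanh(), nn.Linear(8, 2))

    @property
    def param_values(self) -> to.Tensor:
        return cp.parameters_to_vector(self.parameters())

    @param_values.setter
    def param_values(self, values: to.Tensor):
        cp.vector_to_parameters(values, self.parameters())

    def init_param(self, init_values: to.Tensor = None, **kwargs):
        if init_values is None:
            for layer in self.net:
                if isinstance(layer, nn.Linear):
                    nn.init.xavier_normal_(layer.weight)
                    nn.init.zeros_(layer.bias)
        else:
            self.param_values = init_values


policy = TwoLayerPolicy()
policy.init_param()  # random default initialization
policy.init_param(to.zeros_like(policy.param_values))  # fixed initialization from a flat tensor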