Example #1
def initialize(self, **kwargs):
    log.info('Ignoring input {} when initializing {}'.format(
        repr(kwargs), self.__class__.__name__))
    # Iterate over submodules, not parameters: self.parameters() yields
    # Parameter tensors, so an isinstance check against nn.Linear would
    # never match and the Xavier init would silently be skipped.
    for linear_layer in [m for m in self.modules() if isinstance(m, nn.Linear)]:
        xavier_normal(linear_layer.weight)
def __init__(self, neurons, input_channels=3, hidden_channels=10, layers=2, **kwargs):
    super().__init__()
    log.info('Ignoring input {} when creating {}'.format(repr(kwargs), self.__class__.__name__))
    feat = [nn.Linear(input_channels, hidden_channels), nn.ReLU()]
    for _ in range(layers - 1):
        feat.extend([nn.Linear(hidden_channels, hidden_channels), nn.ReLU()])
    self.mlp = nn.Sequential(*feat)
    self.linear = nn.Linear(hidden_channels, neurons)
def forward(self, input, readoutput=None):
    mod = torch.exp(self.linear(self.mlp(input)))

    if readoutput is None:
        log.info('Nothing to modulate. Returning modulation only')
        return mod
    else:
        return readoutput * mod
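Read together, the constructor builds a small ReLU MLP whose exponentiated output acts as a strictly positive multiplicative gain on a readout. A minimal stand-alone sketch, assuming the methods above belong to one nn.Module subclass; the class name Modulator, the batch size, and the omission of logging are placeholders, not from the source:

import torch
from torch import nn

class Modulator(nn.Module):  # placeholder name for the snippet's host class
    def __init__(self, neurons, input_channels=3, hidden_channels=10, layers=2):
        super().__init__()
        feat = [nn.Linear(input_channels, hidden_channels), nn.ReLU()]
        for _ in range(layers - 1):
            feat.extend([nn.Linear(hidden_channels, hidden_channels), nn.ReLU()])
        self.mlp = nn.Sequential(*feat)
        self.linear = nn.Linear(hidden_channels, neurons)

    def forward(self, input, readoutput=None):
        mod = torch.exp(self.linear(self.mlp(input)))  # strictly positive gain
        return mod if readoutput is None else readoutput * mod

m = Modulator(neurons=50)
state = torch.randn(8, 3)                    # batch of 8, 3 input channels
gain = m(state)                              # shape (8, 50), all entries > 0
modulated = m(state, readoutput=torch.rand(8, 50))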
Example #4
def initialize(self, bias=None):
    self.weight.data.normal_(0, 1e-6)
    if self.bias is not None:
        if bias is not None:
            log.info('Setting bias to predefined value')
            self.bias.data = bias
        else:
            self.bias.data.normal_(0, 1e-6)
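This initializer draws near-zero weights and either copies a caller-supplied bias or draws a near-zero one. A small sketch of the calling convention, assuming the method sits on an nn.Linear subclass; the class name Affine and the logger omission are assumptions:

import torch
from torch import nn

class Affine(nn.Linear):  # placeholder stand-in for the snippet's host class
    def initialize(self, bias=None):
        self.weight.data.normal_(0, 1e-6)       # start close to zero
        if self.bias is not None:
            if bias is not None:
                self.bias.data = bias            # predefined bias wins
            else:
                self.bias.data.normal_(0, 1e-6)

layer = Affine(3, 2)
layer.initialize(bias=torch.tensor([0.0, 1.0]))  # fixed bias
layer.initialize()                               # random near-zero bias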
Example #5
def initialize(self, bias=None):
    log.info('Initializing shifter weights' +
             (' and biases' if bias is not None else ''))
    for k in self:
        if bias is not None:
            self[k].initialize(bias=bias[k])
        else:
            self[k].initialize()
def __init__(self, n_neurons, input_channels=3, hidden_channels=5,
             layers=2, gamma_modulator=0, **kwargs):
    log.info('Ignoring input {} when creating {}'.format(repr(kwargs), self.__class__.__name__))
    super().__init__()
    self.gamma_modulator = gamma_modulator
    for k, n in n_neurons.items():
        if isinstance(input_channels, OrderedDict):
            ic = input_channels[k]
        else:
            ic = input_channels
        self.add_module(k, self._base_modulator(n, ic, hidden_channels, layers=layers))
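Both methods follow the same container pattern: one submodule per dataset key, with initialize dispatching per-key arguments. A toy reconstruction that runs stand-alone, using nn.ModuleDict as a stand-in for the source's container class; all names below are placeholders:

import torch
from torch import nn

class Affine(nn.Linear):  # minimal per-key member exposing an initialize() hook
    def initialize(self, bias=None):
        self.weight.data.normal_(0, 1e-6)
        if bias is not None:
            self.bias.data = bias

class PerKeyContainer(nn.ModuleDict):
    def initialize(self, bias=None):
        for k in self:
            if bias is not None:
                self[k].initialize(bias=bias[k])   # per-key bias lookup
            else:
                self[k].initialize()

c = PerKeyContainer({'scan1': Affine(3, 2), 'scan2': Affine(3, 2)})
c.initialize(bias={'scan1': torch.zeros(2), 'scan2': torch.ones(2)})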
Example #7
def __init__(self,
             data_keys,
             input_channels,
             bias=True,
             gamma_shifter=0,
             **kwargs):
    log.info('Ignoring input {} when creating {}'.format(
        repr(kwargs), self.__class__.__name__))
    super().__init__()
    self.gamma_shifter = gamma_shifter
    for k in data_keys:
        self.add_module(k, StaticAffine2d(input_channels, 2, bias=bias))
def __init__(self, in_shape, neurons, positive=False, gamma_features=0, pool_steps=0, **kwargs):
    log.info('Ignoring input {} when creating {}'.format(repr(kwargs), self.__class__.__name__))
    super().__init__()

    self.in_shape = in_shape
    self.neurons = neurons
    self._positive = positive
    self.gamma_features = gamma_features
    self._pool_steps = pool_steps
    for k, neur in neurons.items():
        if isinstance(self.in_shape, dict):
            in_shape = self.in_shape[k]
        self.add_module(k, SpatialTransformerPooled2d(in_shape, neur, positive=positive, pool_steps=pool_steps))
def __init__(self, in_shape, neurons, positive=False, gamma_features=0, scale_n=3, downsample=True,
             type=None, _skip_upsampling=False, **kwargs):
    log.info('Ignoring input {} when creating {}'.format(repr(kwargs), self.__class__.__name__))
    super().__init__()

    self.in_shape = in_shape
    self.neurons = neurons
    self._positive = positive
    self.gamma_features = gamma_features
    for k, neur in neurons.items():
        if isinstance(self.in_shape, dict):
            in_shape = self.in_shape[k]
        self.add_module(k, self._BaseReadout(in_shape, neur, positive=positive, scale_n=scale_n,
                                             downsample=downsample, _skip_upsampling=_skip_upsampling, type=type))
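The two readout constructors accept in_shape either as one shape shared across all keys or as a dict with one shape per dataset key. A stand-alone sketch of that dispatch; StubReadout and MultiReadout are placeholders for SpatialTransformerPooled2d (or _BaseReadout) and its container:

import torch
from torch import nn

class StubReadout(nn.Module):  # placeholder for the real per-key readout
    def __init__(self, in_shape, neurons, **kwargs):
        super().__init__()
        self.in_shape, self.neurons = in_shape, neurons

class MultiReadout(nn.Module):
    def __init__(self, in_shape, neurons, **kwargs):
        super().__init__()
        self.in_shape = in_shape
        for k, neur in neurons.items():
            if isinstance(self.in_shape, dict):   # per-key core output shape
                in_shape = self.in_shape[k]
            self.add_module(k, StubReadout(in_shape, neur, **kwargs))

shapes = {'scan1': (32, 16, 16), 'scan2': (32, 18, 18)}   # (C, H, W)
mr = MultiReadout(shapes, {'scan1': 100, 'scan2': 80}, positive=True)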
Example #10
def __init__(self,
             data_keys,
             input_channels=2,
             hidden_channels_shifter=2,
             shift_layers=1,
             gamma_shifter=0,
             **kwargs):
    log.info('Ignoring input {} when creating {}'.format(
        repr(kwargs), self.__class__.__name__))
    super().__init__()
    self.gamma_shifter = gamma_shifter
    for k in data_keys:
        self.add_module(
            k, MLP(input_channels, hidden_channels_shifter, shift_layers))
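A plausible way this shifter container is driven, assuming (as the Tanh output of the MLP in Example #12 below suggests) that each per-key MLP maps a behavioral signal such as pupil position to a 2-D shift in [-1, 1]; the class body and tensor shapes here are assumptions, not the source's MLP:

import torch
from torch import nn

class MLPShifter(nn.Module):  # placeholder reconstruction of the container above
    def __init__(self, data_keys, input_channels=2, hidden_channels_shifter=2):
        super().__init__()
        for k in data_keys:
            # stand-in for the MLP class defined in Example #12 below
            self.add_module(k, nn.Sequential(
                nn.Linear(input_channels, hidden_channels_shifter), nn.Tanh(),
                nn.Linear(hidden_channels_shifter, 2), nn.Tanh()))

shifter = MLPShifter(['scan1', 'scan2'])
eye_pos = torch.randn(8, 2)                  # e.g. pupil position per sample
shift = getattr(shifter, 'scan1')(eye_pos)   # per-sample 2-D shift in [-1, 1]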
def __init__(self, in_shape, neurons, gamma_readout, positive=True, normalize=True, **kwargs):
    log.info('Ignoring input {} when creating {}'.format(repr(kwargs), self.__class__.__name__))
    super().__init__()

    self.in_shape = in_shape
    self.neurons = neurons
    self.positive = positive
    self.normalize = normalize
    self.gamma_readout = gamma_readout

    for k, neur in neurons.items():
        if isinstance(self.in_shape, dict):
            in_shape = self.in_shape[k]
        self.add_module(k, SpatialXFeatureLinear(in_shape, neur, normalize=normalize, positive=positive))
Example #12
def __init__(self,
             input_features=2,
             hidden_channels=10,
             shift_layers=1,
             **kwargs):
    super().__init__()
    log.info('Ignoring input {} when creating {}'.format(
        repr(kwargs), self.__class__.__name__))

    feat = []
    if shift_layers > 1:
        feat = [nn.Linear(input_features, hidden_channels), nn.Tanh()]
    else:
        hidden_channels = input_features

    for _ in range(shift_layers - 2):
        feat.extend(
            [nn.Linear(hidden_channels, hidden_channels),
             nn.Tanh()])

    feat.extend([nn.Linear(hidden_channels, 2), nn.Tanh()])
    self.mlp = nn.Sequential(*feat)
def initialize(self):
    log.info('Initializing ' + self.__class__.__name__)
    for k in self:
        self[k].initialize()
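For concreteness, the MLP constructor above produces these architectures; a quick structural check that runs stand-alone, with the class named MLP to match Example #10 and logging omitted:

import torch
from torch import nn

class MLP(nn.Module):
    def __init__(self, input_features=2, hidden_channels=10, shift_layers=1):
        super().__init__()
        feat = []
        if shift_layers > 1:
            feat = [nn.Linear(input_features, hidden_channels), nn.Tanh()]
        else:
            hidden_channels = input_features  # single layer maps input directly
        for _ in range(shift_layers - 2):
            feat.extend([nn.Linear(hidden_channels, hidden_channels), nn.Tanh()])
        feat.extend([nn.Linear(hidden_channels, 2), nn.Tanh()])
        self.mlp = nn.Sequential(*feat)

print(MLP(shift_layers=1).mlp)  # Linear(2 -> 2), Tanh
print(MLP(shift_layers=3).mlp)  # Linear(2 -> 10), Tanh, Linear(10 -> 10), Tanh, Linear(10 -> 2), Tanh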
Example #14
def __init__(self, input_channels, output_channels, bias=True, **kwargs):
    log.info('Ignoring input {} when creating {}'.format(
        repr(kwargs), self.__class__.__name__))
    super().__init__(input_channels, output_channels, bias=bias)
def initialize(self, mu_dict):
    log.info('Initializing ' + self.__class__.__name__)
    for k, mu in mu_dict.items():
        self[k].initialize(init_noise=1e-6)
        self[k].bias.data = mu.squeeze() - 1
def __init__(self, in_shape, neurons, positive=False, gamma_features=0, **kwargs):
    log.info('Ignoring input {} when creating {}'.format(repr(kwargs), self.__class__.__name__))
    # note: despite the log message, kwargs are still forwarded to super().__init__
    super().__init__(in_shape, neurons, positive=positive,
                     gamma_features=gamma_features,
                     _pool_steps=0, **kwargs)
def initialize(self, mu_dict):
    log.info('Initializing with mu_dict: ' + ', '.join(['{}: {}'.format(k, len(m)) for k, m in mu_dict.items()]))

    for k, mu in mu_dict.items():
        self[k].initialize()
        self[k].bias.data = mu.squeeze() - 1
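Both initializers set each readout's bias to the per-neuron mean response minus one, presumably so that the model's output starts near the data mean under a shifted nonlinearity such as ELU(x) + 1 (that pairing is an assumption, not stated in the source). A toy stand-in so the pattern can be exercised; Stub and Biases are placeholder names:

import torch
from torch import nn

class Stub(nn.Module):  # minimal member with a bias and an initialize() hook
    def __init__(self, n):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(n))
    def initialize(self, **kwargs):
        pass

class Biases(nn.ModuleDict):
    def initialize(self, mu_dict):
        for k, mu in mu_dict.items():
            self[k].initialize()
            self[k].bias.data = mu.squeeze() - 1  # start near the mean response

b = Biases({'scan1': Stub(100)})
b.initialize({'scan1': torch.full((100, 1), 0.5)})  # bias becomes -0.5 everywhere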