def __init__(self, in_channels, out_channels, debug=False):
    """Build the SegNet encoder/decoder stack.

    The encoder mirrors VGG-16 (without its classifier head); its weights
    are loaded from a pretrained model via ``_load_encoder_weights``. The
    decoder mirrors the encoder and is initialised with ``normal_init``.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        debug (bool): stored on the instance; presumably enables extra
            diagnostics elsewhere in the model — confirm against callers.
    """
    super(Segnet, self).__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.debug = debug

    # Encoder (VGG-16 without classifier): blocks 00-01 have two conv
    # layers, blocks 02-04 have three. Registration order matches the
    # forward pass, so iterating self._modules stays meaningful.
    for blk, enc_cls in (
        ('block00', segnetEncoderBlock2),
        ('block01', segnetEncoderBlock2),
        ('block02', segnetEncoderBlock3),
        ('block03', segnetEncoderBlock3),
        ('block04', segnetEncoderBlock3),
    ):
        setattr(self, 'enc_' + blk, enc_cls(
            self.encoder_dims(blk, 'in'), self.encoder_dims(blk, 'out')))

    # Pull the pretrained VGG-16 weights into the encoder.
    self._load_encoder_weights()

    # Decoder, registered deepest-first (block04 ... block00) to mirror
    # the upsampling path.
    for blk, dec_cls in (
        ('block04', segnetDecoderBlock3),
        ('block03', segnetDecoderBlock3),
        ('block02', segnetDecoderBlock3),
        ('block01', segnetDecoderBlock2),
        ('block00', segnetDecoderBlock2),
    ):
        setattr(self, 'dec_' + blk, dec_cls(
            self.decoder_dims(blk, 'in'), self.decoder_dims(blk, 'out')))

    # Decoder weights are random-initialised (no pretrained counterpart).
    for blk in ('block04', 'block03', 'block02', 'block01', 'block00'):
        normal_init(getattr(self, 'dec_' + blk))

    # Per-pixel class probabilities over the channel dimension.
    self.softmax = nn.Softmax(dim=1)
def __init__(self, n_inputs, hidden_size, n_outputs, dropout=0.0):
    """Single-layer LSTM followed by a linear readout and log-softmax.

    Args:
        n_inputs (int): size of each input feature vector.
        hidden_size (int): number of LSTM hidden units.
        n_outputs (int): number of output classes.
        dropout (float): passed to ``nn.LSTM``; NOTE with ``num_layers=1``
            PyTorch applies no inter-layer dropout (it only warns), so this
            is effectively a no-op. Kept for interface compatibility.
    """
    super(SimpleRNN, self).__init__()
    self.hidden_size = hidden_size
    self.rnn = nn.LSTM(n_inputs, hidden_size, num_layers=1, dropout=dropout)
    self.out = nn.Linear(hidden_size, n_outputs)
    # Fix: the bare nn.LogSoftmax() relies on a deprecated implicit-dim
    # heuristic that picks dim 0 for a 3-D (seq, batch, classes) tensor —
    # the wrong axis. Normalise over the class scores explicitly.
    self.softmax = nn.LogSoftmax(dim=-1)
    # Fix: the old code did `w.data = normal_init(w.data.size())`, but
    # elsewhere in this file normal_init takes a *module* (see weight_init),
    # so calling it with a size and rebinding .data was broken. Use the
    # standard in-place initialiser instead.
    for w in self.parameters():
        nn.init.normal_(w, mean=0.0, std=0.01)
def weight_init(self, mean=0, std=0.01):
    """Initialise every registered child module with normal-distributed weights.

    Args:
        mean (float): mean of the normal distribution.
        std (float): standard deviation of the normal distribution.
    """
    for module in self._modules.values():
        utils.normal_init(module, mean, std)