Example #1
 def __init__(self, input_dim=5, hidden_dim=1024):
     """
     Averaged embeddings of ending -> label
     :param input_dim: number of input features
     :param hidden_dim: width of the hidden layers
     """
     super(LMFeatsModel, self).__init__()
     self.mapping = nn.Sequential(
         nn.Linear(input_dim, hidden_dim, bias=True),
         nn.SELU(),
         nn.AlphaDropout(p=0.2),
     )
     self.prediction = nn.Sequential(
         nn.Linear(hidden_dim, hidden_dim, bias=True),
         nn.SELU(),
         nn.AlphaDropout(p=0.2),
         nn.Linear(hidden_dim, 1, bias=False),
     )
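A minimal standalone sketch of the pattern this first example (and most of the ones below) relies on: SELU paired with AlphaDropout, which preserves the roughly zero-mean, unit-variance activations that plain Dropout would break. Only torch is assumed; the dimensions mirror the defaults above and are illustrative.

import torch
import torch.nn as nn

block = nn.Sequential(
    nn.Linear(5, 1024),
    nn.SELU(),
    nn.AlphaDropout(p=0.2),  # keeps the self-normalizing statistics intact
)

x = torch.randn(8, 5)   # batch of 8 illustrative feature vectors
block.train()           # dropout active during training
y_train = block(x)
block.eval()            # dropout disabled at inference time
y_eval = block(x)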
Example #2
    def __init__(self):
        super(small_cnn, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 10, kernel_size=3, padding=1), nn.AlphaDropout(p=0.5),
            nn.SELU())

        self.fc1 = nn.Linear(10 * 28 * 28, 100)
        self.fc2 = nn.Linear(100, 10)
Example #3
 def __init__(self,in_features,n_classes,n_fc_neurons=128):
     super().__init__()
     self.avg = nn.AdaptiveAvgPool2d((1,1))
     self.fc1 = nn.Linear(in_features,n_fc_neurons)
     self.fc2 = nn.Linear(n_fc_neurons,n_fc_neurons)
     self.fc3 = nn.Linear(n_fc_neurons,n_classes)
     self.activation = activation_func('selu')
     self.dropout = nn.AlphaDropout()
Example #4
 def __init__(self):
     super(AnciBlock, self).__init__()
     self.main1 = nn.Sequential(
         OrderedDict([('conv7x7', ConvBN(8, 8, [5, 5])),
                      ('Lrelu', nn.SELU(inplace=True)),
                      ('DP', nn.AlphaDropout())]))
     self.path1 = nn.Sequential(
         OrderedDict([('conv5x5', ConvBN(8, 8, [3, 3])),
                      ('Lrelu', nn.SELU(inplace=True)),
                      ('DP', nn.AlphaDropout())]))
     self.path2 = nn.Sequential(
         OrderedDict([('conv3x3', ConvBN(8, 8, [3, 3])),
                      ('Lrelu', nn.SELU(inplace=True)),
                      ('DP', nn.AlphaDropout())]))
     self.identity = nn.Identity()
     self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
     self.dp = nn.Dropout2d()
Example #5
 def __init__(self, inp):
     super(DNN_ClassifierM, self).__init__()
     self.layer1 = nn.Sequential(nn.Linear(inp, 132), nn.SELU(True))
     self.layer2 = nn.Sequential(nn.Linear(132, 66), nn.SELU(True))
     self.layer3 = nn.Sequential(nn.Linear(66, 66), nn.SELU(True))
     self.layer4 = nn.Sequential(nn.Linear(66, 30), nn.SELU(True))
     self.layer5 = nn.Sequential(nn.Linear(30, 15), nn.SELU(True),
                                 nn.AlphaDropout(p=0.3))
     self.layer6 = nn.Sequential(nn.Linear(15, 4), nn.LogSoftmax(dim=1))
Example #6
 def __init__(self,
              n_code,
              n_hidden,
              n_output,
              dropout=(.2, .2),
              activation='ReLU'):
     super(Decoder, self).__init__()
     self.lin1 = nn.Linear(n_code, n_hidden)
     self.lin2 = nn.Linear(n_hidden, n_hidden)
     self.lin3 = nn.Linear(n_hidden, n_output)
     if activation == 'SELU':
         self.drop1 = nn.AlphaDropout(dropout[0])
         self.drop2 = nn.AlphaDropout(dropout[1])
     else:
         self.drop1 = nn.Dropout(dropout[0])
         self.drop2 = nn.Dropout(dropout[1])
     self.act1 = getattr(nn, activation)()
     self.act2 = getattr(nn, activation)()
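A hedged construction sketch for the Decoder above; the sizes are illustrative. Passing activation='SELU' selects AlphaDropout for both dropout layers, while any other nn activation name (e.g. 'ReLU') falls back to plain nn.Dropout.

# Illustrative sizes; only the constructor shown above is exercised.
decoder = Decoder(n_code=32, n_hidden=256, n_output=784, dropout=(.2, .2), activation='SELU')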
Example #7
    def __init__(self, conv_dim, in_channels, out_channels, kernel_size,
                 weight_init=None, bias_init=None,
                 stride=1, padding=0, norm=None,
                 activation=None, pool_size=None, dropout=None):
        """Initialize a single ConvUnit (i.e. a conv layer)."""
        super(ConvUnit, self).__init__(weight_init, bias_init,
                                       norm, dropout)
        self.conv_dim = conv_dim
        self._init_layers()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size

        # Main layer
        self._kernel = self.conv_layer(
                              in_channels=self.in_channels,
                              kernel_size=self.kernel_size,
                              out_channels=self.out_channels,
                              stride=stride,
                              padding=padding,
                              bias=True
                              )
        self.add_module('_kernel', self._kernel)

        # Norm
        if self.norm:
            if self.norm == 'batch':
                self.add_module(
                    '_norm',
                    self.batch_norm(num_features=self.out_channels))
            elif self.norm == 'instance':
                self.add_module(
                    '_norm',
                    self.instance_norm(num_features=self.out_channels))

        # Activation/Non-Linearity
        if activation is not None:
            self.add_module('_activation', activation)
            if isinstance(activation, nn.SELU):
                self.weight_init = selu_weight_init_
                self.bias_init = selu_bias_init_
        # Pool
        if pool_size is not None:
            self.add_module(
                '_pool', self.pool_layer(kernel_size=pool_size))

        # Dropout
        if self.dropout is not None:
            if isinstance(activation, nn.SELU):
                self.add_module(
                    '_dropout', nn.AlphaDropout(self.dropout))
            else:
                self.add_module(
                    '_dropout', self.dropout_layer(self.dropout))
        self._init_weights()
        self._init_bias()
Example #8
def MakeDropout(in_shape, p, dropout_type):
    if dropout_type == DROPOUT_VANILLA:
        return nn.Dropout(p), in_shape
    elif dropout_type == DROPOUT_2D:
        return nn.Dropout2d(p), in_shape
    elif dropout_type == DROPOUT_ALPHA:
        return nn.AlphaDropout(p), in_shape
    else:
        assert False, 'Unknown dropout type: %s' % (dropout_type, )
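A hedged usage sketch for the factory above. DROPOUT_ALPHA is one of the module-level constants referenced in the example (assumed to be defined alongside it); the shape tuple is illustrative.

# Returns an nn.AlphaDropout(0.1) and echoes in_shape unchanged, so callers can
# keep threading shapes while assembling the network.
drop, shape = MakeDropout(in_shape=(128,), p=0.1, dropout_type=DROPOUT_ALPHA)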
Example #9
 def __init__(self, in_channels, middle_channels, out_channels, act_func=nn.SELU(), use_drop=.0):
     # nn.ReLU
     super(VGGBlock, self).__init__()
     self.act_func = act_func
     self.use_drop = use_drop
     self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
     self.bn1 = nn.BatchNorm2d(middle_channels)
     self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
     self.bn2 = nn.BatchNorm2d(out_channels)
     self.drop = nn.AlphaDropout(use_drop)
Example #10
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        planes: int = None,
        dropout: float = 0.0,
        activation: str = "relu",
        normalization: str = "bn",
        seblock: bool = False,
        sablock: bool = False,
    ):
        super(BottleneckBlock, self).__init__()
        if planes is None:
            planes = out_channels * 4

        bias = True
        if normalization is not None:
            bias = False

        if activation == "selu":
            bias = False

        # Define blocks
        self.norm1 = identity
        if normalization is not None:
            self.norm1 = NORMS[normalization.upper()](num_channels=in_channels)

        self.conv1 = nn.Conv1d(
            in_channels, planes, kernel_size=1, stride=1, padding=0, bias=bias
        )

        self.norm2 = identity
        if normalization is not None:
            self.norm2 = NORMS[normalization.upper()](num_channels=planes)

        self.conv2 = nn.Conv1d(
            planes, out_channels, kernel_size=3, stride=1, padding=1, bias=bias
        )

        self.act = getattr(acts, activation)
        if activation == "selu":
            self.dropout = nn.Sequential(
                *[nn.AlphaDropout(dropout / 5), nn.Dropout2d(dropout)]
            )
        else:
            self.dropout = nn.Sequential(*[nn.Dropout(dropout), nn.Dropout2d(dropout)])

        # Optional blocks
        self.seblock = None
        if seblock is True:
            self.seblock = SEBlock(in_channels=in_channels, activation=activation)

        self.sablock = None
        if sablock is True:
            self.sablock = SelfAttention1d(in_channels=in_channels)
Example #11
 def _get_layer(self, fan_in:int, fan_out:int) -> nn.Module:   
     layers = []
     layers.append(nn.Linear(fan_in, fan_out))
     self.lookup_init(self.act, fan_in, fan_out)(layers[-1].weight)
     nn.init.zeros_(layers[-1].bias)
     if self.act != 'linear': layers.append(self.lookup_act(self.act))
     if self.bn:  layers.append(nn.BatchNorm1d(fan_out))
     if self.do: 
         if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
         else:                  layers.append(nn.Dropout(self.do))
     return nn.Sequential(*layers)
Example #12
    def build_model(self):

        layer = lambda inp, out: \
                nn.Sequential(
                    nn.Linear(inp, out),
                    self.activation_fn(),
                    nn.AlphaDropout(self.dropout_prob))

        sizes = [self.feature_size] + [self.hidden_size] * self.num_layers
        mlp = [layer(h0, h1) for h0, h1 in zip(sizes[:-1], sizes[1:])]
        return nn.Sequential(*mlp)
Example #13
    def __init__(self):
        super(MNISTConvNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, 5, padding=2)
        self.conv2 = nn.Conv2d(16, 16, 5, padding=2)
        self.conv3 = nn.Conv2d(16, 8, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.res = []

        self.dropout = nn.AlphaDropout(p=0.25)
        self.fc1 = nn.Linear(8 * 12 * 12, 240)
        self.fc2 = nn.Linear(240, 10)
Example #14
def build_classifier_dict(convnet, num_hidden_layers, num_fc_neurons):
    """
    Accepts 'densenet' or 'vgg' as a valid CNN architecture, the number of additional 
    fully-connected layers and the number of neurons per layer and returns an OrderedDict 
    to be used with nn.Sequential to create the classifier of our network.
    Parameters:
      convnet - a string identifying the CNN architecture being used ('densenet' or 'vgg' only)
      num_hidden_layers - an int of the number of hidden layers to be used
      num_fc_neurons - an int of the number of neurons in each fully-connected layer
    Returns:
      classifier_dict - an OrderedDict describing the fully-connected layers, SELUs, AlphaDropout and LogSoftmax
    """

    classifier_dict = OrderedDict([])
    convnet_out = 1

    if convnet == 'densenet':
        convnet_out = 2208
    elif convnet == 'vgg':
        convnet_out = 25088
    else:
        # this should never run due to the else: statement in main() that exits if invalid CNN arch selected
        print(
            f"WARNING: Network type of '{convnet}' was parsed for which the number of output neurons is unknown.\n\t Your network will not be built correctly.\n"
        )

    classifier_dict['fc1'] = nn.Linear(convnet_out, num_fc_neurons)
    classifier_dict['selu1'] = nn.SELU()
    classifier_dict['dropout1'] = nn.AlphaDropout(p=0.5)

    for layer in range(num_hidden_layers):
        classifier_dict['fc' + str(layer + 2)] = nn.Linear(
            num_fc_neurons, num_fc_neurons)
        classifier_dict['selu' + str(layer + 2)] = nn.SELU()
        classifier_dict['dropout' + str(layer + 2)] = nn.AlphaDropout(p=0.5)

    classifier_dict['fc' + str(num_hidden_layers + 2)] = nn.Linear(
        num_fc_neurons, 102)
    classifier_dict['output'] = nn.LogSoftmax(dim=1)

    return classifier_dict
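A hedged usage sketch for build_classifier_dict; the argument values are illustrative rather than taken from the original project.

classifier_dict = build_classifier_dict('vgg', num_hidden_layers=2, num_fc_neurons=512)
classifier = nn.Sequential(classifier_dict)
# Maps the 25088 VGG features through SELU/AlphaDropout blocks to 102
# log-probabilities, ready to replace the pretrained network's classifier.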
Example #15
 def __init__(self, hidden_size, dropout=0.5, softmax=True):
     super(global_attention, self).__init__()
     self.linear_in = nn.Linear(hidden_size, hidden_size)
     self.dropout1 = nn.Dropout(dropout)
     self.linear_out = nn.Linear(2 * hidden_size, hidden_size)
     self.dropout2 = nn.Dropout(dropout)
     if softmax:
         self.softmax = nn.Softmax(dim=-1)
     else:
         self.softmax = nn.Sigmoid()  #(dim=-1)
     self.tanh = nn.Tanh()
     self.SELU = nn.Sequential(nn.SELU(), nn.AlphaDropout(p=0.05))
Example #16
    def __init__(self, in_channels, out_channels, stride=1,
                 dilation_factor=1, act=nn.ReLU(inplace=True)):
        super(PreactBlock, self).__init__()

        self.bn1 = nn.BatchNorm1d(in_channels)
        self.act = act
        use_selu = isinstance(act, nn.SELU)
        self.drop1 = nn.AlphaDropout() if use_selu else nn.Dropout()
        self.conv1 = conv(in_channels, out_channels, stride, dilation=1)

        self.bn2 = nn.BatchNorm1d(out_channels)
        self.drop2 = nn.AlphaDropout() if use_selu else nn.Dropout()
        self.conv2 = conv(out_channels, out_channels, dilation=dilation_factor)
        self.downsample = nn.MaxPool1d(stride, ceil_mode=True) if stride != 1 else None
        self.skip_connection = lambda x: x
        if in_channels != out_channels:
            self.skip_connection = nn.Sequential(
                nn.BatchNorm1d(in_channels),
                nn.Conv1d(in_channels, out_channels,
                          kernel_size=1, bias=False))
        self.stride = stride
Example #17
    def __init__(self, latent_dim, sequence_length):
        super(Encoder, self).__init__()
        resnet_x = resnext50_32x4d(pretrained=True)
        resnet_y = resnext50_32x4d(pretrained=True)
        self.sequence_length = sequence_length
        self.latent_dim = latent_dim
        self.feature_extractor = nn.Sequential(*list(resnet_x.children())[:-1])
        self.feature_extractor_y = nn.Sequential(
            *list(resnet_y.children())[:-1])
        self.final = nn.Sequential(
            nn.AlphaDropout(0.4), nn.Linear(resnet_x.fc.in_features,
                                            latent_dim),
            nn.BatchNorm1d(latent_dim, momentum=0.01))

        self.final_y = nn.Sequential(
            nn.AlphaDropout(0.4), nn.Linear(resnet_y.fc.in_features,
                                            latent_dim),
            nn.BatchNorm1d(latent_dim, momentum=0.01))

        self.attention_x = Attention(latent_dim, sequence_length)
        self.attention_y = Attention(latent_dim, sequence_length)
Example #18
 def __init__(self):
     super().__init__()
     # self.hidden = nn.Sequential(*[nn.ReLU(), nn.Dropout(0.5), nn.ReLU()] )
     scale = 8
     hidden_layers = [nn.Linear(28 * 28, 128 * scale)]
     for _ in range(3):
         hidden_layers.append(nn.Linear(128 * scale, 128 * scale))
         hidden_layers.append(nn.SELU())  # SELU faster than ELU?
         hidden_layers.append(nn.AlphaDropout(0.1))
         #hidden_layers.append(nn.Dropout(0.1))
     self.hidden = nn.Sequential(*hidden_layers)
     self.output = nn.Linear(128 * scale, 10)
Example #19
 def __init__(self, drop_type):
     super(Drop, self).__init__()
     if drop_type is None:
         self.drop = keep_origin
     elif drop_type == 'alpha':
         self.drop = nn.AlphaDropout(p=0.5)
     elif drop_type == 'dropout':
         self.drop = nn.Dropout3d(p=0.5)
     elif drop_type == 'drop_block':
         self.drop = DropBlock3D(drop_prob=0.2, block_size=2)
     else:
         raise NotImplementedError('{} not implemented'.format(drop_type))
Example #20
    def __init__(self, window=200, subwindow=40, dropout=0):
        super().__init__()

        locn_start = float(window/2) - float(subwindow/2)
        locn_end = float(window/2) + float(subwindow/2)

        c1 = nn.Sequential(
            nn.Conv1d(1, 10, 5, padding=4),
            nn.MaxPool1d(2), nn.SELU())

        c2 = nn.Sequential(
            nn.Conv1d(10, 15, 3, padding=2, dilation=2),
            nn.MaxPool1d(2), nn.SELU())

        c3 = nn.Sequential(
            nn.Conv1d(15, 15, 3, padding=4, dilation=4),
            nn.MaxPool1d(2), nn.SELU())

        self.feature_extractor = nn.Sequential(c1, c2, c3)

        self.c4 = nn.Sequential(
            nn.Conv1d(15 * np.int32(window/subwindow), 30, 3, padding=4, dilation=4),
            nn.SELU())

        self.classifier = nn.Sequential(
            nn.Linear(150, 50), nn.SELU(),
            nn.AlphaDropout(dropout),
            nn.Linear(50, 10), nn.SELU(),
            nn.AlphaDropout(dropout),
            nn.Linear(10, 1))

        self.regressor = nn.Sequential(
            nn.Linear(150, 50), nn.SELU(),
            nn.AlphaDropout(dropout),
            nn.Linear(50, 10), nn.SELU(),
            nn.AlphaDropout(dropout),
            nn.Linear(10, 1),
            nn.Hardtanh(locn_start, locn_end))

        self._initialize_submodules()
Example #21
    def __init__(self, config, vocab_size, embedding=None):
        super(rnn_encoder, self).__init__()
        if embedding is not None:
            self.embedding = embedding
        else:
            self.embedding = nn.Embedding(vocab_size, config.emb_size)
        self.rnn = nn.LSTM(input_size=config.emb_size,
                           hidden_size=config.encoder_hidden_size,
                           num_layers=config.num_layers,
                           dropout=config.dropout,
                           bidirectional=config.bidirec)

        self.config = config
        # self.posenc = PositionalEncoding(config)
        self.hidden_size = config.encoder_hidden_size
        self.sigmoid = nn.Sigmoid()
        self.layers = nn.ModuleList()
        self.self_attention = nn.ModuleList()
        self.in1 = nn.InstanceNorm1d(config.decoder_hidden_size, eps=1e-10)
        self.in2 = nn.InstanceNorm1d(config.decoder_hidden_size, eps=1e-10)

        self.conv1 = nn.Conv1d(config.decoder_hidden_size,
                               config.decoder_hidden_size,
                               kernel_size=3,
                               padding=0,
                               dilation=1)
        self.selu1 = nn.Sequential(nn.SELU(), nn.AlphaDropout(p=0.05))
        self.conv2 = nn.Conv1d(config.decoder_hidden_size,
                               config.decoder_hidden_size,
                               kernel_size=3,
                               padding=0,
                               dilation=2)
        self.selu2 = nn.Sequential(nn.SELU(), nn.AlphaDropout(p=0.05))
        self.conv3 = nn.Conv1d(config.decoder_hidden_size,
                               config.decoder_hidden_size,
                               kernel_size=3,
                               padding=0,
                               dilation=3)
        self.selu3 = nn.Sequential(nn.SELU(), nn.AlphaDropout(p=0.05))
Example #22
 def __init__(self, hidden_size, dropout=0.1):
     super(self_attention, self).__init__()
     self.linear_in = nn.Linear(hidden_size, hidden_size)
     init.xavier_normal_(self.linear_in.weight)
     init.constant_(self.linear_in.bias, 0.0)
     self.dropout1 = nn.Dropout(dropout)
     self.linear_out = nn.Linear(2 * hidden_size, hidden_size)
     init.xavier_normal_(self.linear_out.weight)
     init.constant_(self.linear_out.bias, 0.0)
     self.dropout2 = nn.Dropout(dropout)
     self.softmax = nn.Softmax(dim=-1)
     self.tanh = nn.Tanh()
     self.SELU = nn.Sequential(nn.SELU(), nn.AlphaDropout(p=0.05))
Example #23
 def __init__(self, in_dim, out_dim, hidden_dim, n_layers, dropout_prob=0.0):
     super().__init__()
     layers = OrderedDict()
     for i in range(n_layers - 1):
         if i == 0:
             layers[f"fc{i}"] = nn.Linear(in_dim, hidden_dim, bias=False)
         else:
             layers[f"fc{i}"] = nn.Linear(hidden_dim, hidden_dim, bias=False)
         layers[f"selu_{i}"] = nn.SELU()
         layers[f"dropout_{i}"] = nn.AlphaDropout(p=dropout_prob)
     layers[f"fc_{i+1}"] = nn.Linear(hidden_dim, out_dim, bias=True)
     self.network = nn.Sequential(layers)
     self.reset_parameters()
Example #24
    def __init__(self, output_dim):
        super(Baseline_ResNet, self).__init__()
        # after res_stack1: (batch, 32, 64)
        self.res_stack1 = Res_Stack(input_dim=2, output_dim=32)
        # after res_stack2: (batch, 32, 32)
        self.res_stack2 = Res_Stack(input_dim=32, output_dim=32)
        # after res_stack3: (batch, 32, 16)
        self.res_stack3 = Res_Stack(input_dim=32, output_dim=32)
        # after res_stack4: (batch, 32, 8)
        self.res_stack4 = Res_Stack(input_dim=32, output_dim=32)

        self.fc1 = nn.Sequential(
            nn.Linear(32 * 8, 128),
            nn.SELU(),
            nn.AlphaDropout(0.5),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.SELU(),
            nn.AlphaDropout(0.5),
        )
        self.fc3 = nn.Sequential(nn.Linear(64, output_dim), )
Example #25
File: rnn.py Project: wmlml/SU4MLC
    def __init__(self, config, embedding=None):
        super(rnn_encoder, self).__init__()

        self.embedding = embedding if embedding is not None else nn.Embedding(
            config.src_vocab_size, config.emb_size)
        self.hidden_size = config.hidden_size
        self.config = config
        if config.cell == 'gru':
            self.rnn = nn.GRU(input_size=config.emb_size,
                              hidden_size=config.hidden_size,
                              num_layers=config.enc_num_layers,
                              dropout=config.dropout,
                              bidirectional=config.bidirectional)
        else:
            self.rnn = nn.LSTM(input_size=config.emb_size,
                               hidden_size=config.hidden_size,
                               num_layers=config.enc_num_layers,
                               dropout=config.dropout,
                               bidirectional=config.bidirectional)

        self.dconv = nn.Sequential(
            nn.Conv1d(config.hidden_size,
                      config.hidden_size,
                      kernel_size=3,
                      padding=1,
                      dilation=1), nn.SELU(), nn.AlphaDropout(p=0.05),
            nn.Conv1d(config.hidden_size,
                      config.hidden_size,
                      kernel_size=3,
                      padding=1,
                      dilation=2), nn.SELU(), nn.AlphaDropout(p=0.05),
            nn.Conv1d(config.hidden_size,
                      config.hidden_size,
                      kernel_size=3,
                      padding=1,
                      dilation=3), nn.SELU(), nn.AlphaDropout(p=0.05))
        self.linear = nn.Linear(2 * config.hidden_size, 2 * config.hidden_size)
        self.glu = nn.GLU()
Example #26
    def __init__(self):
        super(CIFAR10VGG9, self).__init__()

        self.conv64 = nn.Sequential(
            nn.Conv2d(3, 64, 3, padding=1),
            nn.ReLU(True),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU(True),
            nn.BatchNorm2d(64),
        )
        self.add_module("conv64", self.conv64)
        self.conv128 = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),
            nn.ReLU(True),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.ReLU(True),
            nn.BatchNorm2d(128),
        )
        self.add_module("conv128", self.conv128)
        self.conv256 = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(True),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(True),
            nn.BatchNorm2d(256),
        )
        self.add_module("conv256", self.conv256)
        self.mlp = nn.Sequential(nn.AlphaDropout(p=0.25),
                                 nn.Linear(256 * 4 * 4, 256 * 4 * 4),
                                 nn.ReLU(True), nn.AlphaDropout(p=0.25),
                                 nn.Linear(256 * 4 * 4, 256 * 4 * 4),
                                 nn.ReLU(True), nn.Linear(256 * 4 * 4, 10))
        self.add_module("vgg", self.mlp)

        self.maxPooling = nn.MaxPool2d(2)
Example #27
    def __init__(self,
                 inputs,
                 depth=4,
                 width=None,
                 outputs=1,
                 output_type='bounded',
                 activation='relu',
                 normalize=None,
                 dropout=False):
        super().__init__()
        if width is None:
            width = inputs * 2

        self.normalize = normalize
        if dropout and (activation == 'selu'):
            self.dropout = nn.AlphaDropout(p=0.1)
        elif dropout:
            self.dropout = nn.Dropout(p=0.5)
        else:
            self.dropout = dropout

        # Define the activation functions between layers.
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'leakyrelu':
            self.activation = nn.LeakyReLU()
        elif (activation == 'selu') or (activation == 'selu-relu'):
            self.relu_end = True
            self.activation = nn.SELU()

        # Define the final activation function.
        if output_type == 'bounded':
            self.final_act = nn.Sigmoid()
        elif output_type == 'logit':
            self.final_act = None
        else:
            self.final_act = self.activation

        # Build the layers
        self.layers = self.build_layers(inputs, depth, width, outputs)
        self.architecture = {
            'inputs': inputs,
            'depth': depth,
            'width': width,
            'outputs': outputs,
            'output_type': output_type,
            'activation': activation,
            'normalize': normalize,
            'dropout': dropout
        }
Example #28
    def __init__(
        self,
        input_size: int,
        output_size: int,
        hidden_size: int = 64,
        num_layers: int = 1,
        bias: bool = True,
        activation: str = 'ReLU',
        dropout: float = 0.,
        normalization: str = None,
        linear_first: bool = True,
        **absorb,
    ):
        activation = ACTIVATIONS[activation]
        selfnorm = normalization == 'self'

        if dropout == 0.:
            dropout = None
        elif selfnorm:
            dropout = nn.AlphaDropout(dropout)
        else:
            dropout = nn.Dropout(dropout)

        normalization = NORMALIZATIONS.get(normalization, lambda x: None)

        layers = [
            nn.Linear(input_size, hidden_size, bias) if linear_first else None,
            normalization(hidden_size),
            activation(),
        ]

        for _ in range(num_layers):
            layers.extend([
                dropout,
                nn.Linear(hidden_size, hidden_size, bias),
                normalization(hidden_size),
                activation(),
            ])

        layers.append(nn.Linear(hidden_size, output_size, bias))

        layers = filter(lambda l: l is not None, layers)

        super().__init__(*layers)

        if selfnorm:
            lecun_init(self)

        self.input_size = input_size
        self.output_size = output_size
Example #29
    def __init__(self, feature_maps, num_classes: int, dropout=0.):
        super().__init__()
        self.features_size = feature_maps[-1]
        self.rank_pool = GlobalRankPooling(self.features_size, 16 * 16)
        self.dropout = nn.AlphaDropout(dropout)
        self.logits = nn.Linear(self.features_size, num_classes)

        # Regression to grade using SSD-like module
        self.regression = nn.Sequential(nn.Linear(self.features_size,
                                                  16), nn.ELU(inplace=True),
                                        nn.Linear(16, 16),
                                        nn.ELU(inplace=True), nn.Linear(16, 1))

        self.ordinal = nn.Linear(self.features_size, num_classes - 1)
Example #30
 def __init__(self, n_input, n_hidden, n_code, final_activation=None,
              normalize_inputs=True, dropout=(.2,.2), activation='ReLU'):
     super(Encoder, self).__init__()
     self.lin1 = nn.Linear(n_input, n_hidden)
     self.act1 = getattr(nn, activation)()
     self.lin2 = nn.Linear(n_hidden, n_hidden)
     self.act2 = getattr(nn, activation)()
     if activation == 'SELU':
         self.drop1 = nn.AlphaDropout(dropout[0])
         self.drop2 = nn.AlphaDropout(dropout[1])
     else:
         self.drop1 = nn.Dropout(dropout[0])
         self.drop2 = nn.Dropout(dropout[1])
     self.lin3 = nn.Linear(n_hidden, n_code)
     self.normalize_inputs = normalize_inputs
     if final_activation == 'linear' or final_activation is None:
         self.final_activation = None
     elif final_activation == 'softmax':
         self.final_activation = nn.Softmax(dim=1)
     elif final_activation == 'sigmoid':
         self.final_activation = nn.Sigmoid()
     else:
         raise ValueError("Final activation unknown:", activation)