Example #1
    def __init__(self,
                 n_input,
                 n_output,
                 batch_norm=False,
                 non_linearity=None,
                 dropout=None):
        super(FullyConnectedLayer, self).__init__()
        self.linear = nn.Linear(n_input, n_output)

        if batch_norm:
            self.batch_norm = nn.BatchNorm1d(n_output)

        if non_linearity is None or non_linearity == 'linear':
            pass
        elif non_linearity == 'relu':
            self.non_linearity = nn.ReLU()
            self.init_gain = nn.init.calculate_gain('relu')
            self.bias_init = 0.1
        elif non_linearity == 'elu':
            self.non_linearity = nn.ELU()
        elif non_linearity == 'selu':
            self.non_linearity = nn.SELU()
        elif non_linearity == 'tanh':
            self.non_linearity = nn.Tanh()
            self.init_gain = nn.init.calculate_gain('tanh')
        elif non_linearity == 'sigmoid':
            self.non_linearity = nn.Sigmoid()
        else:
            raise ValueError('Non-linearity ' + str(non_linearity) +
                             ' not found.')

        if dropout:
            self.dropout = nn.Dropout1d(dropout)

        self.initialize()
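
The constructor ends by calling self.initialize(), which is not shown here. A minimal sketch of what such a method could look like, given the init_gain and bias_init attributes set above (hypothetical, not the original author's code):

    def initialize(self):
        # Hypothetical sketch: Xavier-initialize the linear weights, scaled
        # by the activation's gain when one was recorded, and give ReLU
        # units a small positive bias as bias_init suggests.
        gain = getattr(self, 'init_gain', 1.0)
        nn.init.xavier_uniform_(self.linear.weight, gain=gain)
        nn.init.constant_(self.linear.bias, getattr(self, 'bias_init', 0.0))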
Example #2
    def __init__(self):
        super(Soheil, self).__init__()

        self.conv = nn.Sequential(
            # 121, 145, 121
            # padding tuple (padT, padH, padW)
            nn.Conv3d(1, 16, kernel_size=3, stride=1,
                      padding=1),  # b, 16, 121, 145, 121
            nn.BatchNorm3d(16),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=2, stride=2,
                         padding=1),  # b, 16, 61, 73, 61
            nn.Conv3d(16, 32, kernel_size=3, stride=1,
                      padding=1),  # b, 32, 61, 73, 61
            nn.BatchNorm3d(32),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=3, stride=2,
                         padding=1),  # b, 32, 31, 37, 31
            nn.Conv3d(32, 64, kernel_size=3, stride=1,
                      padding=1),  # b, 64, 31, 37, 31
            nn.BatchNorm3d(64),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=4, stride=2,
                         padding=1),  # b, 64, 15, 18, 15
        )
        nn.init.xavier_uniform_(self.conv[0].weight)
        nn.init.xavier_uniform_(self.conv[4].weight)
        nn.init.xavier_uniform_(self.conv[8].weight)
        self.fc = nn.Sequential(nn.Linear(64 * 13 * 16 * 14 + 2, 256),
                                nn.BatchNorm1d(256), nn.Dropout1d(p=0.4),
                                nn.ReLU(True), nn.Linear(256, 2),
                                nn.ReLU(True))
        # Only the Linear layers (indices 0 and 4 of the Sequential above)
        # carry weights to initialize this way.
        nn.init.xavier_uniform_(self.fc[0].weight)
        nn.init.xavier_uniform_(self.fc[4].weight)
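
The flattened size that the first fully connected layer must accept is hard-coded as 64 * 13 * 16 * 14; a common way to verify such a constant is to push a dummy volume through the convolutional stack. A sketch, assuming the surrounding class derives from nn.Module and the input volume is 121 x 145 x 121 as the comments suggest:

import torch

model = Soheil()
with torch.no_grad():
    feat = model.conv(torch.zeros(1, 1, 121, 145, 121))
print(feat.shape, feat.flatten(1).shape[1])  # compare against 64 * 13 * 16 * 14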
Example #3
    def __init__(self, n_input, n_output, dropout=None):
        super(RecurrentLayer, self).__init__()
        self.lstm = nn.LSTMCell(n_input, n_output)
        if dropout:
            self.dropout = nn.Dropout1d(dropout)
        self.initial_hidden = nn.Parameter(torch.zeros(1, n_output))
        self.initial_cell = nn.Parameter(torch.zeros(1, n_output))
        self.hidden_state = self._hidden_state = None
        self.cell_state = self._cell_state = None
        self._detach = False
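
initial_hidden and initial_cell are learned parameters of shape (1, n_output); a typical pattern, sketched here hypothetically rather than taken from the original class, expands them to the batch size when the sequence is reset and threads them through the cell at each step:

    def reset(self, batch_size):
        # Broadcast the learned initial states across the batch.
        self.hidden_state = self.initial_hidden.expand(batch_size, -1)
        self.cell_state = self.initial_cell.expand(batch_size, -1)

    def step(self, x):
        # One recurrent step; note nn.Dropout1d drops whole channels and
        # warns on 2-D activations, where plain nn.Dropout is the usual fit.
        self.hidden_state, self.cell_state = self.lstm(
            x, (self.hidden_state, self.cell_state))
        out = self.hidden_state
        return self.dropout(out) if hasattr(self, 'dropout') else out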
Example #4
def last_decoding(in_features, out_channels, drop_rate=0., upsample='nearest'):
    """Last transition up layer, which outputs directly the predictions.
    """
    last_up = nn.Sequential()
    last_up.add_module('norm1', nn.BatchNorm1d(in_features))
    last_up.add_module('relu1', nn.ReLU(True))
    last_up.add_module(
        'conv1',
        nn.Conv1d(in_features,
                  in_features // 2,
                  kernel_size=1,
                  stride=1,
                  padding=0,
                  bias=False))
    if drop_rate > 0.:
        last_up.add_module('dropout1', nn.Dropout1d(p=drop_rate))
    last_up.add_module('norm2', nn.BatchNorm1d(in_features // 2))
    last_up.add_module('relu2', nn.ReLU(True))
    # last_up.add_module('convT2', nn.ConvTranspose1d(in_features // 2,
    #                    out_channels, kernel_size=2*padding+stride, stride=stride,
    #                    padding=padding, output_padding=output_padding, bias=bias))
    if upsample == 'nearest':
        last_up.add_module('upsample', UpsamplingNearest1d(scale_factor=2))
    elif upsample == 'linear':
        last_up.add_module('upsample', UpsamplingLinear1d(scale_factor=2))
    last_up.add_module(
        'conv2',
        nn.Conv1d(in_features // 2,
                  in_features // 4,
                  kernel_size=3,
                  stride=1,
                  # The doubled padding (here and in conv3) matches how older
                  # PyTorch releases split circular padding across the two
                  # sides; on recent versions, padding=1 keeps a kernel_size=3
                  # convolution length-preserving.
                  padding=1 * 2,
                  bias=False,
                  padding_mode='circular'))
    last_up.add_module('norm3', nn.BatchNorm1d(in_features // 4))
    last_up.add_module('relu3', nn.ReLU(True))
    last_up.add_module(
        'conv3',
        nn.Conv1d(in_features // 4,
                  out_channels,
                  kernel_size=5,
                  stride=1,
                  padding=2 * 2,
                  bias=False,
                  padding_mode='circular'))
    return last_up
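
UpsamplingNearest1d and UpsamplingLinear1d are not torch.nn classes, so they are presumably project-local helpers; assuming they are thin wrappers over nn.Upsample, the head can be exercised like this (a sketch, with the assumed wrapper defined inline):

import torch
import torch.nn as nn

class UpsamplingNearest1d(nn.Upsample):
    # Assumed stand-in for the project-local helper.
    def __init__(self, scale_factor=2):
        super().__init__(scale_factor=scale_factor, mode='nearest')

head = last_decoding(in_features=64, out_channels=3)
y = head(torch.randn(4, 64, 32))
print(y.shape)  # batch 4, 3 channels, length upsampled 2x (plus circular-padding growth)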
Example #5
    def __init__(self,
                 input_size,
                 input_dim,
                 hidden_dim,
                 kernel_size,
                 non_linearity=None,
                 dropout=None):
        super(ConvRecurrentLayer, self).__init__()
        self.lstm = ConvLSTMCell(input_size,
                                 input_dim,
                                 hidden_dim,
                                 kernel_size,
                                 bias=True)
        if dropout:
            self.dropout = nn.Dropout1d(dropout)

        output_shape = [hidden_dim, input_size, input_size]
        self.output_shape = output_shape
        self.initial_hidden = nn.Parameter(torch.zeros([1] + output_shape))
        self.initial_cell = nn.Parameter(torch.zeros([1] + output_shape))
        self.hidden_state = self._hidden_state = None
        self.cell_state = self._cell_state = None
        self._detach = False

        if non_linearity is None or non_linearity == 'linear':
            self.non_linearity = None
        elif non_linearity == 'relu':
            self.non_linearity = nn.ReLU()
            self.init_gain = nn.init.calculate_gain('relu')
            self.bias_init = 0.1
        elif non_linearity == 'leaky_relu':
            self.non_linearity = nn.LeakyReLU(0.2, inplace=True)
        elif non_linearity == 'elu':
            self.non_linearity = nn.ELU()
        elif non_linearity == 'selu':
            self.non_linearity = nn.SELU()
        elif non_linearity == 'tanh':
            self.non_linearity = nn.Tanh()
            self.init_gain = nn.init.calculate_gain('tanh')
        elif non_linearity == 'sigmoid':
            self.non_linearity = nn.Sigmoid()
        else:
            raise ValueError('Non-linearity ' + str(non_linearity) +
                             ' not found.')
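
The activation dispatch above mirrors the if/elif ladder in Example #1; an equivalent table-driven form (a sketch, not the original code) keeps the two layers in sync:

import torch.nn as nn

_ACTIVATIONS = {
    'relu': nn.ReLU,
    'leaky_relu': lambda: nn.LeakyReLU(0.2, inplace=True),
    'elu': nn.ELU,
    'selu': nn.SELU,
    'tanh': nn.Tanh,
    'sigmoid': nn.Sigmoid,
}

def make_non_linearity(name):
    # None or 'linear' means no activation at all.
    if name is None or name == 'linear':
        return None
    if name not in _ACTIVATIONS:
        raise ValueError('Non-linearity %s not found.' % name)
    return _ACTIVATIONS[name]()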
Example #6
    def __init__(self,
                 in_features,
                 growth_rate,
                 drop_rate=0.,
                 bn_size=8,
                 bottleneck=False,
                 padding=1):
        super(_DenseLayer, self).__init__()
        if bottleneck and in_features > bn_size * growth_rate:
            self.add_module('norm1', nn.BatchNorm1d(in_features))
            self.add_module('relu1', nn.ReLU(inplace=True))
            self.add_module(
                'conv1',
                nn.Conv1d(in_features,
                          bn_size * growth_rate,
                          kernel_size=1,
                          stride=1,
                          bias=False))
            self.add_module('norm2', nn.BatchNorm1d(bn_size * growth_rate))
            self.add_module('relu2', nn.ReLU(inplace=True))
            # The second convolution must consume the bottleneck's output
            # and use a distinct name; registering it as 'conv1' again would
            # silently replace the 1x1 convolution above.
            self.add_module(
                'conv2',
                nn.Conv1d(bn_size * growth_rate,
                          growth_rate,
                          kernel_size=2 * padding + 1,
                          stride=1,
                          padding=padding * 2,
                          bias=False,
                          padding_mode='circular'))
        else:
            self.add_module('norm1', nn.BatchNorm1d(in_features))
            self.add_module('relu1', nn.ReLU(inplace=True))
            self.add_module(
                'conv1',
                nn.Conv1d(in_features,
                          growth_rate,
                          kernel_size=2 * padding + 1,
                          stride=1,
                          padding=padding * 2,
                          bias=False,
                          padding_mode='circular'))
        if drop_rate > 0:
            self.add_module('dropout', nn.Dropout1d(p=drop_rate))
Example #7
def C_BN_ACT(c_in, c_out, activation, transpose=False, dropout=None, bn=True):
    layers = []
    if transpose:
        layers.append(
            nn.ConvTranspose1d(c_in,
                               c_out,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias=False))
    else:
        layers.append(
            nn.Conv1d(c_in, c_out, kernel_size=4, stride=2, padding=1))
    if dropout:
        layers.append(nn.Dropout1d(dropout))
    if bn:
        layers.append(nn.BatchNorm1d(c_out))
    layers.append(activation)
    return nn.Sequential(*layers)
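
C_BN_ACT stacks a (transposed) strided convolution, optional dropout, optional batch norm, and a caller-supplied activation. With kernel_size=4, stride=2, padding=1, the forward convolution halves the length and the transposed one doubles it, so the two compose cleanly:

import torch
import torch.nn as nn

down = C_BN_ACT(16, 32, nn.LeakyReLU(0.2))
up = C_BN_ACT(32, 16, nn.ReLU(True), transpose=True)
x = torch.randn(4, 16, 64)
print(down(x).shape)      # torch.Size([4, 32, 32])
print(up(down(x)).shape)  # torch.Size([4, 16, 64])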
Example #8
    def __init__(self, dim_in, dim_h, batch_norm, dropout,
                 desired_nonlinearity):
        super(Classifier, self).__init__()
        assert hasattr(
            nn, desired_nonlinearity
        ), '%s is not a valid nonlinearity' % desired_nonlinearity
        nonlinearity = getattr(nn, desired_nonlinearity)()
        model = nn.Sequential()
        dim = dim_in
        for i, h in enumerate(dim_h):
            model.add_module('Dense_%s' % i, nn.Linear(dim, h))
            dim = h
            if batch_norm:
                model.add_module('BatchNorm_%s' % i, nn.BatchNorm1d(dim))
            if dropout:
                model.add_module('Dropout_%s' % i, nn.Dropout1d(p=dropout))
            model.add_module('Nonlinearity_%s' % i, nonlinearity)
        model.add_module('Output', nn.Linear(dim, 1))
        model.add_module('Sigmoid', nn.Sigmoid())
        self.classifier = model
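
Because the network is stored in self.classifier and ends in a Sigmoid, it pairs naturally with nn.BCELoss. A usage sketch, assuming the class adds no further forward logic:

import torch
import torch.nn as nn

clf = Classifier(dim_in=10, dim_h=[64, 32], batch_norm=True,
                 dropout=0.5, desired_nonlinearity='ReLU')
x = torch.randn(8, 10)
y = torch.randint(0, 2, (8, 1)).float()
loss = nn.BCELoss()(clf.classifier(x), y)
loss.backward()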
Example #9
    def __init__(self,
                 in_features,
                 out_features,
                 down,
                 bottleneck=True,
                 drop_rate=0,
                 padding=1,
                 upsample='nearest'):
        """Transition layer, either downsampling or upsampling, both reduce
        number of feature maps, i.e. `out_features` should be less than 
        `in_features`.
        Args:
            in_features (int):
            out_features (int):
            down (bool): If True, downsampling, else upsampling
            bottleneck (bool, True): If True, enable bottleneck design
            drop_rate (float, 0.):
        """
        super(_Transition, self).__init__()
        self.add_module('norm1', nn.BatchNorm1d(in_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        if down:
            # half feature resolution, reduce # feature maps
            if bottleneck:
                # bottleneck impl, save memory, add nonlinearity
                self.add_module(
                    'conv1',
                    nn.Conv1d(in_features,
                              out_features,
                              kernel_size=1,
                              stride=1,
                              padding=0,
                              bias=False))
                if drop_rate > 0:
                    self.add_module('dropout1', nn.Dropout1d(p=drop_rate))
                self.add_module('norm2', nn.BatchNorm1d(out_features))
                self.add_module('relu2', nn.ReLU(inplace=True))
                # self.add_module('pool', nn.AvgPool1d(kernel_size=2, stride=2))
                # not using pooling, fully convolutional...
                self.add_module(
                    'conv2',
                    nn.Conv1d(out_features,
                              out_features,
                              kernel_size=padding * 2 + 2,
                              stride=2,
                              padding=padding * 2,
                              bias=False,
                              padding_mode='circular'))
                if drop_rate > 0:
                    self.add_module('dropout2', nn.Dropout1d(p=drop_rate))
            else:
                self.add_module(
                    'conv1',
                    nn.Conv1d(in_features,
                              out_features,
                              kernel_size=padding * 2 + 2,
                              stride=2,
                              padding=padding * 2,
                              bias=False,
                              padding_mode='circular'))
                if drop_rate > 0:
                    self.add_module('dropout1', nn.Dropout1d(p=drop_rate))
        else:
            # transition up, increase feature resolution, half # feature maps
            if bottleneck:
                # bottleneck impl, save memory, add nonlinearity
                self.add_module(
                    'conv1',
                    nn.Conv1d(in_features,
                              out_features,
                              kernel_size=1,
                              stride=1,
                              padding=0,
                              bias=False))
                if drop_rate > 0:
                    self.add_module('dropout1', nn.Dropout1d(p=drop_rate))

                self.add_module('norm2', nn.BatchNorm1d(out_features))
                self.add_module('relu2', nn.ReLU(inplace=True))
                # output_padding=0, or 1 depends on the image size
                # if image size is of the power of 2, then 1 is good
                if upsample is None:
                    self.add_module(
                        'convT2',
                        nn.ConvTranspose1d(out_features,
                                           out_features,
                                           kernel_size=3,
                                           stride=2,
                                           padding=1,
                                           output_padding=1,
                                           bias=False))
                elif upsample == 'linear':
                    self.add_module('upsample',
                                    UpsamplingLinear1d(scale_factor=2))
                    self.add_module(
                        'conv2',
                        nn.Conv1d(out_features,
                                  out_features,
                                  3,
                                  1,
                                  1 * 2,
                                  bias=False,
                                  padding_mode='circular'))
                elif upsample == 'nearest':
                    self.add_module('upsample',
                                    UpsamplingNearest1d(scale_factor=2))
                    self.add_module(
                        'conv2',
                        nn.Conv1d(out_features,
                                  out_features,
                                  3,
                                  1,
                                  1 * 2,
                                  bias=False,
                                  padding_mode='circular'))

            else:
                if upsample is None:
                    self.add_module(
                        'convT2',
                        nn.ConvTranspose1d(in_features,
                                           out_features,
                                           kernel_size=3,
                                           stride=2,
                                           padding=1,
                                           output_padding=1,
                                           bias=False))
                elif upsample == 'linear':
                    self.add_module('upsample',
                                    UpsamplingLinear1d(scale_factor=2))
                    self.add_module(
                        'conv2',
                        nn.Conv1d(in_features,
                                  out_features,
                                  3,
                                  1,
                                  1 * 2,
                                  bias=False,
                                  padding_mode='circular'))
                elif upsample == 'nearest':
                    self.add_module('upsample',
                                    UpsamplingNearest1d(scale_factor=2))
                    self.add_module(
                        'conv2',
                        nn.Conv1d(in_features,
                                  out_features,
                                  3,
                                  1,
                                  1 * 2,
                                  bias=False,
                                  padding_mode='circular'))

            if drop_rate > 0:
                # Named 'dropout2' so it does not overwrite the 'dropout1'
                # module registered earlier on the bottleneck path.
                self.add_module('dropout2', nn.Dropout1d(p=drop_rate))
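
A sketch exercising both directions, assuming _Transition subclasses nn.Sequential (as in DenseNet-style implementations) and using upsample=None so the ConvTranspose1d path avoids the external upsampling helpers:

import torch

down = _Transition(64, 32, down=True)
up = _Transition(64, 32, down=False, upsample=None)
x = torch.randn(4, 64, 32)
print(down(x).shape)  # channels 64 -> 32, length roughly halved
print(up(x).shape)    # channels 64 -> 32, length doubled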