    def __init__(self, conv_dropout: float, dense_dropout: float, force_dropout: bool):
        super(DropOutLenet_all, self).__init__()

        self.dense_dropout = dense_dropout
        self.force_dropout = force_dropout

        # features
        self.conv_1 = nn.Conv2d(1, 20, kernel_size=5, padding=2, bias=False) # conv1 - kernel
        self.conv_1_bias = nn.Parameter(torch.randn(20)) # conv1 - bias
        self.relu_1 = nn.ReLU(inplace=True)
        self.dropout_1 = MCDropout(conv_dropout, self.force_dropout)
        self.pool_1 = nn.MaxPool2d(2, stride=2)
        self.conv_2 = nn.Conv2d(20, 50, kernel_size=5, padding=2, bias=False) # conv2 - kernel
        self.conv_2_bias = nn.Parameter(torch.randn(50)) # conv2 - bias
        self.relu_2 = nn.ReLU(inplace=True)
        self.dropout_2 = MCDropout(conv_dropout, self.force_dropout)
        self.pool_2 = nn.MaxPool2d(2, stride=2)

        # classifier
        self.dense_1 = nn.Linear(50 * 7 * 7, 500, bias=False)  # 50 channels x 7x7 spatial, i.e. a 28x28 input after two 2x2 poolings
        self.dense_1_bias = nn.Parameter(torch.randn(500))
        self.relu_3 = nn.ReLU(inplace=True)
        self.dropout_3 = MCDropout(dense_dropout, self.force_dropout)
        self.dense_2 = nn.Linear(500, 10, bias=False)
        self.dense_2_bias = nn.Parameter(torch.randn(10))
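
The MCDropout layer used throughout these examples is not shown in the listing. Below is a minimal sketch of one plausible implementation (an assumption, not the original code): a dropout module that stays active at inference time when force_dropout is set, which is what enables Monte Carlo sampling of predictions.

import torch.nn as nn
import torch.nn.functional as F

class MCDropout(nn.Module):
    """Dropout that can be kept active at inference time for Monte Carlo sampling."""

    def __init__(self, p: float = 0.5, force_dropout: bool = False):
        super(MCDropout, self).__init__()
        self.p = p
        self.force_dropout = force_dropout

    def forward(self, x):
        # Drop units whenever the module is in training mode, or always if
        # force_dropout is set, so repeated eval-time forward passes differ.
        return F.dropout(x, p=self.p, training=self.training or self.force_dropout)
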
    def make_block(in_channels: int, out_channels: int, n: int, drop_out_rate: float, activation: str = 'relu'):
        """Convolutional decoder block, with upsampling."""

        if activation == 'relu':
            act_fn = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            act_fn = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'sigmoid':
            act_fn = nn.Sigmoid()
        elif activation == 'tanh':
            act_fn = nn.Tanh()
        else:
            raise ValueError(f'Unsupported activation: {activation}')

        block = nn.Sequential()
        strides = [2] + [1] * (n-1)
        for i, s in enumerate(strides):
            conv_kwargs = {'kernel_size': 4, 'padding': s-1, 'stride': s, 'bias': False}
            block.add_module(f'conv{i}', nn.ConvTranspose2d(in_channels, out_channels, **conv_kwargs))
            block.add_module(f'bnorm{i}', nn.BatchNorm2d(out_channels))
            block.add_module(f'act{i}', act_fn)
            block.add_module(f'mc_dropout{i}', MCDropout(drop_out_rate, True))
            in_channels = out_channels

        return block
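
A hypothetical usage sketch for the decoder block above, treating make_block as a standalone function for illustration (in the listing it sits inside a class). With stride 2, kernel size 4 and padding 1 the transposed convolution doubles the spatial resolution; the channel counts and sizes below are made up.

import torch

# Hypothetical values: upsample a 128-channel 8x8 feature map to a 64-channel 16x16 map.
block = make_block(in_channels=128, out_channels=64, n=1, drop_out_rate=0.1)
x = torch.randn(4, 128, 8, 8)
y = block(x)
print(y.shape)  # torch.Size([4, 64, 16, 16])
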
    def __init__(self,
                 in_channels: int,
                 output_shape: tuple,
                 latent_dim: int,
                 drop_out_rate: float,
                 num_blocks: int = 4,
                 **kwargs):  # pylint: disable=unused-argument
        super(Decoder, self).__init__()
        self.in_channels = in_channels
        self.output_shape = output_shape
        self.out_channels = output_shape[0]
        self.latent_dim = latent_dim
        self.num_blocks = num_blocks
        self.drop_out_rate = drop_out_rate
        self.layers = nn.Sequential(
            collections.OrderedDict(
                [
                    ('linear', nn.Linear(self.latent_dim, self.in_channels)),
                    ('mc_dropout', MCDropout(self.drop_out_rate, True)),
                    ('reshape', Reshape(new_shape=(self.in_channels, 1, 1))),
                    ('reverse_gap', nn.UpsamplingBilinear2d(scale_factor=self.conv_input_shape[1])),
                    ('conv', self.make_layers(self.in_channels, self.out_channels, self.drop_out_rate, 1, self.num_blocks)),
                ]
            )
        )
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 drop_out_rate: float,
                 reparameterize: bool = True
                 ):

        super(Bottleneck, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.reparameterize = reparameterize
        self.mu = nn.Linear(self.in_features, self.out_features)
        self.drop_out_rate = drop_out_rate
        self.mc_dropout = MCDropout(self.drop_out_rate, True)

        if self.reparameterize:
            self.logvar = nn.Linear(self.in_features, self.out_features)
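
The Bottleneck snippet above only defines its parameters; a possible forward pass is sketched below, assuming the standard reparameterization trick z = mu + eps * exp(0.5 * logvar). Where exactly mc_dropout is applied, and the return convention, are assumptions rather than the original code.

    def forward(self, x):
        # Hypothetical forward pass for the Bottleneck above (not part of the original listing).
        x = self.mc_dropout(x)
        mu = self.mu(x)
        if not self.reparameterize:
            return mu
        logvar = self.logvar(x)
        std = torch.exp(0.5 * logvar)  # standard deviation from log-variance
        eps = torch.randn_like(std)    # unit-Gaussian noise
        return mu + eps * std          # reparameterized latent sample
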
Example #5
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, dilation=(dilation, dilation)))
        # Optionally end the stage with MC dropout that stays active at inference time.
        if self.add_dropout:
            layers.append(MCDropout(p=0.5, force_dropout=True))
        return nn.Sequential(*layers)
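
The force_dropout=True flag above is what keeps these layers stochastic even after model.eval(); a small demonstration using the MCDropout sketch from earlier (values are illustrative):

import torch

layer = MCDropout(p=0.5, force_dropout=True)
layer.eval()                 # eval mode normally disables dropout ...
x = torch.ones(1, 8)
print(layer(x))              # ... but units are still randomly zeroed here
print(layer(x))              # and a different mask is drawn on every call
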
    def make_block(in_channels: int, out_channels: int, drop_out_rate: float, n: int, activation: str = 'relu'):
        """Convolutional block."""
        if activation == 'relu':
            act_fn = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            act_fn = nn.LeakyReLU(0.2, inplace=True)
        else:
            raise ValueError(f'Unsupported activation: {activation}')

        conv_kwargs = {'kernel_size': 3, 'padding': 1, 'bias': False}

        block = nn.Sequential()
        strides = [2] + [1] * (n - 1)  # first conv halves the spatial size; the remaining convs preserve it
        for i, s in enumerate(strides):
            block.add_module(f'conv{i}', nn.Conv2d(in_channels, out_channels, stride=s, **conv_kwargs))
            block.add_module(f'bnorm{i}', nn.BatchNorm2d(out_channels))
            block.add_module(f'act{i}', act_fn)
            block.add_module(f'mc_dropout{i}', MCDropout(drop_out_rate, True))
            in_channels = out_channels

        return block
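
A usage sketch for this encoder-style block, again treating make_block as a standalone function with made-up sizes; with kernel 3, stride 2 and padding 1 the first convolution halves the spatial resolution.

import torch

# Hypothetical values: encode a 3-channel 32x32 image into a 32-channel 16x16 feature map.
block = make_block(in_channels=3, out_channels=32, drop_out_rate=0.1, n=2)
x = torch.randn(4, 3, 32, 32)
y = block(x)
print(y.shape)  # torch.Size([4, 32, 16, 16])
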
Example #7
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    dilation=1,
                    new_level=True,
                    residual=True):
        assert dilation == 1 or dilation % 2 == 0
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = list()
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  dilation=(1, 1) if dilation == 1 else
                  (dilation // 2 if new_level else dilation, dilation),
                  residual=residual))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      residual=residual,
                      dilation=(dilation, dilation)))
        if self.add_dropout:
            layers.append(MCDropout(p=0.5, force_dropout=True))
        return nn.Sequential(*layers)
x, y, x_test, y_test = data_loader(traindf, testdf)
num_data, num_feature = x.shape

# set training parameters
l = 1e-4               # prior length scale
wr = l**2. / num_data  # weight regulariser (l^2 / N)
dr = 2. / num_data     # dropout regulariser (2 / N)
learning_rate = 0.001
batch_size = 50
num_epoch = 1000
tolerance = 0.002
patience = 20

skip_training = False  # Set this flag to True before validation

mlp = MCDropout()  # in this snippet MCDropout names a full MLP model with MC dropout layers

# train the model, or load previously saved weights when skipping training
if not skip_training:
    mlp.train()
    training_MC(mlp, x, y, x_test, y_test, learning_rate, batch_size, num_epoch,
                tolerance, patience)
else:
    mlp.load_state_dict(torch.load('MC_mlp_01.pth'))

# Monte Carlo sampling
K_test = 100  # number of stochastic forward passes
mlp.train()   # keep dropout active so each forward pass draws a different mask
MC_samples = [mlp(x_test) for _ in range(K_test)]
# calculate the means
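
One way the means and uncertainties could then be computed from MC_samples; a sketch under the assumption that each forward pass returns a plain prediction tensor.

samples = torch.stack(MC_samples, dim=0)  # shape: (K_test, num_test_points, output_dim)
pred_mean = samples.mean(dim=0)           # Monte Carlo estimate of the predictive mean
pred_std = samples.std(dim=0)             # spread over samples, i.e. model (epistemic) uncertainty
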