Example #1
    def __init__(self):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            """One discriminator block: strided conv, LeakyReLU, dropout, optional BatchNorm."""
            block = [
                nn.Conv(in_filters, out_filters, 3, stride=2, padding=1),
                nn.LeakyReLU(scale=0.2),
                nn.Dropout(p=0.25)
            ]
            if bn:
                block.append(nn.BatchNorm(out_filters, eps=0.8))
            return block

        self.conv_blocks = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32), *discriminator_block(32, 64),
            *discriminator_block(64, 128))
        # Spatial size after four stride-2 convolutions
        ds_size = opt.img_size // (2 ** 4)

        # Three heads: real/fake validity, class label, continuous latent code
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1))
        self.aux_layer = nn.Sequential(
            nn.Linear(128 * ds_size ** 2, opt.n_classes), nn.Softmax())
        self.latent_layer = nn.Sequential(
            nn.Linear(128 * ds_size ** 2, opt.code_dim))

        for m in self.modules():
            weights_init_normal(m)
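
The constructor references an external opt config and a weights_init_normal helper that the snippet does not include, and the class Discriminator(nn.Module): line itself is elided. A minimal smoke test with hypothetical stand-ins for those externals (a sketch for illustration only):

import jittor as jt
from jittor import nn
from types import SimpleNamespace

# Hypothetical stand-ins for the externals the snippet expects
opt = SimpleNamespace(channels=1, img_size=32, n_classes=10, code_dim=2)

def weights_init_normal(m):
    pass  # placeholder; the real helper initializes conv/linear weights

disc = Discriminator()
x = jt.randn(4, opt.channels, opt.img_size, opt.img_size)
features = disc.conv_blocks(x).reshape(x.shape[0], -1)
validity = disc.adv_layer(features)     # (4, 1) real/fake score
label = disc.aux_layer(features)        # (4, n_classes) class posterior
code = disc.latent_layer(features)      # (4, code_dim) latent code estimate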
Example #2
 def __init__(self, channels):
     super(SA_Layer, self).__init__()
     self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
     self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
     # Tie the query and key projection weights
     self.q_conv.conv.weight = self.k_conv.conv.weight
     self.v_conv = nn.Conv1d(channels, channels, 1)
     self.trans_conv = nn.Conv1d(channels, channels, 1)
     self.after_norm = nn.BatchNorm1d(channels)
     self.act = nn.ReLU()
     self.softmax = nn.Softmax(dim=-1)
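
Only the constructor appears above. An execute consistent with the offset-attention wiring these submodules imply could look like this (a reconstruction following the usual PCT formulation, not the original file):

 def execute(self, x):                       # x: (b, c, n) point features
     x_q = self.q_conv(x).permute(0, 2, 1)   # (b, n, c//4)
     x_k = self.k_conv(x)                    # (b, c//4, n)
     x_v = self.v_conv(x)                    # (b, c, n)
     energy = nn.bmm(x_q, x_k)               # (b, n, n) pairwise affinities
     attention = self.softmax(energy)
     # Renormalize so each column sums to one, as in common PCT ports
     attention = attention / (1e-9 + attention.sum(dim=1, keepdims=True))
     x_r = nn.bmm(x_v, attention)            # (b, c, n) aggregated features
     # Offset attention: transform the residual x - x_r, then add it back
     x_r = self.act(self.after_norm(self.trans_conv(x - x_r)))
     return x + x_r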
Example #3
 def __init__(self, in_channels, n_cls):
     super(OCRHead, self).__init__()
     self.relu = nn.ReLU()
     self.in_channels = in_channels
     self.softmax = nn.Softmax(dim=2)
     # conv_1x1 refines the pixel features; last_conv takes a 2x-channel
     # input (features concatenated with context) and outputs per-class logits
     self.conv_1x1 = nn.Conv(in_channels, in_channels, kernel_size=1)
     self.last_conv = nn.Conv(in_channels * 2,
                              n_cls,
                              kernel_size=3,
                              stride=1,
                              padding=1)
     self._zero_init_conv()
Example #4
    def __init__(self, in_dim):
        super(PAM_Module, self).__init__()
        self.chanel_in = in_dim

        self.query_conv = nn.Conv(in_channels=in_dim,
                                  out_channels=in_dim // 8,
                                  kernel_size=1)
        self.key_conv = nn.Conv(in_channels=in_dim,
                                out_channels=in_dim // 8,
                                kernel_size=1)
        self.value_conv = nn.Conv(in_channels=in_dim,
                                  out_channels=in_dim,
                                  kernel_size=1)
        # Learnable residual scale, initialized to zero (identity at start)
        self.gamma = jt.zeros(1)

        self.softmax = nn.Softmax(dim=-1)
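
Here too only __init__ is shown; a forward pass in the style of DANet position attention, which these layers set up, would be roughly (a sketch following the published formulation):

    def execute(self, x):                    # x: (B, C, H, W)
        B, C, H, W = x.shape
        # Flatten spatial dims and compute pixel-to-pixel affinities
        proj_query = self.query_conv(x).reshape(B, -1, H * W).transpose(0, 2, 1)
        proj_key = self.key_conv(x).reshape(B, -1, H * W)
        attention = self.softmax(nn.bmm(proj_query, proj_key))   # (B, HW, HW)
        proj_value = self.value_conv(x).reshape(B, -1, H * W)
        out = nn.bmm(proj_value, attention.transpose(0, 2, 1))
        out = out.reshape(B, C, H, W)
        # gamma starts at zero, so the module begins as an identity mapping
        return self.gamma * out + x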
Example #5
    def __init__(self, in_dim):
        super(OC_Module, self).__init__()
        self.in_channels = in_dim

        self.query_conv = nn.Conv(in_channels=in_dim,
                                  out_channels=in_dim // 2,
                                  kernel_size=1)
        self.key_conv = nn.Conv(in_channels=in_dim,
                                out_channels=in_dim // 2,
                                kernel_size=1)
        self.value_conv = nn.Conv(in_channels=in_dim,
                                  out_channels=in_dim,
                                  kernel_size=1)
        self.scale_conv = nn.Conv(in_channels=in_dim,
                                  out_channels=in_dim,
                                  kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        self._zero_init_conv()
Example #6
 def __init__(self, channels):
     super(OffsetAttention, self).__init__()
     self.q_conv = nn.Conv1d(channels,
                             channels // 4,
                             kernel_size=1,
                             bias=False)
     self.k_conv = nn.Conv1d(channels,
                             channels // 4,
                             kernel_size=1,
                             bias=False)
     # Tie the query and key projection weights
     self.q_conv.weight = self.k_conv.weight
     self.v_conv = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
     self.trans_conv = nn.Conv1d(channels,
                                 channels,
                                 kernel_size=1,
                                 bias=False)
     self.after_norm = nn.BatchNorm1d(channels)
     self.act = nn.ReLU()
     self.softmax = nn.Softmax(dim=-1)
Example #7
def soft_cross_entropy_loss(output, target, smoothing=True):
    ''' Calculate cross entropy loss, apply label smoothing if needed. '''

    target = target.view(-1)
    softmax = nn.Softmax(dim=1)
    if smoothing:
        eps = 0.2
        b, n_class = output.shape

        # One-hot encode the integer targets
        one_hot = jt.zeros(output.shape)
        for i in range(b):
            one_hot[i, target[i].data] = 1

        # Smooth: the true class keeps 1 - eps, the rest share eps uniformly
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = jt.log(softmax(output))
        loss = -(one_hot * log_prb).sum(dim=1).mean()
    else:
        loss = nn.cross_entropy_loss(output, target)

    return loss
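
A quick hypothetical call, with logits for 4 samples over 10 classes:

import jittor as jt

output = jt.randn(4, 10)            # raw logits
target = jt.array([1, 3, 5, 7])     # integer class labels
loss = soft_cross_entropy_loss(output, target, smoothing=True)
print(loss.item())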
Example #8
    def __init__(self):
        super(Classifier, self).__init__()

        def block(in_features, out_features, normalization=True):
            """One classifier block: strided conv and LeakyReLU, with optional InstanceNorm."""
            layers = [
                nn.Conv(in_features, out_features, 3, stride=2, padding=1),
                nn.LeakyReLU(scale=0.2)
            ]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_features, affine=None))
            return layers

        self.model = nn.Sequential(
            *block(opt.channels, 64, normalization=False), *block(64, 128),
            *block(128, 256), *block(256, 512))
        # Spatial size after four stride-2 convolutions
        input_size = opt.img_size // (2 ** 4)
        self.output_layer = nn.Sequential(
            nn.Linear(512 * input_size ** 2, opt.n_classes), nn.Softmax())

        for m in self.modules():
            weights_init_normal(m)
Example #9
 def __init__(self, in_dim):
     super(CAM_Module, self).__init__()
     self.chanel_in = in_dim
     # Learnable residual scale, initialized to zero (identity at start)
     self.gamma = jt.zeros(1)
     self.softmax = nn.Softmax(dim=-1)
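
As with the other attention modules above, only the constructor is given; a forward pass in the style of DANet channel attention, which this setup matches, would be roughly (a reconstruction, not the original source):

 def execute(self, x):                       # x: (B, C, H, W)
     B, C, H, W = x.shape
     proj_query = x.reshape(B, C, -1)                    # (B, C, HW)
     proj_key = x.reshape(B, C, -1).transpose(0, 2, 1)   # (B, HW, C)
     energy = nn.bmm(proj_query, proj_key)               # (B, C, C)
     # DANet subtracts each row from its max before the softmax
     energy = energy.max(dim=2, keepdims=True) - energy
     attention = self.softmax(energy)
     proj_value = x.reshape(B, C, -1)
     out = nn.bmm(attention, proj_value).reshape(B, C, H, W)
     return self.gamma * out + x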