Example #1
 def __init__(self,
              in_c,
              out_c,
              kernel=(1, 1),
              stride=(1, 1),
              padding=(0, 0),
              groups=1):
     super(Conv_block, self).__init__()
     # print(in_c, out_c, groups)
     self.conv = Conv2d(in_c,
                        out_c,
                        kernel_size=kernel,
                        groups=groups,
                        stride=stride,
                        padding=padding,
                        bias=False)
     self.bn = BatchNorm2d(out_c)
     self.prelu = PReLU(out_c)
Example #2
    def __init__(self, num_blocks=1):
        super().__init__()
        print("Initialized generator network..")
        self.conv1 = Conv2d(1, 64, kernel_size=9, padding=4)
        self.prelu = PReLU()

        self.layers = self._get_residual_blocks(num_blocks)

        self.conv2 = Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn2 = BatchNorm2d(64)

        self.conv3 = Conv2d(64, 256, kernel_size=3, padding=1)
        self.pxshuffle = PixelShuffle(upscale_factor=2)  # up-sampling

        # used in original SR-GAN paper, only for 4x up-sampling
        # self.conv4 = Conv2d(256, 256, kernel_size=3, padding=1)

        self.conv5 = Conv2d(64, 1, kernel_size=9, padding=4)
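The generator above calls self._get_residual_blocks(num_blocks), which is not shown. A minimal sketch, assuming the standard SRGAN-style residual block (conv-BN-PReLU-conv-BN plus an identity skip); the class layout and the helper's return value are assumptions, not the original author's code:

import torch.nn as nn

class ResidualBlock(nn.Module):
    def __init__(self, channels=64):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.prelu = nn.PReLU()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        out = self.bn2(self.conv2(self.prelu(self.bn1(self.conv1(x)))))
        return x + out  # identity skip connection

# _get_residual_blocks(num_blocks) would then plausibly return
# nn.Sequential(*[ResidualBlock(64) for _ in range(num_blocks)])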
Example #3
 def __init__(self, in_channels, out_channels, stride):
     super(BottleneckIRSE, self).__init__()
     self.identity = 0
     if in_channels == out_channels:
         if stride == 1:
             self.identity = 1
         else:
             self.shortcut_layer = MaxPool2d(1, stride)
     else:
         self.shortcut_layer = Sequential(
             Conv2d(in_channels, out_channels, (1, 1), stride, bias=False),
             BatchNorm2d(out_channels))
     self.res_layer = Sequential(
         BatchNorm2d(in_channels),
         Conv2d(in_channels, out_channels, (3, 3), (1, 1), 1, bias=False),
         BatchNorm2d(out_channels), PReLU(out_channels),
         Conv2d(out_channels, out_channels, (3, 3), stride, 1, bias=False),
         BatchNorm2d(out_channels), SEModule(out_channels, 16))
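SEModule(out_channels, 16) is referenced here and in several later examples but never defined. A minimal squeeze-and-excitation sketch consistent with those calls (a channel count plus a reduction ratio); treat it as an assumption rather than the original implementation:

from torch.nn import Module, AdaptiveAvgPool2d, Conv2d, ReLU, Sigmoid

class SEModule(Module):
    def __init__(self, channels, reduction):
        super().__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)  # squeeze: global spatial average
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        w = self.sigmoid(self.fc2(self.relu(self.fc1(self.avg_pool(x)))))
        return x * w  # excite: rescale each channel by its learned weight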
Example #4
 def __init__(self,
              in_c,
              out_c,
              kernel=(1, 1),
              stride=(1, 1),
              padding=(0, 0),
              groups=1,
              use_hs=1):
     super(Conv_block, self).__init__()
     self.conv = Conv2d(in_c,
                        out_channels=out_c,
                        kernel_size=kernel,
                        groups=groups,
                        stride=stride,
                        padding=padding,
                        bias=False)
     self.bn = BatchNorm2d(out_c)
     self.unlinearity = h_swish() if use_hs else PReLU(out_c)
Example #5
    def __init__(self, input_size, num_layers, mode='ir'):
        super(Backbone, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        assert num_layers in [50, 100,
                              152], "num_layers should be 50, 100 or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        if input_size[0] == 112:
            self.output_layer = Sequential(BatchNorm2d(512), Dropout(),
                                           Flatten(), Linear(512 * 7 * 7, 512),
                                           BatchNorm1d(512))

            # # for rknn
            # self.output_layer = Sequential(BatchNorm2d(512),
            #                                Conv2d(512,512,(7,7),1,0,bias=True),
            #                                BatchNorm2d(512)
            #                                )

        else:
            self.output_layer = Sequential(
                BatchNorm2d(512),
                # Dropout(),
                # Flatten(),
                Linear(512 * 14 * 14, 512),
                BatchNorm1d(512))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self._initialize_weights()
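Most of these backbones call get_blocks/get_block without defining them. A sketch consistent with the explicit block list in Example #24 (only the 50-layer case is shown; 100/152 are analogous, and note that Example #28 uses a different get_block signature that also takes the unit module):

from collections import namedtuple

Bottleneck = namedtuple('Bottleneck', ['in_channel', 'depth', 'stride'])

def get_block(in_channel, depth, num_units, stride=2):
    # the first unit changes channels and downsamples; the rest keep the shape
    return [Bottleneck(in_channel, depth, stride)] + \
           [Bottleneck(depth, depth, 1) for _ in range(num_units - 1)]

def get_blocks(num_layers):
    if num_layers == 50:
        return [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3),
        ]
    raise ValueError('only the 50-layer configuration is sketched here')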
Example #6
    def __init__(self, scale_factor):
        upsample_block_num = int(math.log(scale_factor, 2))

        super(Generator, self).__init__()
        self.block1 = Sequential(
            Conv2d(3, 64, kernel_size=(9, 9), padding=(4, 4)), PReLU())
        self.block2 = ResidualBlock(64)
        self.block3 = ResidualBlock(64)
        self.block4 = ResidualBlock(64)
        self.block5 = ResidualBlock(64)
        self.block6 = ResidualBlock(64)
        self.block7 = Sequential(
            Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1)),
            BatchNorm2d(64))

        # up sampling
        block8 = [UpSampleBlock(64, 2) for _ in range(upsample_block_num)]
        block8.append(Conv2d(64, 3, kernel_size=(9, 9), padding=(4, 4)))
        self.block8 = Sequential(*block8)
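UpSampleBlock(64, 2) is not defined in the snippet. A minimal sketch assuming the usual SRGAN sub-pixel upsampling unit (a conv that multiplies channels by up_scale squared, then PixelShuffle, then PReLU):

import torch.nn as nn

class UpSampleBlock(nn.Module):
    def __init__(self, in_channels, up_scale):
        super().__init__()
        # expand channels so PixelShuffle can trade them for spatial resolution
        self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2,
                              kernel_size=3, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(up_scale)
        self.prelu = nn.PReLU()

    def forward(self, x):
        return self.prelu(self.pixel_shuffle(self.conv(x)))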
Example #7
 def __init__(self, opts=None):
     super(MntToVecEncoderEncoderIntoWPlus, self).__init__()
     print('Using MntToVecEncoderEncoderIntoWPlus')
     blocks = get_blocks(num_layers=50)
     unit_module = bottleneck_SE
     self.input_layer = Sequential(
         Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
         BatchNorm2d(64), PReLU(64))
     self.output_layer_2 = Sequential(BatchNorm2d(512),
                                      torch.nn.AdaptiveAvgPool2d((7, 7)),
                                      Flatten(), Linear(512 * 7 * 7, 512))
     self.linear = EqualLinear(512, 512 * 18, lr_mul=1)
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(bottleneck.in_channel, bottleneck.depth,
                             bottleneck.stride))
     self.body = Sequential(*modules)
Example #8
 def __init__(self, num_layers, mode='ir'):
     super(Backbone, self).__init__()
     assert num_layers in [50, 100,
                           152], 'num_layers should be 50,100, or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                   BatchNorm2d(64), PReLU(64))
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(bottleneck.in_channel, bottleneck.depth,
                             bottleneck.stride))
     self.body = Sequential(*modules)
Example #9
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100,
                              152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
            BatchNorm2d(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self.styles = nn.ModuleList()
        self.style_count = opts.n_styles
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2d(256,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2d(128,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
Example #10
    def __init__(self, num_layers=50, drop_ratio=0.6, mode='ir'):
        super(Backbone_bp, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se', 'cbam', 'danet'], 'mode should be ir, ir_se, cbam, or danet'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            print("THe mode is IR")
            unit_module = bottleneck_IR
        elif mode == 'cbam':
            print("The mode is CBAM")
            unit_module = bottleneck_CBAM
        elif mode == 'danet':
            print("The mode is danet")
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE

        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64, eps=2e-5, momentum=0.9),
                                      PReLU(64))
        self.output_layer = Sequential(BatchNorm2d(512, eps=2e-5, momentum=0.9),
                                       Dropout(drop_ratio),
                                       Flatten(),
                                       Linear(512 * 7 * 7, 256),
                                       BatchNorm1d(256, eps=2e-5, momentum=0.9, affine=False))
        modules = []
        item = 0
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel,
                                bottleneck.depth,
                                bottleneck.stride))
                # if item == 0:  # modify the structure of the first block's bottleneck
                #     modules[0].shortcut_layer = Sequential(
                #         Conv2d(bottleneck.in_channel, bottleneck.depth, (1, 1), bottleneck.stride, bias=False),
                #         BatchNorm2d(bottleneck.depth, eps=2e-5, momentum=0.9))
                # item += 1

        if mode == "danet":
            modules.append(DANetHead(512,512,BatchNorm2d))
        self.body = Sequential(*modules)
Example #11
    def __init__(self,
                 num_layers,
                 drop_ratio,
                 mode='ir',
                 embedding_size=512,
                 classnum=51332,
                 s=64.,
                 m=0.5):
        super(IR_With_Head, self).__init__()
        # backbone part
        assert num_layers in [50, 100,
                              152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        self.output_layer = Sequential(BatchNorm2d(512), Dropout(drop_ratio),
                                       Flatten(), Linear(512 * 7 * 7, 512),
                                       BatchNorm1d(512))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        # head part
        self.classnum = classnum
        self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # initial kernel
        self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.m = m  # the margin value, default is 0.5
        self.s = s  # the scale value, default is 64; see NormFace: https://arxiv.org/abs/1704.06369
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.mm = self.sin_m * m  # issue 1
        self.threshold = math.cos(math.pi - m)
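The head only shows __init__; how the cached constants above are used follows the standard ArcFace formulation. A hedged sketch of the corresponding logits computation (the function name and exact clamping are assumptions; during training the margin is applied only to each sample's ground-truth class column before scaling):

import torch
import torch.nn.functional as F

def arcface_logits(embeddings, kernel, s, cos_m, sin_m, threshold, mm):
    kernel_norm = F.normalize(kernel, dim=0)                  # (embedding_size, classnum)
    cos_theta = F.normalize(embeddings, dim=1) @ kernel_norm  # cos(theta) per class
    cos_theta = cos_theta.clamp(-1, 1)
    sin_theta = (1.0 - cos_theta ** 2).clamp(min=0).sqrt()
    cos_theta_m = cos_theta * cos_m - sin_theta * sin_m       # cos(theta + m)
    # when theta + m would exceed pi, fall back to a monotonic substitute
    cos_theta_m = torch.where(cos_theta > threshold, cos_theta_m, cos_theta - mm)
    return s * cos_theta_m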
Example #12
    def __init__(self, in_channel, depth, stride):
        """Intermediate Resblock of bottleneck.

        Args:
            in_channel (int): Input channels.
            depth (int): Output channels.
            stride (int): Conv2d stride.
        """
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth))
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth))
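A quick shape check for this block, assuming the torch.nn imports used throughout these examples. Since the snippet omits forward, the residual addition below is the assumed combination:

import torch

block = bottleneck_IR(in_channel=64, depth=128, stride=2)
x = torch.randn(1, 64, 56, 56)
y = block.res_layer(x) + block.shortcut_layer(x)  # assumed forward: residual + shortcut
print(y.shape)  # torch.Size([1, 128, 28, 28])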
Example #13
 def __init__(self, in_channel, depth, stride):
     super(bottleneck_IR, self).__init__()
     # if in_channel == depth:
     if stride == 1:
         self.shortcut_layer = MaxPool2d(1, stride)
     else:
         self.shortcut_layer = Sequential(
             Conv2d(in_channel, depth, (1, 1), stride, bias=False),
             BatchNorm2d(depth))
     # self.res_layer = Sequential(
     #     BatchNorm2d(in_channel),
     #     Conv2d(in_channel, depth, (3, 3), (1, 1), 1 ,bias=False), PReLU(depth),
     #     Conv2d(depth, depth, (3, 3), stride, 1 ,bias=False), BatchNorm2d(depth))
     self.res_layer = Sequential(
         BatchNorm2d(in_channel),
         Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
         BatchNorm2d(depth),  # added by fengchen
         PReLU(depth),
         Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
         BatchNorm2d(depth))
Example #14
    def __init__(self, in_channel, depth, stride, set_channel):
        super(bottleneck_IR, self).__init__()
        # if set_channel != 0:
        #     print(set_channel)
        if in_channel == depth:
            # self.shortcut_layer = MaxPool2d(1, stride)
            self.shortcut_layer = Sequential()
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth, eps=2e-5))
        if set_channel == 64:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth, eps=2e-5))

        self.res_layer = Sequential(
            BatchNorm2d(in_channel, eps=2e-5),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            BatchNorm2d(depth, eps=2e-5),  # newly added
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth, eps=2e-5))
Example #15
    def __init__(self, numOfLayer):

        super(Backbone_onlyGlobal, self).__init__()

        unit_module = bottleneck_IR

        self.input_layer = Sequential(
            Conv2d(in_channels=3,
                   out_channels=64,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding=(1, 1),
                   bias=False), BatchNorm2d(64), PReLU(64))

        blocks = get_blocks(numOfLayer)
        self.layer1 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[0]
        ])  #get_block(in_channel=64, depth=64, num_units=3)])
        self.layer2 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[1]
        ])  #get_block(in_channel=64, depth=128, num_units=4)])
        self.layer3 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[2]
        ])  #get_block(in_channel=128, depth=256, num_units=14)])
        self.layer4 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[3]
        ])  #get_block(in_channel=256, depth=512, num_units=3)])

        self.output_layer = Sequential(
            nn.Conv2d(in_channels=512,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)))

        self.fc = nn.Linear(64, 7)
        self.fc.apply(init_weights)
Example #16
 def __init__(self, in_channel, depth, stride):
     super(bottleneck_IR_SE, self).__init__()
     if in_channel == depth:
         # 池化层函数
         self.shortcut_layer = MaxPool2d(1, stride)
     else:
         # 将层的列表传递给Sequential的构造函数,来创建一个Sequential模型
         self.shortcut_layer = Sequential(
             #  nn.BatchNorm2d() 的作用是根据统计的mean 和var来对数据进行标准化,并且这个mena和var在每个batch中都会进行,为了使得数据更有统计意义,
             # 使得整个训练数据的特征都能够被保存,则在每个batch过程中,都会对网络的mean和var进行更新,这里就涉及到新的 batch的统计数据mean和
             # var与网络已经保存的这两个统计数据之间的取舍问题了,而这个0.8就指定了保存的比例,这个参数名为momentum.
             Conv2d(in_channel, depth, (1, 1), stride ,bias=False), 
             BatchNorm2d(depth))
     self.res_layer = Sequential(
         BatchNorm2d(in_channel),
         Conv2d(in_channel, depth, (3,3), (1,1),1 ,bias=False),
         PReLU(depth),
         Conv2d(depth, depth, (3,3), stride, 1 ,bias=False),
         BatchNorm2d(depth),
         SEModule(depth,16)
         )
Example #17
    def __init__(self, num_layers=50, drop_ratio=0.4, mode='ir_se'):
        super(ResnetFaceSTNLockedShear, self).__init__()
        assert num_layers in [50, 100, 152]
        assert mode in ['ir', 'ir_se']

        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE

        self.localization = Sequential(bottleneck_IR(3, 16, 2),
                                       bottleneck_IR(16, 32, 2),
                                       bottleneck_IR(32, 32, 2),
                                       bottleneck_IR(32, 64, 2),
                                       bottleneck_IR(64, 64, 1),
                                       torch.nn.AdaptiveAvgPool2d(1))

        self.fc_loc = Sequential(Flatten(), Linear(64 * 1 * 1, 6))

        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        self.output_layer = Sequential(BatchNorm2d(512), Dropout(drop_ratio),
                                       Flatten(), Linear(512 * 7 * 7, 512),
                                       BatchNorm1d(512))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self.fc_loc[1].weight.data.zero_()
        # WARNING remember to change the bias according to input size
        # NOTE for img size 128 -> 112
        self.fc_loc[1].bias.data.copy_(
            torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float32))

        self.warp_param_adder = Parameter(torch.ones(1, 1))
Example #18
 def __init__(self, num_layers, drop_ratio=0.4, mode='ir_se'):
     super(SE_IR, self).__init__()
     assert num_layers in [50, 100,
                           152], 'num_layers should be 50,100, or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                   BatchNorm2d(64), PReLU(64))
     self.output_layer = Sequential(BatchNorm2d(512), Dropout(drop_ratio),
                                    Flatten(), Linear(512 * 7 * 7, 512),
                                    BatchNorm1d(512))
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(bottleneck.in_channel, bottleneck.depth,
                             bottleneck.stride))
     self.body = Sequential(*modules)
Example #19
 def __init__(self, num_layers, mode='ir', opts=None):
     super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
     print('Using BackboneEncoderUsingLastLayerIntoW')
     assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
                                   BatchNorm2d(64),
                                   PReLU(64))
     self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
     self.linear = EqualLinear(512, 512, lr_mul=1)
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(unit_module(bottleneck.in_channel,
                                        bottleneck.depth,
                                        bottleneck.stride))
     self.body = Sequential(*modules)
Example #20
    def __init__(self, input_size, feature_dim, num_layers, mode='ir'):
        super(Backbone, self).__init__()
        assert input_size[0] in [
            112, 128, 224
        ], "input_size should be [112, 112], [128, 128] or [224, 224]"
        assert num_layers in [34, 50, 100,
                              152], "num_layers should be 34, 50, 100 or 152"
        assert mode in ['ir', 'ir_vconv',
                        'ir_se'], "mode should be ir, ir_vconv or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        elif mode == 'ir_vconv':
            unit_module = bottleneck_IR_VConv
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        self.output_layer = Sequential(
            BatchNorm2d(512),
            Dropout(),
            Flatten(),
            # Conv2d(512,512,(int(input_size[0]/16),int(input_size[1]/16)),padding=0, dilation=1,bias=False),
            Linear(512 * int(input_size[0] / 16) * int(input_size[1] / 16),
                   feature_dim),
            BatchNorm1d(feature_dim),
            # Flatten(),
        )

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(in_channel=bottleneck.in_channel,
                                depth=bottleneck.depth,
                                stride=bottleneck.stride))
        self.body = Sequential(*modules)

        self._initialize_weights()
Example #21
    def __init__(self, classnum, num_layers, mode='ir'):
        super(Backbone, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64,momentum=0.9,eps=2e-5),
                                      PReLU(64))
        self.output_layer = Sequential(BatchNorm2d(512,momentum=0.9,eps=2e-5),
                                       Dropout(0.4),
                                       Flatten(),
                                       Linear(512 * 7 * 7, 512),
                                       BatchNorm1d(512,momentum=0.9,eps=2e-5))
        # self.ac_fc=Arcface(classnum=classnum)
        self.ac_fc=Arcface(embedding_size=512,classnum=classnum)
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel,
                                bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(3. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.out_features
                m.weight.data.normal_(0, math.sqrt(3. / n))
Example #22
    def __init__(self, opts=None):
        super(GradualMntToVecEncoder, self).__init__()
        print('Using GradualMntToVecEncoder')
        blocks = get_blocks(num_layers=50)
        unit_module = bottleneck_SE
        self.input_layer = Sequential(
            Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
            BatchNorm2d(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self.styles = nn.ModuleList()
        self.style_count = opts.style_count
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2d(256,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2d(128,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
Example #23
File: testnet.py  Project: Bsting/TestNet
    def __init__(self, num_layers, drop_ratio, mode='ir', output=512):
        super(Backbone, self).__init__()
        assert num_layers in [34, 50, 100,
                              152], 'num_layers should be 34, 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'

        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))

        self.output_layer = Sequential(BatchNorm2d(512), Dropout(drop_ratio),
                                       Flatten(), Linear(512 * 7 * 7, output),
                                       BatchNorm1d(output))

        self.body = torch.nn.ModuleList()
        for x in range(3):
            if x == 0:
                self.body.append(bottleneck_IR_SE(64, 64, 2))
            else:
                self.body.append(bottleneck_IR_SE(64, 64, 1))

        for x in range(4):
            if x == 0:
                self.body.append(bottleneck_IR_SE(64, 128, 2))
            else:
                self.body.append(bottleneck_IR_SE(128, 128, 1))

        for x in range(14):
            if x == 0:
                self.body.append(bottleneck_IR_SE(128, 256, 2))
            else:
                self.body.append(bottleneck_IR_SE(256, 256, 1))

        for x in range(3):
            if x == 0:
                self.body.append(bottleneck_IR_SE(256, 512, 2))
            else:
                self.body.append(bottleneck_IR_SE(512, 512, 1))
Example #24
    def __init__(self):
        super().__init__()
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
        unit_module = bottleneck_IR
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        self.output_layer = Sequential(BatchNorm2d(512), Dropout(), Flatten(),
                                       Linear(512 * 7 * 7, 512),
                                       BatchNorm1d(512))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self._initialize_weights()
Example #25
    def __init__(self,
                 input_size,
                 num_layers,
                 mode='ir',
                 drop_ratio=0.4,
                 affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100,
                              152], "num_layers should be 50, 100 or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        if input_size == 112:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio), Flatten(),
                                           Linear(512 * 7 * 7, 512),
                                           BatchNorm1d(512, affine=affine))
        else:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio), Flatten(),
                                           Linear(512 * 14 * 14, 512),
                                           BatchNorm1d(512, affine=affine))

        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
Example #26
 def __init__(
     self, num_layers, drop_ratio, mode="ir", feat_dim=512, out_h=7, out_w=7
 ):
     super(Resnet, self).__init__()
     assert num_layers in [
         50,
         100,
         152,
     ], "num_layers should be 50,100, or 152"
     assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
     blocks = get_blocks(num_layers)
     if mode == "ir":
         unit_module = bottleneck_IR
     elif mode == "ir_se":
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(
         Conv2d(3, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64)
     )
     self.output_layer = Sequential(
         BatchNorm2d(512),
         Dropout(drop_ratio),
         Flatten(),
         Linear(512 * out_h * out_w, feat_dim),  # for eye
         BatchNorm1d(feat_dim),
     )
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 unit_module(
                     bottleneck.in_channel,
                     bottleneck.depth,
                     bottleneck.stride,
                 )
             )
     self.body = Sequential(*modules)
Example #27
def create_activation(act_type: str, inplace: bool, num_channels: int,
                      **kwargs) -> Module:
    """
    Create an activation function using the given parameters.

    :param act_type: the type of activation to replace with; options:
        [relu, relu6, prelu, lrelu, swish, hardswish, silu]
    :param inplace: True to create the activation as an inplace, False otherwise
    :param num_channels: The number of channels to create the activation for
    :param kwargs: Additional kwargs to pass to the activation constructor
    :return: the created activation layer
    """
    act_type = act_type.lower()

    if act_type == "relu":
        return ReLU(num_channels=num_channels, inplace=inplace)

    if act_type == "relu6":
        return ReLU6(num_channels=num_channels, inplace=inplace)

    if act_type == "prelu":
        return PReLU(num_parameters=num_channels, **kwargs)

    if act_type == "lrelu":
        return LeakyReLU(inplace=inplace, **kwargs)

    if act_type == "swish":
        return Swish(num_channels=num_channels)

    if act_type == "hardswish":
        return Hardswish(num_channels=num_channels, inplace=inplace)

    if act_type == "silu":
        return SiLU(**kwargs)

    raise ValueError("unknown act_type given of {}".format(act_type))
Example #28
File: irse.py  Project: xieguochen/MTLFace
    def __init__(self, input_size, num_layers, mode='ir'):
        super(IResNet, self).__init__()
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"

        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        else:
            raise NotImplementedError

        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))

        self.block1 = get_block(unit_module,
                                in_channel=64,
                                depth=64,
                                num_units=num_layers[0])
        self.block2 = get_block(unit_module,
                                in_channel=64,
                                depth=128,
                                num_units=num_layers[1])
        self.block3 = get_block(unit_module,
                                in_channel=128,
                                depth=256,
                                num_units=num_layers[2])
        self.block4 = get_block(unit_module,
                                in_channel=256,
                                depth=512,
                                num_units=num_layers[3])

        self.output_layer = Sequential(
            BatchNorm2d(512), Dropout(), Flatten(),
            Linear(512 * (input_size // 16)**2, 512), BatchNorm1d(512))

        self._initialize_weights()
Example #29
    def __init__(self, numOfLayer):

        super(Backbone, self).__init__()

        unit_module = bottleneck_IR

        self.input_layer = Sequential(
            Conv2d(in_channels=3,
                   out_channels=64,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding=(1, 1),
                   bias=False), BatchNorm2d(64), PReLU(64))

        blocks = get_blocks(numOfLayer)
        self.layer1 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[0]
        ])  #get_block(in_channel=64, depth=64, num_units=3)])
        self.layer2 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[1]
        ])  #get_block(in_channel=64, depth=128, num_units=4)])
        self.layer3 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[2]
        ])  #get_block(in_channel=128, depth=256, num_units=14)])
        self.layer4 = Sequential(*[
            unit_module(bottleneck.in_channel, bottleneck.depth,
                        bottleneck.stride) for bottleneck in blocks[3]
        ])  #get_block(in_channel=256, depth=512, num_units=3)])

        self.output_layer = Sequential(
            nn.Conv2d(in_channels=512,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)))

        cropNet_modules = []
        cropNet_blocks = [
            get_block(in_channel=128, depth=256, num_units=2),
            get_block(in_channel=256, depth=512, num_units=2)
        ]
        for block in cropNet_blocks:
            for bottleneck in block:
                cropNet_modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        cropNet_modules += [
            nn.Conv2d(in_channels=512,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU()
        ]
        self.Crop_Net = nn.ModuleList(
            [copy.deepcopy(nn.Sequential(*cropNet_modules)) for i in range(5)])

        self.fc1 = nn.Linear(64 + 320, 7)
        self.fc1.apply(init_weights)

        self.fc2 = nn.Linear(64 + 320, 7)
        self.fc2.apply(init_weights)

        self.GAP = nn.AdaptiveAvgPool2d((1, 1))
Example #30
import torch
import numpy as np
from torch.nn import ReLU, PReLU


def myprelu(input, weight=.25):
    # element-wise PReLU with a fixed weight; note: mutates `input` in place
    for i in range(input.shape[0]):
        input[i] = input[i] if input[i] >= 0 else weight * input[i]

    return input


if __name__ == '__main__':

    # compare ReLU and PReLU against the handwritten PReLU above

    relu = ReLU()
    prelu = PReLU()
    td1 = np.array([9., 11., -4., -5., -9., -4., -7., 5., 0., 7.])

    res_relu = relu(torch.Tensor(td1))
    res_prelu = prelu(torch.Tensor(td1))
    myrelu = myprelu(td1.copy())  # pass a copy so the "orig" print below shows the unmodified array

    print("orig ", np.array(td1))
    print("relu ", res_relu.detach().numpy())
    print("prelu ", res_prelu.detach().numpy())
    print("my relu ", myrelu)

    print('finished!')
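For anything beyond a demo, the loop can be replaced by a vectorized NumPy expression with the same semantics, minus the in-place mutation; a sketch:

def myprelu_vec(x, weight=.25):
    # element-wise PReLU without modifying the input array
    return np.where(x >= 0, x, weight * x)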