def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
    super(Conv_block_no_bn, self).__init__()
    self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups,
                       stride=stride, padding=padding, bias=False)
    self.prelu = PReLU(out_c)
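# Hedged sketch: the forward pass implied by the layers defined above (conv then
# PReLU, no batch norm). The original forward is not included in this collection.
def forward(self, x):
    return self.prelu(self.conv(x))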
def __init__(self, blocks, mode='ir'):
    super(Backbone, self).__init__()
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    self.layer1 = _make_layer(blocks[0], unit_module)
    self.layer2 = _make_layer(blocks[1], unit_module)
    self.layer3 = _make_layer(blocks[2], unit_module)
    self._initialize_weights()
def __init__(self, in_channel, depth, stride):
    """Initialize the IR bottleneck module."""
    super(bottleneck_IR, self).__init__()
    if in_channel == depth:
        self.shortcut_layer = MaxPool2d(1, stride)
    else:
        self.shortcut_layer = Sequential(
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth))
    self.res_layer = Sequential(
        BatchNorm2d(in_channel),
        Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
        PReLU(depth),
        Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
        BatchNorm2d(depth))
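# Hedged sketch: the usual IR-block forward, summing the residual branch with the
# shortcut. Assumed from the standard InsightFace-style pattern; not shown here.
def forward(self, x):
    shortcut = self.shortcut_layer(x)
    res = self.res_layer(x)
    return res + shortcut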
def __init__(self, in_channel, depth, stride):
    super(bottleneck_IR_SE, self).__init__()
    if in_channel == depth:
        self.shortcut_layer = MaxPool2d(1, stride)
    else:
        self.shortcut_layer = Sequential(
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth))
    self.res_layer = Sequential(
        BatchNorm2d(in_channel),
        Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
        PReLU(depth),
        Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
        BatchNorm2d(depth),
        SEModule(depth, 16))
def __init__(self, in_channel, depth, stride):
    super(bottleneck_CBAM, self).__init__()
    self.stride = stride
    self.res_layer = Sequential(
        BatchNorm2d(in_channel, eps=2e-5, momentum=0.9),
        Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
        BatchNorm2d(depth, eps=2e-5, momentum=0.9),
        PReLU(depth),
        Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
        BatchNorm2d(depth, eps=2e-5, momentum=0.9))
    self.ca_layer = ChannelAttention(depth)
    self.sa_layer = SpatialAttention()
    if stride == 2:
        self.shortcut_layer = Sequential(
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth, eps=2e-5, momentum=0.9))
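# Hedged sketch of a CBAM-style forward: channel attention, then spatial attention,
# scaled onto the residual branch before the shortcut add. This follows the common
# CBAM ordering; the original forward (and the stride == 1 shortcut, assumed here
# to be the identity) is not shown in this collection.
def forward(self, x):
    shortcut = self.shortcut_layer(x) if self.stride == 2 else x
    res = self.res_layer(x)
    res = self.ca_layer(res) * res  # channel attention weights
    res = self.sa_layer(res) * res  # spatial attention weights
    return res + shortcut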
def __init__(self, n_styles=18, opts=None):
    super(ResNetGradualStyleEncoder, self).__init__()
    self.conv1 = nn.Conv2d(opts.input_nc, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = BatchNorm2d(64)
    self.relu = PReLU(64)
    resnet_basenet = resnet34(pretrained=True)
    blocks = [
        resnet_basenet.layer1,
        resnet_basenet.layer2,
        resnet_basenet.layer3,
        resnet_basenet.layer4
    ]
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(bottleneck)
    self.body = Sequential(*modules)
    self.styles = nn.ModuleList()
    self.style_count = n_styles
    self.coarse_ind = 3
    self.middle_ind = 7
    for i in range(self.style_count):
        if i < self.coarse_ind:
            style = GradualStyleBlock(512, 512, 16)
        elif i < self.middle_ind:
            style = GradualStyleBlock(512, 512, 32)
        else:
            style = GradualStyleBlock(512, 512, 64)
        self.styles.append(style)
    self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
    self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
def __init__(self, num_layers, mode='ir', opts=None):
    super(Encoder4Editing, self).__init__()
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
    self.styles = nn.ModuleList()
    log_size = int(math.log(opts.stylegan_size, 2))
    self.style_count = 2 * log_size - 2
    self.coarse_ind = 3
    self.middle_ind = 7
    for i in range(self.style_count):
        if i < self.coarse_ind:
            style = GradualStyleBlock(512, 512, 16)
        elif i < self.middle_ind:
            style = GradualStyleBlock(512, 512, 32)
        else:
            style = GradualStyleBlock(512, 512, 64)
        self.styles.append(style)
    self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
    self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
    self.progressive_stage = ProgressiveStage.Inference
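# Hedged sketch: latlayer1/latlayer2 in the two encoders above are typically merged
# with coarser features via an FPN-style upsample-and-add, as in the pSp encoder.
# Assumes torch.nn.functional is imported as F; the original helper is not shown.
def _upsample_add(self, x, y):
    _, _, H, W = y.size()
    return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y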
def parse_activation(name):
    if name in relu_names:
        return ReLU()
    if name in leaky_relu_names:
        return LeakyReLU()
    if name in sigmoid_names:
        return Sigmoid()
    if name in log_sigmoid_name:
        return LogSigmoid()
    if name in p_relu_names:
        return PReLU()
    if name in tanh_names:
        return Tanh()
    if name in softmax_names:
        return Softmax(dim=1)
    if name in log_softmax_names:
        return LogSoftmax(dim=1)
    # Fail loudly instead of silently returning None for unrecognized names.
    raise ValueError(f'Unknown activation name: {name}')
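# Usage sketch, assuming the *_names lookup collections are defined elsewhere in
# this module (e.g. relu_names containing 'relu'):
# act = parse_activation('relu')   # -> ReLU()
# y = act(torch.randn(2, 3))       # applies the activation element-wise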
def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
    super(Conv_block, self).__init__()
    self.conv = Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False)
    self.bn = BatchNorm2d(out_c)
    self.prelu = PReLU(out_c)
def __init__(self, opts=None):
    super(MntToVecEncoderEncoderIntoW, self).__init__()
    print('Using MntToVecEncoderEncoderIntoW')
    blocks = get_blocks(num_layers=50)
    unit_module = bottleneck_SE
    self.input_layer = Sequential(
        Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
        BatchNorm2d(64),
        PReLU(64))
    self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
    self.linear = EqualLinear(512, 512, lr_mul=1)
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
    super(ConvBlock, self).__init__()
    self.layers = nn.Sequential(
        Conv2d(in_c, out_c, kernel, groups=groups, stride=stride, padding=padding, bias=False),
        BatchNorm2d(num_features=out_c),
        PReLU(num_parameters=out_c))
def __init__(self, in_channels, out_channels, stride):
    super(BottleneckIR, self).__init__()
    self.identity = 0
    if in_channels == out_channels:
        if stride == 1:
            self.identity = 1
        else:
            self.shortcut_layer = MaxPool2d(1, stride)
    else:
        self.shortcut_layer = Sequential(
            Conv2d(in_channels, out_channels, (1, 1), stride, bias=False),
            BatchNorm2d(out_channels))
    self.res_layer = Sequential(
        BatchNorm2d(in_channels),
        Conv2d(in_channels, out_channels, (3, 3), (1, 1), 1, bias=False),
        BatchNorm2d(out_channels),
        PReLU(out_channels),
        Conv2d(out_channels, out_channels, (3, 3), stride, 1, bias=False),
        BatchNorm2d(out_channels))
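# Hedged sketch: a forward that honors the identity flag set above, falling back
# to shortcut_layer otherwise. Assumed; the original is not in this collection.
def forward(self, x):
    shortcut = x if self.identity else self.shortcut_layer(x)
    return self.res_layer(x) + shortcut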
def __init__(self, input_size, output_size, num_layers, mode='ir',
             pretrained_path='weights/backbone_ir50_ms1m_epoch120.pth'):
    super(Backbone, self).__init__()
    assert input_size[0] in [112, 224], "input_size should be [112, 112] or [224, 224]"
    assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
    assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    if input_size[0] == 112:
        self.output_layer = Sequential(BatchNorm2d(512),
                                       Dropout(),
                                       Flatten(),
                                       Linear(512 * 7 * 7, 512),
                                       Linear(512, output_size))
    else:
        self.output_layer = Sequential(BatchNorm2d(512),
                                       Dropout(),
                                       Flatten(),
                                       Linear(512 * 14 * 14, 512),
                                       Linear(512, output_size))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
    self.pretrained_path = pretrained_path
    self._initialize_weights()
def __init__(self, num_layers=50, drop_ratio=0.6, mode='ir', use_checkpoint=True, chunks=12):
    super(Backbone, self).__init__()
    self.use_checkpoint = use_checkpoint
    self.chunks = chunks
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se', 'cbam', 'danet'], 'mode should be ir, ir_se, cbam, or danet'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        print("The mode is IR")
        unit_module = bottleneck_IR
    elif mode == 'cbam':
        print("The mode is CBAM")
        unit_module = bottleneck_CBAM
    elif mode == 'danet':
        print("The mode is DANet")
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64, eps=2e-5, momentum=0.9),
                                  PReLU(64))
    self.output_layer = Sequential(BatchNorm2d(512, eps=2e-5, momentum=0.9),
                                   Dropout(drop_ratio),
                                   Flatten(),
                                   Linear(512 * 7 * 7, 256),
                                   BatchNorm1d(256, eps=2e-5, momentum=0.9, affine=False))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    if mode == "danet":
        modules.append(DANetHead(512, 512, BatchNorm2d))
    self.body = Sequential(*modules)
    # informational message
    if self.use_checkpoint:
        print("Backbone uses checkpointing")
def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1, use_hs=1):
    super(Conv_block, self).__init__()
    self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups,
                       stride=stride, padding=padding, bias=False)
    self.bn = BatchNorm2d(out_c)
    # h-swish when use_hs is truthy, otherwise PReLU
    self.unlinearity = h_swish() if use_hs else PReLU(out_c)
def __init__(self, num_blocks=1):
    super().__init__()
    print("Initialized generator network..")
    self.conv1 = Conv2d(1, 64, kernel_size=9, padding=4)
    self.prelu = PReLU()
    self.layers = self._get_residual_blocks(num_blocks)
    self.conv2 = Conv2d(64, 64, kernel_size=3, padding=1)
    self.bn2 = BatchNorm2d(64)
    self.conv3 = Conv2d(64, 256, kernel_size=3, padding=1)
    self.pxshuffle = PixelShuffle(upscale_factor=2)  # up-sampling
    # used in the original SRGAN paper, only for 4x up-sampling
    # self.conv4 = Conv2d(256, 256, kernel_size=3, padding=1)
    self.conv5 = Conv2d(64, 1, kernel_size=9, padding=4)
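# Hedged sketch of the forward implied by the layers above: a residual trunk with
# a global skip around it, one 2x pixel-shuffle upsample, and a 9x9 reconstruction
# conv. Assumes self.layers is a Sequential of residual blocks; not shown here.
def forward(self, x):
    head = self.prelu(self.conv1(x))
    res = self.layers(head)
    res = self.bn2(self.conv2(res)) + head  # global skip connection
    res = self.prelu(self.pxshuffle(self.conv3(res)))  # 256 -> 64 channels, 2x size
    return self.conv5(res)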
def __init__(self, scale_factor):
    upsample_block_num = int(math.log(scale_factor, 2))
    super(Generator, self).__init__()
    self.block1 = Sequential(
        Conv2d(3, 64, kernel_size=(9, 9), padding=(4, 4)),
        PReLU())
    self.block2 = ResidualBlock(64)
    self.block3 = ResidualBlock(64)
    self.block4 = ResidualBlock(64)
    self.block5 = ResidualBlock(64)
    self.block6 = ResidualBlock(64)
    self.block7 = Sequential(
        Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1)),
        BatchNorm2d(64))
    # up-sampling
    block8 = [UpSampleBlock(64, 2) for _ in range(upsample_block_num)]
    block8.append(Conv2d(64, 3, kernel_size=(9, 9), padding=(4, 4)))
    self.block8 = Sequential(*block8)
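# Hedged sketch of the standard SRGAN generator forward matching the blocks above:
# the block1 output skips over the residual chain into block8, and the result is
# squashed to [0, 1]. The original forward is not included in this collection.
def forward(self, x):
    block1 = self.block1(x)
    block2 = self.block2(block1)
    block3 = self.block3(block2)
    block4 = self.block4(block3)
    block5 = self.block5(block4)
    block6 = self.block6(block5)
    block7 = self.block7(block6)
    block8 = self.block8(block1 + block7)  # global residual connection
    return (torch.tanh(block8) + 1) / 2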
def __init__(self, num_layers, mode='ir'):
    super(Backbone, self).__init__()
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
def __init__(self, num_layers, drop_ratio, mode='ir', embedding_size=512, classnum=51332, s=64., m=0.5):
    super(IR_With_Head, self).__init__()
    # backbone part
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    self.output_layer = Sequential(BatchNorm2d(512),
                                   Dropout(drop_ratio),
                                   Flatten(),
                                   Linear(512 * 7 * 7, 512),
                                   BatchNorm1d(512))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
    # head part
    self.classnum = classnum
    self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
    # initialize the kernel
    self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
    self.m = m  # the margin value, default is 0.5
    self.s = s  # scale value, default is 64; see NormFace https://arxiv.org/abs/1704.06369
    self.cos_m = math.cos(m)
    self.sin_m = math.sin(m)
    self.mm = self.sin_m * m  # issue 1
    self.threshold = math.cos(math.pi - m)
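# Hedged sketch of the ArcFace-style margin head implied by the buffers above
# (cos_m, sin_m, mm, threshold), following the common InsightFace recipe; the
# original forward is not shown, and embeddings are assumed already l2-normalized.
def forward(self, embeddings, label):
    # column-normalize the class-weight kernel
    kernel_norm = self.kernel / self.kernel.norm(dim=0, keepdim=True).clamp_min(1e-12)
    cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
    sin_theta = torch.sqrt(1.0 - cos_theta.pow(2))
    cos_theta_m = cos_theta * self.cos_m - sin_theta * self.sin_m  # cos(theta + m)
    # keep the logit monotonic: where theta + m > pi, use cos(theta) - m*sin(m)
    cos_theta_m = torch.where(cos_theta > self.threshold, cos_theta_m, cos_theta - self.mm)
    output = cos_theta.clone()
    idx = torch.arange(embeddings.size(0), dtype=torch.long)
    output[idx, label] = cos_theta_m[idx, label]  # apply margin to target classes only
    return output * self.s  # scale, as in NormFace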
def __init__(self, num_layers=50, drop_ratio=0.6, mode='ir'):
    super(Backbone_bp, self).__init__()
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se', 'cbam', 'danet'], 'mode should be ir, ir_se, cbam, or danet'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        print("The mode is IR")
        unit_module = bottleneck_IR
    elif mode == 'cbam':
        print("The mode is CBAM")
        unit_module = bottleneck_CBAM
    elif mode == 'danet':
        print("The mode is DANet")
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64, eps=2e-5, momentum=0.9),
                                  PReLU(64))
    self.output_layer = Sequential(BatchNorm2d(512, eps=2e-5, momentum=0.9),
                                   Dropout(drop_ratio),
                                   Flatten(),
                                   Linear(512 * 7 * 7, 256),
                                   BatchNorm1d(256, eps=2e-5, momentum=0.9, affine=False))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
            # if item == 0:  # modify the shortcut of the first block's bottleneck
            #     modules[0].shortcut_layer = Sequential(
            #         Conv2d(bottleneck.in_channel, bottleneck.depth, (1, 1),
            #                bottleneck.stride, bias=False),
            #         BatchNorm2d(bottleneck.depth, eps=2e-5, momentum=0.9))
            #     item += 1
    if mode == "danet":
        modules.append(DANetHead(512, 512, BatchNorm2d))
    self.body = Sequential(*modules)
def __init__(self, in_channel, depth, stride): """Intermediate Resblock of bottleneck. Args: in_channel (int): Input channels. depth (int): Output channels. stride (int): Conv2d stride. """ super(bottleneck_IR, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth)) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))
def __init__(self, in_channel, depth, stride):
    super(bottleneck_IR, self).__init__()
    # if in_channel == depth:
    if stride == 1:
        self.shortcut_layer = MaxPool2d(1, stride)
    else:
        self.shortcut_layer = Sequential(
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth))
    # self.res_layer = Sequential(
    #     BatchNorm2d(in_channel),
    #     Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
    #     Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))
    self.res_layer = Sequential(
        BatchNorm2d(in_channel),
        Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
        BatchNorm2d(depth),  # added by fengchen
        PReLU(depth),
        Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
        BatchNorm2d(depth))
def __init__(self, in_channel, depth, stride, set_channel):
    super(bottleneck_IR, self).__init__()
    # if set_channel != 0:
    #     print(set_channel)
    if in_channel == depth:
        # self.shortcut_layer = MaxPool2d(1, stride)
        self.shortcut_layer = Sequential()
    else:
        self.shortcut_layer = Sequential(
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth, eps=2e-5))
    if set_channel == 64:
        self.shortcut_layer = Sequential(
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth, eps=2e-5))
    self.res_layer = Sequential(
        BatchNorm2d(in_channel, eps=2e-5),
        Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
        BatchNorm2d(depth, eps=2e-5),  # newly added
        PReLU(depth),
        Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
        BatchNorm2d(depth, eps=2e-5))
def __init__(self, input_size, num_layers, num_features, mode='ir'):
    super(Backbone, self).__init__()
    assert input_size[0] in [112, 224], "input_size should be [112, 112] or [224, 224]"
    assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
    assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    if input_size[0] == 112:
        self.output_layer = Sequential(BatchNorm2d(512),
                                       Dropout(),
                                       Flatten(),
                                       Linear(512 * 7 * 7, num_features),
                                       BatchNorm1d(num_features))
    else:
        self.output_layer = Sequential(BatchNorm2d(512),
                                       Dropout(),
                                       Flatten(),
                                       Linear(512 * 14 * 14, num_features),
                                       BatchNorm1d(num_features))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
    self._initialize_weights()
    # The assignments below assume module-level constants (input_space, input_range,
    # input_sizes, means, stds) defined elsewhere in the original file.
    self.input_space = input_space
    self.input_range = input_range
    self.input_size = [input_sizes[0], input_size[0], input_size[0]]
    self.mean = means
    self.std = stds
def __init__(self, numOfLayer):
    super(Backbone_onlyGlobal, self).__init__()
    unit_module = bottleneck_IR
    self.input_layer = Sequential(
        Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3),
               stride=(1, 1), padding=(1, 1), bias=False),
        BatchNorm2d(64),
        PReLU(64))
    blocks = get_blocks(numOfLayer)
    self.layer1 = Sequential(*[
        unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride)
        for bottleneck in blocks[0]
    ])  # get_block(in_channel=64, depth=64, num_units=3)
    self.layer2 = Sequential(*[
        unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride)
        for bottleneck in blocks[1]
    ])  # get_block(in_channel=64, depth=128, num_units=4)
    self.layer3 = Sequential(*[
        unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride)
        for bottleneck in blocks[2]
    ])  # get_block(in_channel=128, depth=256, num_units=14)
    self.layer4 = Sequential(*[
        unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride)
        for bottleneck in blocks[3]
    ])  # get_block(in_channel=256, depth=512, num_units=3)
    self.output_layer = Sequential(
        nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3, 3),
                  stride=(1, 1), padding=(1, 1)),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d((1, 1)))
    self.fc = nn.Linear(64, 7)
    self.fc.apply(init_weights)
def __init__(self, in_channel, depth, stride):
    super(bottleneck_IR_SE, self).__init__()
    if in_channel == depth:
        # pooling layer
        self.shortcut_layer = MaxPool2d(1, stride)
    else:
        # pass a list of layers to Sequential's constructor to build a Sequential model
        self.shortcut_layer = Sequential(
            # nn.BatchNorm2d() standardizes the data using tracked mean and var statistics.
            # These are updated on every batch so that the statistics of the whole training
            # set are preserved, which raises the question of how to blend each new batch's
            # mean and var with the stored ones; a value like 0.8 would specify the retained
            # proportion, and this parameter is called momentum.
            Conv2d(in_channel, depth, (1, 1), stride, bias=False),
            BatchNorm2d(depth))
    self.res_layer = Sequential(
        BatchNorm2d(in_channel),
        Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
        PReLU(depth),
        Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
        BatchNorm2d(depth),
        SEModule(depth, 16))
def __init__(self, num_layers=50, drop_ratio=0.4, mode='ir_se'):
    super(ResnetFaceSTNLockedShear, self).__init__()
    assert num_layers in [50, 100, 152]
    assert mode in ['ir', 'ir_se']
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.localization = Sequential(bottleneck_IR(3, 16, 2),
                                   bottleneck_IR(16, 32, 2),
                                   bottleneck_IR(32, 32, 2),
                                   bottleneck_IR(32, 64, 2),
                                   bottleneck_IR(64, 64, 1),
                                   torch.nn.AdaptiveAvgPool2d(1))
    self.fc_loc = Sequential(Flatten(), Linear(64 * 1 * 1, 6))
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    self.output_layer = Sequential(BatchNorm2d(512),
                                   Dropout(drop_ratio),
                                   Flatten(),
                                   Linear(512 * 7 * 7, 512),
                                   BatchNorm1d(512))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
    # Initialize the affine regression to the identity transform.
    self.fc_loc[1].weight.data.zero_()
    # WARNING: remember to change the bias according to input size
    # NOTE: for img size 128 -> 112
    self.fc_loc[1].bias.data.copy_(
        torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float32))
    self.warp_param_adder = Parameter(torch.ones(1, 1))
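# Hedged sketch of the spatial-transformer step implied by localization/fc_loc:
# regress an affine matrix, warp the input, then run the recognition trunk.
# Assumes torch.nn.functional is imported as F; how warp_param_adder and the
# "locked shear" constraint enter theta is not shown in this collection.
def forward(self, x):
    theta = self.fc_loc(self.localization(x)).view(-1, 2, 3)
    grid = F.affine_grid(theta, x.size(), align_corners=False)
    x = F.grid_sample(x, grid, align_corners=False)
    x = self.input_layer(x)
    x = self.body(x)
    return self.output_layer(x)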
def __init__(self, num_layers, drop_ratio=0.4, mode='ir_se'):
    super(SE_IR, self).__init__()
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    self.output_layer = Sequential(BatchNorm2d(512),
                                   Dropout(drop_ratio),
                                   Flatten(),
                                   Linear(512 * 7 * 7, 512),
                                   BatchNorm1d(512))
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
def __init__(self, input_size, feature_dim, num_layers, mode='ir'):
    super(Backbone, self).__init__()
    assert input_size[0] in [112, 128, 224], \
        "input_size should be [112, 112], [128, 128], or [224, 224]"
    assert num_layers in [34, 50, 100, 152], "num_layers should be 34, 50, 100, or 152"
    assert mode in ['ir', 'ir_vconv', 'ir_se'], "mode should be ir, ir_vconv, or ir_se"
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    elif mode == 'ir_vconv':
        unit_module = bottleneck_IR_VConv
    self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    self.output_layer = Sequential(
        BatchNorm2d(512),
        Dropout(),
        Flatten(),
        # Conv2d(512, 512, (int(input_size[0] / 16), int(input_size[1] / 16)),
        #        padding=0, dilation=1, bias=False),
        Linear(512 * int(input_size[0] / 16) * int(input_size[1] / 16), feature_dim),
        BatchNorm1d(feature_dim),
        # Flatten(),
    )
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(in_channel=bottleneck.in_channel,
                                       depth=bottleneck.depth,
                                       stride=bottleneck.stride))
    self.body = Sequential(*modules)
    self._initialize_weights()
def __init__(self, num_layers, mode='ir', opts=None):
    super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
    print('Using BackboneEncoderUsingLastLayerIntoW')
    assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
    assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
    blocks = get_blocks(num_layers)
    if mode == 'ir':
        unit_module = bottleneck_IR
    elif mode == 'ir_se':
        unit_module = bottleneck_IR_SE
    self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
                                  BatchNorm2d(64),
                                  PReLU(64))
    self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
    self.linear = EqualLinear(512, 512, lr_mul=1)
    modules = []
    for block in blocks:
        for bottleneck in block:
            modules.append(unit_module(bottleneck.in_channel,
                                       bottleneck.depth,
                                       bottleneck.stride))
    self.body = Sequential(*modules)
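# Hedged sketch: the W-space encoder forward implied by the layers above, pooling
# the last feature map to a single 512-d vector and mapping it through the
# equalized linear layer (matches the pSp-style encoder pattern; not shown here).
def forward(self, x):
    x = self.input_layer(x)
    x = self.body(x)
    x = self.output_pool(x)
    x = x.view(-1, 512)
    return self.linear(x)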