def __init__(self, block_expansion, num_segments, num_channels, max_features, num_blocks,
             temperature, estimate_affine_part=False, scale_factor=1):
    """Predict per-segment shift heatmaps, an optional affine part, and a segmentation map.

    Args:
        block_expansion: base channel width of the hourglass predictor.
        num_segments: number of moving segments to predict.
        num_channels: channels of the input image.
        max_features: channel cap inside the hourglass.
        num_blocks: number of hourglass down/up blocks.
        temperature: softmax temperature applied downstream.
        estimate_affine_part: if True, also predict 4 affine coefficients per segment.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
    """
    super(SegmentationModule, self).__init__()

    # Shared encoder-decoder feature extractor.
    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               max_features=max_features, num_blocks=num_blocks)
    self.num_segments = num_segments

    # One shift heatmap per segment.
    self.shift = nn.Conv2d(self.predictor.out_filters, num_segments,
                           kernel_size=(7, 7), padding=(3, 3))

    if estimate_affine_part:
        # Four affine coefficients per segment; bias starts at the identity
        # transform [1, 0, 0, 1] with zeroed weights.
        self.affine = nn.Conv2d(self.predictor.out_filters, 4 * num_segments,
                                kernel_size=(7, 7), padding=(3, 3))
        self.affine.weight.data.zero_()
        self.affine.bias.data.copy_(
            torch.tensor([1, 0, 0, 1] * num_segments, dtype=torch.float))
    else:
        self.affine = None

    # Background channel plus one channel per segment.
    self.segmentation = nn.Conv2d(self.predictor.out_filters, 1 + num_segments,
                                  kernel_size=(7, 7), padding=(3, 3))

    self.temperature = temperature
    self.scale_factor = scale_factor
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, num_channels, num_kp, block_expansion, max_features, num_down_blocks,
             num_bottleneck_blocks, estimate_occlusion_map=False, dense_motion_params=None,
             estimate_jacobian=False, scale_factor=0.25):
    """Image generator: encoder, residual bottleneck, decoder, plus an auxiliary hourglass head.

    Args:
        num_channels: channels of the input/output image.
        num_kp: number of keypoints (kept for interface compatibility).
        block_expansion: base channel width.
        max_features: channel cap for down/up/bottleneck blocks.
        num_down_blocks: number of down-sampling (and up-sampling) stages.
        num_bottleneck_blocks: number of residual blocks in the bottleneck.
        estimate_occlusion_map / dense_motion_params / estimate_jacobian: kept for
            interface compatibility; not read in this constructor.
        scale_factor: downscale factor for the anti-aliased source branch.
    """
    super(Generator, self).__init__()

    # Anti-aliased downsampling branch for the source image.
    self.source_first = AntiAliasInterpolation2d(num_channels, scale_factor)

    # NOTE(review): first_input grows as 1/scale_factor (e.g. 4x block_expansion at 0.25),
    # while the first down block below expects block_expansion input channels — confirm
    # the forward pass reconciles these widths.
    first_input = int(block_expansion / scale_factor)
    # Two extra input channels carry the masks.
    self.first = SameBlock2d(num_channels + 2, first_input, kernel_size=(7, 7), padding=(3, 3))

    # Encoder: channel width doubles each stage, capped at max_features.
    self.down_blocks = nn.ModuleList([
        DownBlock2d(min(max_features, block_expansion * (2 ** idx)),
                    min(max_features, block_expansion * (2 ** (idx + 1))),
                    kernel_size=(3, 3), padding=(1, 1))
        for idx in range(num_down_blocks)
    ])

    # Decoder: mirror of the encoder, halving the width back down.
    self.up_blocks = nn.ModuleList([
        UpBlock2d(min(max_features, block_expansion * (2 ** (num_down_blocks - idx))),
                  min(max_features, block_expansion * (2 ** (num_down_blocks - idx - 1))),
                  kernel_size=(3, 3), padding=(1, 1))
        for idx in range(num_down_blocks)
    ])

    # Residual bottleneck at the deepest resolution.
    self.bottleneck = torch.nn.Sequential()
    bottleneck_width = min(max_features, block_expansion * (2 ** num_down_blocks))
    for idx in range(num_bottleneck_blocks):
        self.bottleneck.add_module('r' + str(idx),
                                   ResBlock2d(bottleneck_width, kernel_size=(3, 3), padding=(1, 1)))

    self.final = nn.Conv2d(block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3))
    self.num_channels = num_channels

    # Auxiliary refinement head over an 8-channel input, producing a 3-channel output.
    self.hourglass = Hourglass(block_expansion=block_expansion, in_features=8,
                               max_features=1024, num_blocks=5)
    self.final_hourglass = nn.Conv2d(in_channels=self.hourglass.out_filters, out_channels=3,
                                     kernel_size=(7, 7), padding=(3, 3))
def __init__(self, block_expansion, num_blocks, max_features, mask_embedding_params, num_kp,
             num_channels, kp_variance, use_correction, use_mask, bg_init=2, num_group_blocks=0,
             scale_factor=1, use_attention=False):
    """Dense-motion module: an hourglass over movement embeddings that emits
    per-keypoint masks and/or a 2-channel correction field.

    Args:
        block_expansion, num_blocks, max_features: hourglass hyper-parameters.
        mask_embedding_params: extra kwargs for the mask MovementEmbeddingModule.
        num_kp: number of keypoints (plus one background slot).
        num_channels: channels of the input image.
        kp_variance: variance of the keypoint gaussians.
        use_correction / use_mask: select which outputs the hourglass produces.
        bg_init: initial bias for the background mask logit.
        num_group_blocks: number of grouped 1x1x1 pre-processing blocks.
        scale_factor: optional input downscale factor (stored only).
        use_attention: forwarded to the hourglass.
    """
    super(DenseMotionModule, self).__init__()

    self.mask_embedding = MovementEmbeddingModule(num_kp=num_kp, kp_variance=kp_variance,
                                                  num_channels=num_channels,
                                                  add_bg_feature_map=True,
                                                  **mask_embedding_params)
    # Difference embedding: only coordinate differences — no heatmaps, no deformed source.
    self.difference_embedding = MovementEmbeddingModule(num_kp=num_kp, kp_variance=kp_variance,
                                                        num_channels=num_channels,
                                                        add_bg_feature_map=True,
                                                        use_difference=True, use_heatmap=False,
                                                        use_deformed_source_image=False)

    # Grouped 1x1x1 blocks keep each keypoint's channels separate (num_kp + background groups).
    self.group_blocks = nn.ModuleList([
        SameBlock3D(self.mask_embedding.out_channels, self.mask_embedding.out_channels,
                    groups=num_kp + 1, kernel_size=(1, 1, 1), padding=(0, 0, 0))
        for _ in range(num_group_blocks)
    ])

    # Output channels: (num_kp + 1) mask logits when use_mask, plus 2 correction
    # channels when use_correction (booleans act as 0/1 multipliers).
    self.hourglass = Hourglass(block_expansion=block_expansion,
                               in_features=self.mask_embedding.out_channels,
                               out_features=(num_kp + 1) * use_mask + 2 * use_correction,
                               max_features=max_features, num_blocks=num_blocks,
                               use_attention=use_attention)

    # Zero the final conv and bias the background mask towards bg_init so the
    # module initially predicts "everything is background, no correction".
    self.hourglass.decoder.conv.weight.data.zero_()
    bias_init = ([bg_init] + [0] * num_kp) * use_mask + [0, 0] * use_correction
    self.hourglass.decoder.conv.bias.data.copy_(torch.tensor(bias_init, dtype=torch.float))

    self.num_kp = num_kp
    self.use_correction = use_correction
    self.use_mask = use_mask
    self.scale_factor = scale_factor
def __init__(self, block_expansion, num_regions, num_channels, max_features, num_blocks,
             temperature, estimate_affine=False, scale_factor=1, pca_based=False,
             fast_svd=False, pad=3):
    """Predict region heatmaps and, optionally, FOMM-style regressed affine (jacobian) maps.

    Args:
        block_expansion, max_features, num_blocks: hourglass hyper-parameters.
        num_regions: number of region heatmaps to predict.
        num_channels: channels of the input image.
        temperature: softmax temperature applied downstream.
        estimate_affine: enable the affine head (only when not pca_based).
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        pca_based: use PCA-based affine estimation instead of regression.
        fast_svd: stored flag for the PCA path.
        pad: padding for the 7x7 prediction convolutions.
    """
    super(RegionPredictor, self).__init__()

    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               max_features=max_features, num_blocks=num_blocks)
    self.regions = nn.Conv2d(self.predictor.out_filters, num_regions,
                             kernel_size=(7, 7), padding=pad)

    if estimate_affine and not pca_based:
        # FOMM-like regression-based representation: one 2x2 jacobian map,
        # initialised to the identity [1, 0, 0, 1] with zeroed weights.
        self.jacobian = nn.Conv2d(self.predictor.out_filters, 4,
                                  kernel_size=(7, 7), padding=pad)
        self.jacobian.weight.data.zero_()
        self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 1], dtype=torch.float))
    else:
        self.jacobian = None

    self.temperature = temperature
    self.scale_factor = scale_factor
    self.pca_based = pca_based
    self.fast_svd = fast_svd
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, block_expansion, num_kp, num_channels, max_features, num_blocks,
             temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False,
             pad=0, use_landmarks=False):
    """Keypoint detector; optionally replaces learned keypoints with 68 facial landmarks.

    Improvements over the original: removed commented-out dead code after the
    ``Landmarks()`` construction, and collapsed the two jacobian-head branches
    (which differed only in padding) into one construction — behavior unchanged.

    Args:
        block_expansion, max_features, num_blocks: hourglass hyper-parameters.
        num_kp: number of keypoints (forced to 68 in landmark mode).
        num_channels: channels of the input image.
        temperature: softmax temperature applied downstream.
        estimate_jacobian: enable the per-keypoint 2x2 jacobian head.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        single_jacobian_map: share one jacobian map across all keypoints.
        pad: padding for the keypoint (and non-landmark jacobian) convolutions.
        use_landmarks: use a pretrained facial-landmark network instead.
    """
    super(KPDetector, self).__init__()

    self.use_landmarks = use_landmarks
    if use_landmarks:
        # Facial-landmark mode is fixed at 68 points.
        num_kp = 68
        self.fan = Landmarks()

    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               max_features=max_features, num_blocks=num_blocks)
    self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp,
                        kernel_size=(7, 7), padding=pad)

    if estimate_jacobian:
        self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
        # Landmark mode used same-size padding (3) for the 7x7 kernel; otherwise
        # honour the caller-supplied `pad`. One construction instead of two branches.
        jacobian_pad = 3 if self.use_landmarks else pad
        self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
                                  out_channels=4 * self.num_jacobian_maps,
                                  kernel_size=(7, 7), padding=jacobian_pad)
        # Initialise every jacobian map to the identity transform.
        self.jacobian.weight.data.zero_()
        self.jacobian.bias.data.copy_(
            torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
    else:
        self.jacobian = None

    self.temperature = temperature
    self.scale_factor = scale_factor
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, checkpoint_with_kp, block_expansion, num_kp, kp_after_softmax, num_channels,
             max_features, num_blocks, temperature, estimate_jacobian=False, scale_factor=1,
             single_jacobian_map=False, pad=0, softmax_mask=False):
    """Keypoint detector whose weights are restored from a pre-trained checkpoint.

    Args:
        checkpoint_with_kp: checkpoint dict containing a 'kp_detector' state dict.
        block_expansion, max_features, num_blocks: hourglass hyper-parameters.
        num_kp: number of keypoints.
        kp_after_softmax / softmax_mask: stored flags used by the forward pass.
        num_channels: channels of the input image.
        temperature: softmax temperature applied downstream.
        estimate_jacobian: enable the per-keypoint 2x2 jacobian head.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        single_jacobian_map: share one jacobian map across all keypoints.
        pad: padding for the 7x7 prediction convolutions.
    """
    super(KPDetector, self).__init__()

    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               max_features=max_features, num_blocks=num_blocks)
    self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp,
                        kernel_size=(7, 7), padding=pad)
    self.kp_after_softmax = kp_after_softmax
    self.softmax_mask = softmax_mask

    if estimate_jacobian:
        self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
        # Jacobian head, initialised to the identity transform per map.
        self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
                                  out_channels=4 * self.num_jacobian_maps,
                                  kernel_size=(7, 7), padding=pad)
        self.jacobian.weight.data.zero_()
        self.jacobian.bias.data.copy_(
            torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
    else:
        self.jacobian = None

    self.temperature = temperature
    self.scale_factor = scale_factor
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)

    # Must run last: all submodules above need to exist before the state dict loads.
    self.load_state_dict(checkpoint_with_kp['kp_detector'])
def __init__(self, block_expansion, num_kp, num_channels, max_features, num_blocks,
             temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False,
             pad=0):
    """Keypoint detector built on an hourglass (encoder + decoder) backbone.

    Args:
        block_expansion, max_features, num_blocks: hourglass hyper-parameters.
        num_kp: number of keypoints to predict.
        num_channels: channels of the input image.
        temperature: softmax temperature applied downstream.
        estimate_jacobian: enable the per-keypoint 2x2 jacobian head.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        single_jacobian_map: share one jacobian map across all keypoints.
        pad: padding for the 7x7 prediction convolutions.
    """
    super(KPDetector, self).__init__()

    # Hourglass backbone: an encoder followed by a decoder (see Hourglass for details).
    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               max_features=max_features, num_blocks=num_blocks)

    # Keypoint head: one heatmap channel per keypoint.
    self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp,
                        kernel_size=(7, 7), padding=pad)

    if estimate_jacobian:
        self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
        # Jacobian head, initialised so every map starts at the identity [1, 0, 0, 1].
        self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
                                  out_channels=4 * self.num_jacobian_maps,
                                  kernel_size=(7, 7), padding=pad)
        self.jacobian.weight.data.zero_()
        self.jacobian.bias.data.copy_(
            torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
    else:
        self.jacobian = None

    self.temperature = temperature
    self.scale_factor = scale_factor
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, block_expansion, num_blocks, max_features, num_kp, num_channels,
             estimate_occlusion_map=False, scale_factor=1, kp_variance=0.01):
    """Dense-motion network: predicts per-keypoint masks and an optional occlusion map.

    Args:
        block_expansion, num_blocks, max_features: hourglass hyper-parameters.
        num_kp: number of keypoints (plus one background slot).
        num_channels: channels of the input image.
        estimate_occlusion_map: enable the single-channel occlusion head.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        kp_variance: variance of the keypoint gaussians.
    """
    super(DenseMotionNetwork, self).__init__()

    # Input: (num_kp + 1) groups of (image channels + 1 heatmap channel) each.
    self.hourglass = Hourglass(block_expansion=block_expansion,
                               in_features=(num_kp + 1) * (num_channels + 1),
                               max_features=max_features, num_blocks=num_blocks)

    # One mask channel per keypoint plus background.
    self.mask = nn.Conv2d(self.hourglass.out_filters, num_kp + 1,
                          kernel_size=(7, 7), padding=(3, 3))

    if estimate_occlusion_map:
        self.occlusion = nn.Conv2d(self.hourglass.out_filters, 1,
                                   kernel_size=(7, 7), padding=(3, 3))
    else:
        self.occlusion = None

    self.num_kp = num_kp
    self.scale_factor = scale_factor
    self.kp_variance = kp_variance
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, block_expansion, num_kp, num_channels, max_features, num_blocks,
             temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False,
             pad=0):
    """Keypoint detector (PaddlePaddle dygraph port of the PyTorch original).

    Args:
        block_expansion, max_features, num_blocks: hourglass hyper-parameters.
        num_kp: number of keypoints to predict.
        num_channels: channels of the input image.
        temperature: softmax temperature applied downstream.
        estimate_jacobian: enable the per-keypoint 2x2 jacobian head.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        single_jacobian_map: share one jacobian map across all keypoints.
        pad: padding for the 7x7 prediction convolutions.
    """
    super(KPDetector, self).__init__()

    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               max_features=max_features, num_blocks=num_blocks)

    # Keypoint head: one heatmap per keypoint.
    self.kp = dygraph.Conv2D(num_channels=self.predictor.out_filters,
                             num_filters=num_kp, filter_size=(7, 7), padding=pad)

    if estimate_jacobian:
        self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
        self.jacobian = dygraph.Conv2D(num_channels=self.predictor.out_filters,
                                       num_filters=4 * self.num_jacobian_maps,
                                       filter_size=(7, 7), padding=pad)
        # Zero weights and identity bias so every map starts at [1, 0, 0, 1]
        # (dygraph has no in-place .zero_(); values are assigned via set_value).
        self.jacobian.weight.set_value(
            np.zeros(list(self.jacobian.weight.shape), dtype=np.float32))
        self.jacobian.bias.set_value(
            np.array([1, 0, 0, 1] * self.num_jacobian_maps, dtype=np.float32))
    else:
        self.jacobian = None

    self.temperature = temperature
    self.scale_factor = scale_factor
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, block_expansion, num_blocks, max_features, num_regions, num_channels,
             estimate_occlusion_map=False, scale_factor=1, region_var=0.01,
             use_covar_heatmap=False, use_deformed_source=True, revert_axis_swap=False):
    """Pixelwise flow predictor: per-region masks plus an optional occlusion map.

    Args:
        block_expansion, num_blocks, max_features: hourglass hyper-parameters.
        num_regions: number of moving regions (plus one background slot).
        num_channels: channels of the input image.
        estimate_occlusion_map: enable the single-channel occlusion head.
        scale_factor: if != 1, inputs are anti-alias downsampled first.
        region_var: variance of the region gaussians.
        use_covar_heatmap: use covariance-shaped heatmaps (stored flag).
        use_deformed_source: feed deformed source channels into the hourglass.
        revert_axis_swap: stored flag used by the forward pass.
    """
    super(PixelwiseFlowPredictor, self).__init__()

    # Input per region group: 1 heatmap channel, plus the deformed source
    # channels when use_deformed_source is set (boolean acts as 0/1 multiplier).
    self.hourglass = Hourglass(
        block_expansion=block_expansion,
        in_features=(num_regions + 1) * (num_channels * use_deformed_source + 1),
        max_features=max_features, num_blocks=num_blocks)

    # One mask channel per region plus background.
    self.mask = nn.Conv2d(self.hourglass.out_filters, num_regions + 1,
                          kernel_size=(7, 7), padding=(3, 3))

    if estimate_occlusion_map:
        self.occlusion = nn.Conv2d(self.hourglass.out_filters, 1,
                                   kernel_size=(7, 7), padding=(3, 3))
    else:
        self.occlusion = None

    self.num_regions = num_regions
    self.scale_factor = scale_factor
    self.region_var = region_var
    self.use_covar_heatmap = use_covar_heatmap
    self.use_deformed_source = use_deformed_source
    self.revert_axis_swap = revert_axis_swap
    if self.scale_factor != 1:
        self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def __init__(self, block_expansion, num_kp, num_channels, max_features, num_blocks,
             temperature, kp_variance, scale_factor=1, clip_variance=None):
    """Keypoint detector variant whose hourglass directly emits num_kp output maps.

    Args:
        block_expansion, max_features, num_blocks: hourglass hyper-parameters.
        num_kp: number of keypoint output maps the hourglass produces.
        num_channels: channels of the input image.
        temperature: softmax temperature applied downstream.
        kp_variance: variance used when building keypoint gaussians.
        scale_factor: stored input downscale factor.
        clip_variance: optional upper bound for the predicted variance.
    """
    super(KPDetector, self).__init__()

    # Unlike the conv-head variants, the hourglass itself outputs num_kp maps.
    self.predictor = Hourglass(block_expansion, in_features=num_channels,
                               out_features=num_kp, max_features=max_features,
                               num_blocks=num_blocks)

    self.temperature = temperature
    self.kp_variance = kp_variance
    self.scale_factor = scale_factor
    self.clip_variance = clip_variance