Code Example #1
    def __init__(self,
                 num_channels=3,
                 block_expansion=32,
                 num_blocks=3,
                 max_features=512):
        super(StylizerDiscrim, self).__init__()

        # first keeps the input and output spatial dimensions the same; only the number of channels changes
        self.first = SameBlock2d(num_channels,
                                 block_expansion,
                                 kernel_size=(7, 7),
                                 padding=(3, 3))

        # Declare the downsampling blocks
        down_blocks = []
        for i in range(num_blocks):
            in_features = min(max_features, block_expansion * (2**i))
            out_features = min(max_features, block_expansion * (2**(i + 1)))
            down_blocks.append(
                DownBlock2d(in_features,
                            out_features,
                            kernel_size=(3, 3),
                            padding=(1, 1)))
        # Note: wrapping the list in nn.ModuleList is the correct way to declare layers built from a list
        self.down_blocks = nn.ModuleList(down_blocks)
        self.conv = nn.Conv2d(min(max_features,
                                  block_expansion * (2**num_blocks)),
                              out_channels=1,
                              kernel_size=1)
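A quick aside on the nn.ModuleList note above: a plain Python list of modules is not registered as a submodule, so its parameters never show up in .parameters() or state_dict(). A minimal standalone sketch (the toy PlainList/RegisteredList classes below are only illustrative, not part of the snippets):

import torch.nn as nn

class PlainList(nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = [nn.Conv2d(3, 8, 3)]                   # plain list: NOT registered

class RegisteredList(nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = nn.ModuleList([nn.Conv2d(3, 8, 3)])    # registered as a submodule

print(len(list(PlainList().parameters())))        # 0 -- the conv would neither train nor be saved
print(len(list(RegisteredList().parameters())))   # 2 -- conv weight and bias are visible
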
Code Example #2
    def __init__(self, num_channels, num_kp, block_expansion, max_features, num_down_blocks,
                 num_bottleneck_blocks, estimate_occlusion_map=False, dense_motion_params=None,
                 estimate_jacobian=False, scale_factor=0.25):
        super(Generator, self).__init__()
        # Band-limited (anti-aliased) rescaling of the source image by scale_factor
        self.source_first = AntiAliasInterpolation2d(num_channels, scale_factor)
        first_input = int(block_expansion / scale_factor)
        self.first = SameBlock2d(num_channels + 2, first_input, kernel_size=(7, 7), padding=(3, 3))  # +2 masks
        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i)))
            out_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i - 1)))
            up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.up_blocks = nn.ModuleList(up_blocks)

        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2 ** num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module('r' + str(i), ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        self.final = nn.Conv2d(block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3))
        self.num_channels = num_channels
        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=8, max_features=1024, num_blocks=5)
        self.final_hourglass = nn.Conv2d(in_channels=self.hourglass.out_filters, out_channels=3, kernel_size=(7, 7),
                                         padding=(3, 3))
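A quick check of the derived sizes in this variant, assuming the illustrative values block_expansion=64 and num_channels=3 (neither is fixed by the snippet) together with the default scale_factor=0.25:

block_expansion, num_channels, scale_factor = 64, 3, 0.25   # assumed values, not from the snippet

first_input = int(block_expansion / scale_factor)
print(first_input)         # 256 output channels for the first block
print(num_channels + 2)    # 5 input channels: the image plus the 2 mask channels noted in "+2 masks"
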
Code Example #3
    def __init__(self, num_channels, block_expansion, max_features, num_down_blocks, num_bottleneck_blocks):
        super(StylizerGenerator, self).__init__()

        # first keeps the input and output spatial dimensions the same; only the number of channels changes
        self.first = SameBlock2d(num_channels, block_expansion, kernel_size=(7, 7), padding=(3, 3))

        # Declare the downsampling blocks
        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        # Note: wrapping the list in nn.ModuleList is the correct way to declare layers built from a list;
        # the modules are then registered (and saved) under the corresponding attribute name on self
        self.down_blocks = nn.ModuleList(down_blocks)

        # Declare the upsampling blocks
        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i)))
            out_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i - 1)))
            up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        # Note: same list-based declaration of modules as above
        self.up_blocks = nn.ModuleList(up_blocks)

        # Declare the bottleneck blocks of the generator
        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2 ** num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module('r' + str(i), ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        # Declare the final output layer
        self.final = nn.Conv2d(block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3))
        self.num_channels = num_channels
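The min(max_features, block_expansion * 2**i) clamp fixes the channel widths of the encoder and decoder. A standalone sketch of that plan, assuming the illustrative values block_expansion=64, max_features=512, num_down_blocks=3 (the snippet leaves all three to the caller):

block_expansion, max_features, num_down_blocks = 64, 512, 3   # assumed example values

down = [(min(max_features, block_expansion * 2 ** i),
         min(max_features, block_expansion * 2 ** (i + 1)))
        for i in range(num_down_blocks)]
up = [(min(max_features, block_expansion * 2 ** (num_down_blocks - i)),
       min(max_features, block_expansion * 2 ** (num_down_blocks - i - 1)))
      for i in range(num_down_blocks)]

print(down)   # [(64, 128), (128, 256), (256, 512)]
print(up)     # [(512, 256), (256, 128), (128, 64)]
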
Code Example #4
    def __init__(self, num_channels, num_kp, block_expansion, max_features, num_down_blocks,
                 num_bottleneck_blocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
        super(OcclusionAwareGenerator, self).__init__()

        if dense_motion_params is not None:
            self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, num_channels=num_channels,
                                                           estimate_occlusion_map=estimate_occlusion_map,
                                                           **dense_motion_params)
        else:
            self.dense_motion_network = None

        self.first = SameBlock2d(num_channels, block_expansion, kernel_size=(7, 7), padding=(3, 3))

        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i)))
            out_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i - 1)))
            up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.up_blocks = nn.ModuleList(up_blocks)

        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2 ** num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module('r' + str(i), ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        self.final = nn.Conv2d(block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3))
        self.estimate_occlusion_map = estimate_occlusion_map
        self.num_channels = num_channels
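The dense_motion_params dict is expanded into keyword arguments of DenseMotionNetwork via **. The pattern itself, shown with a placeholder function and made-up keys (the real keyword names are whatever DenseMotionNetwork accepts and are not visible in this snippet):

def make_block(in_channels, out_channels, kernel_size=3):    # placeholder, not DenseMotionNetwork
    return (in_channels, out_channels, kernel_size)

params = {'out_channels': 16, 'kernel_size': 5}               # plays the role of dense_motion_params
print(make_block(in_channels=8, **params))                    # (8, 16, 5): dict keys become keyword arguments
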
Code Example #5
    def __init__(self,
                 num_channels,
                 block_expansion,
                 max_features,
                 num_down_blocks,
                 num_bottleneck_blocks,
                 num_segments,
                 estimate_visibility=False,
                 **kwargs):
        super(ReconstructionModule, self).__init__()

        self.first = SameBlock2d(num_channels,
                                 block_expansion,
                                 kernel_size=(7, 7),
                                 padding=(3, 3))

        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2**i))
            out_features = min(max_features, block_expansion * (2**(i + 1)))
            down_blocks.append(
                DownBlock2d(in_features,
                            out_features,
                            kernel_size=(3, 3),
                            padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features,
                              block_expansion * (2**(num_down_blocks - i)))
            out_features = min(
                max_features, block_expansion * (2**(num_down_blocks - i - 1)))
            up_blocks.append(
                UpBlock2d(in_features,
                          out_features,
                          kernel_size=(3, 3),
                          padding=(1, 1)))
        self.up_blocks = nn.ModuleList(up_blocks)

        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2**num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module(
                'r' + str(i),
                ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        self.final = nn.Conv2d(block_expansion,
                               num_channels,
                               kernel_size=(7, 7),
                               padding=(3, 3))
        self.estimate_visibility = estimate_visibility
        self.num_channels = num_channels
        self.num_segments = num_segments
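Because the bottleneck is built with add_module, each residual block is addressable by the name it was given ('r0', 'r1', ...), and those names become the prefixes of its state_dict keys. A small standalone sketch with nn.Linear standing in for ResBlock2d:

import torch.nn as nn

bottleneck = nn.Sequential()
for i in range(3):
    bottleneck.add_module('r' + str(i), nn.Linear(4, 4))      # stand-in for ResBlock2d

print([name for name, _ in bottleneck.named_children()])      # ['r0', 'r1', 'r2']
print(sorted(bottleneck.state_dict().keys())[:2])             # ['r0.bias', 'r0.weight']
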
Code Example #6
    def __init__(self, num_channels, num_kp, block_expansion, max_features, num_down_blocks,
                 num_bottleneck_blocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
        super(OcclusionAwareGenerator, self).__init__()

        # Initialize the dense motion network module
        if dense_motion_params is not None:
            self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, num_channels=num_channels,
                                                           estimate_occlusion_map=estimate_occlusion_map,
                                                           **dense_motion_params)
        else:
            self.dense_motion_network = None

        # first keeps the input and output spatial dimensions the same; only the number of channels changes
        self.first = SameBlock2d(num_channels, block_expansion, kernel_size=(7, 7), padding=(3, 3))

        # Declare the downsampling blocks
        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        # Note: wrapping the list in nn.ModuleList is the correct way to declare layers built from a list;
        # the modules are then registered (and saved) under the corresponding attribute name on self
        self.down_blocks = nn.ModuleList(down_blocks)

        # Declare the upsampling blocks
        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i)))
            out_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i - 1)))
            up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        # Note: same list-based declaration of modules as above
        self.up_blocks = nn.ModuleList(up_blocks)

        # Declare the bottleneck blocks of the generator
        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2 ** num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module('r' + str(i), ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        # Declare the final output layer
        self.final = nn.Conv2d(block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3))
        self.estimate_occlusion_map = estimate_occlusion_map
        self.num_channels = num_channels
Code Example #7
    def __init__(self,
                 num_channels,
                 num_regions,
                 block_expansion,
                 max_features,
                 num_down_blocks,
                 num_bottleneck_blocks,
                 pixelwise_flow_predictor_params=None,
                 skips=False,
                 revert_axis_swap=True):
        super(Generator, self).__init__()

        if pixelwise_flow_predictor_params is not None:
            self.pixelwise_flow_predictor = PixelwiseFlowPredictor(
                num_regions=num_regions,
                num_channels=num_channels,
                revert_axis_swap=revert_axis_swap,
                **pixelwise_flow_predictor_params)
        else:
            self.pixelwise_flow_predictor = None

        self.first = SameBlock2d(num_channels,
                                 block_expansion,
                                 kernel_size=(7, 7),
                                 padding=(3, 3))

        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2**i))
            out_features = min(max_features, block_expansion * (2**(i + 1)))
            down_blocks.append(
                DownBlock2d(in_features,
                            out_features,
                            kernel_size=(3, 3),
                            padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features,
                              block_expansion * (2**(num_down_blocks - i)))
            out_features = min(
                max_features, block_expansion * (2**(num_down_blocks - i - 1)))
            up_blocks.append(
                UpBlock2d(in_features,
                          out_features,
                          kernel_size=(3, 3),
                          padding=(1, 1)))
        self.up_blocks = nn.ModuleList(up_blocks)

        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2**num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module(
                'r' + str(i),
                ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        self.final = nn.Conv2d(block_expansion,
                               num_channels,
                               kernel_size=(7, 7),
                               padding=(3, 3))
        self.num_channels = num_channels
        self.skips = skips
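Both first and final use a 7x7 kernel with padding 3, which keeps the spatial resolution unchanged (H_out = H + 2*3 - 7 + 1 = H). A quick standalone check with arbitrary sizes (the 64 input channels below are just an example value for block_expansion):

import torch
import torch.nn as nn

conv = nn.Conv2d(64, 3, kernel_size=(7, 7), padding=(3, 3))   # same kernel/padding as self.final
x = torch.randn(1, 64, 96, 128)                               # arbitrary example input
print(conv(x).shape)                                          # torch.Size([1, 3, 96, 128])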