Example #1
    def __progressive_down_sampling(self, real_batch, depth, alpha):
        """
        private helper for down_sampling the original images in order to facilitate the
        progressive growing of the layers.
        :param real_batch: batch of real samples
        :param depth: depth at which training is going on
        :param alpha: current value of the fade-in alpha
        :return: real_samples => modified real batch of samples
        """

        from torch.nn import AvgPool2d
        from torch.nn.functional import interpolate

        if self.structure == 'fixed':
            return real_batch

        # down_sample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        prior_down_sample_factor = max(int(np.power(2, self.depth - depth)), 0)

        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)

        if depth > 0:
            prior_ds_real_samples = interpolate(
                AvgPool2d(prior_down_sample_factor)(real_batch),
                scale_factor=2)
        else:
            prior_ds_real_samples = ds_real_samples

        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        real_samples = (alpha * ds_real_samples) + (
            (1 - alpha) * prior_ds_real_samples)

        # return the computed real_samples
        return real_samples
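
For orientation, here is a minimal self-contained sketch of the blend this helper computes, using made-up tensor sizes and a hypothetical alpha: the real batch is pooled to the current depth's resolution, pooled to the previous (coarser) depth's resolution and upsampled back by 2x, and the two views are mixed by the fade-in coefficient.

import torch
from torch.nn import AvgPool2d
from torch.nn.functional import interpolate

# hypothetical setup: 4 RGB images at full resolution 128x128,
# training currently at a depth whose resolution is 32x32
real_batch = torch.randn(4, 3, 128, 128)
alpha = 0.3                       # fade-in coefficient in [0, 1]

down_sample_factor = 4            # 128 / 32 (current depth)
prior_down_sample_factor = 8      # 128 / 16 (previous, coarser depth)

# current-depth view: 32x32
ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)
# previous-depth view, upsampled back to 32x32
prior_ds_real_samples = interpolate(
    AvgPool2d(prior_down_sample_factor)(real_batch), scale_factor=2)

# fade-in blend of the two resolutions
real_samples = alpha * ds_real_samples + (1 - alpha) * prior_ds_real_samples
print(real_samples.shape)         # torch.Size([4, 3, 32, 32])
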
Example #2
 def __init__(self,
              kernel_size: int,
              stride: int = None,
              signed: bool = True,
              min_overall_bit_width: Optional[int] = 2,
              max_overall_bit_width: Optional[int] = 32,
              quant_type: QuantType = QuantType.FP,
              lsb_trunc_bit_width_impl_type=BitWidthImplType.CONST):
     QuantLayer.__init__(self,
                         compute_output_scale=True,
                         compute_output_bit_width=True,
                         return_quant_tensor=True)
     AvgPool2d.__init__(self, kernel_size=kernel_size, stride=stride)
     ls_bit_width_to_trunc = math.ceil(math.log2(kernel_size * kernel_size))
     self.signed = signed
     self.quant_type = quant_type
     explicit_rescaling = True  # we are explicitly rescaling as we are replacing the div in avg with trunc
     self.accumulator_quant = TruncQuantProxy(
         signed=signed,
         quant_type=quant_type,
         trunc_at_least_init_val=True,
         ls_bit_width_to_trunc=ls_bit_width_to_trunc,
         min_overall_bit_width=min_overall_bit_width,
         max_overall_bit_width=max_overall_bit_width,
         lsb_trunc_bit_width_impl_type=lsb_trunc_bit_width_impl_type,
         explicit_rescaling=explicit_rescaling,
         override_pretrained_bit_width=False)
Example #3
    def __init__(self, width_factor=1, input_size=112, landmark_number=98):
        super(PFLD_Ultralight_Slim, self).__init__()

        self.conv1 = Conv_Block(3, int(64 * width_factor), 3, 2, 1)
        self.conv2 = Conv_Block(int(64 * width_factor), int(64 * width_factor), 3, 1, 1, group=int(64 * width_factor))

        self.conv3_1 = GhostBottleneck(int(64 * width_factor), int(96 * width_factor), int(80 * width_factor), stride=2)
        self.conv3_2 = GhostBottleneck(int(80 * width_factor), int(120 * width_factor), int(80 * width_factor), stride=1)
        self.conv3_3 = GhostBottleneck(int(80 * width_factor), int(120 * width_factor), int(80 * width_factor), stride=1)

        self.conv4_1 = GhostBottleneck(int(80 * width_factor), int(200 * width_factor), int(96 * width_factor), stride=2)
        self.conv4_2 = GhostBottleneck(int(96 * width_factor), int(240 * width_factor), int(96 * width_factor), stride=1)
        self.conv4_3 = GhostBottleneck(int(96 * width_factor), int(240 * width_factor), int(96 * width_factor), stride=1)

        self.conv5_1 = GhostBottleneck(int(96 * width_factor), int(336 * width_factor), int(144 * width_factor), stride=2)
        self.conv5_2 = GhostBottleneck(int(144 * width_factor), int(504 * width_factor), int(144 * width_factor), stride=1)
        self.conv5_3 = GhostBottleneck(int(144 * width_factor), int(504 * width_factor), int(144 * width_factor), stride=1)
        self.conv5_4 = GhostBottleneck(int(144 * width_factor), int(504 * width_factor), int(144 * width_factor), stride=1)

        self.conv6 = GhostBottleneck(int(144 * width_factor), int(216 * width_factor), int(16 * width_factor), stride=1)
        self.conv7 = Conv_Block(int(16 * width_factor), int(32 * width_factor), 3, 1, 1)
        self.conv8 = Conv_Block(int(32 * width_factor), int(128 * width_factor), input_size // 16, 1, 0, has_bn=False)

        self.avg_pool1 = AvgPool2d(input_size // 2)
        self.avg_pool2 = AvgPool2d(input_size // 4)
        self.avg_pool3 = AvgPool2d(input_size // 8)
        self.avg_pool4 = AvgPool2d(input_size // 16)

        self.fc = Linear(int(512 * width_factor), landmark_number * 2)
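
A note on the pooling sizes above: each AvgPool2d(input_size // k) is sized to match the full spatial extent of the stage it pools (56, 28, 14 and 7 for input_size=112), so it acts as a global average pool and collapses that stage to 1x1. Presumably the pooled stage features and the 1x1 output of conv8 are flattened and concatenated to form the 512-dimensional input of self.fc; a small sketch under those assumptions (width_factor=1):

import torch
from torch.nn import AvgPool2d

input_size = 112

# assumed per-stage feature maps for input_size=112, width_factor=1
x1 = torch.randn(1,  64, input_size // 2,  input_size // 2)   # after conv2
x2 = torch.randn(1,  80, input_size // 4,  input_size // 4)   # after conv3_3
x3 = torch.randn(1,  96, input_size // 8,  input_size // 8)   # after conv4_3
x4 = torch.randn(1, 144, input_size // 16, input_size // 16)  # after conv5_4
x5 = torch.randn(1, 128, 1, 1)                                # after conv8 (its kernel already spans the map)

# a kernel equal to the feature-map size collapses each map to 1x1
pools = [AvgPool2d(input_size // k) for k in (2, 4, 8, 16)]
feats = [pool(x) for pool, x in zip(pools, (x1, x2, x3, x4))] + [x5]

flat = torch.cat([f.flatten(1) for f in feats], dim=1)
print(flat.shape)  # torch.Size([1, 512]) -- matches Linear(int(512 * width_factor), ...)
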
Example #4
    def optimize_discriminator(self,
                               noise,
                               real_batch,
                               latent_vector,
                               depth,
                               alpha,
                               use_matching_aware=True):
        """
        performs one step of weight update on discriminator using the batch of data
        :param noise: input noise used for sample generation
        :param real_batch: real samples batch
        :param latent_vector: (conditional latent vector)
        :param depth: current depth of optimization
        :param alpha: current alpha for fade-in
        :param use_matching_aware: whether to use matching aware discrimination
        :return: current loss (Wasserstein loss)
        """
        from torch.nn import AvgPool2d
        from torch.nn.functional import upsample

        # downsample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        prior_downsample_factor = max(int(np.power(2, self.depth - depth)), 0)

        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)

        if depth > 0:
            prior_ds_real_samples = upsample(
                AvgPool2d(prior_downsample_factor)(real_batch), scale_factor=2)
        else:
            prior_ds_real_samples = ds_real_samples

        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        real_samples = (alpha * ds_real_samples) + (
            (1 - alpha) * prior_ds_real_samples)

        loss_val = 0
        for _ in range(self.n_critic):
            # generate a batch of samples
            fake_samples = self.gen(noise, depth, alpha).detach()

            loss = self.loss.dis_loss(real_samples, fake_samples,
                                      latent_vector, depth, alpha)

            if use_matching_aware:
                # calculate the matching aware distribution loss
                mis_match_text = latent_vector[
                    np.random.permutation(latent_vector.shape[0]), :]
                m_a_d = self.dis(real_samples, mis_match_text, depth, alpha)
                loss = loss + th.mean(m_a_d)

            # optimize discriminator
            self.dis_optim.zero_grad()
            loss.backward()
            self.dis_optim.step()

            loss_val += loss.item()

        return loss_val / self.n_critic
Example #5
 def __init__(self,
              kernel_size: Union[int, Tuple[int, int]],
              stride: Union[int, Tuple[int, int]] = None,
              trunc_quant: Optional[AccQuantType] = TruncTo8bit,
              return_quant_tensor: bool = True,
              **kwargs):
     AvgPool2d.__init__(self, kernel_size=kernel_size, stride=stride)
     QuantLayerMixin.__init__(self, return_quant_tensor)
     QuantTruncMixin.__init__(self, trunc_quant=trunc_quant, **kwargs)
Example #6
    def __init__(self, width_factor=1, input_size=112, landmark_number=98):
        super(PFLD, self).__init__()

        self.conv1 = Conv_Block(3, int(64 * width_factor), 3, 2, 1)
        self.conv2 = Conv_Block(int(64 * width_factor),
                                int(64 * width_factor),
                                3,
                                1,
                                1,
                                group=int(64 * width_factor))

        self.conv3_1 = InvertedResidual(int(64 * width_factor),
                                        int(64 * width_factor), 2, False, 2)
        self.conv3_2 = InvertedResidual(int(64 * width_factor),
                                        int(64 * width_factor), 1, True, 2)
        self.conv3_3 = InvertedResidual(int(64 * width_factor),
                                        int(64 * width_factor), 1, True, 2)
        self.conv3_4 = InvertedResidual(int(64 * width_factor),
                                        int(64 * width_factor), 1, True, 2)
        self.conv3_5 = InvertedResidual(int(64 * width_factor),
                                        int(64 * width_factor), 1, True, 2)

        self.conv4 = InvertedResidual(int(64 * width_factor),
                                      int(128 * width_factor), 2, False, 2)

        self.conv5_1 = InvertedResidual(int(128 * width_factor),
                                        int(128 * width_factor), 1, False, 4)
        self.conv5_2 = InvertedResidual(int(128 * width_factor),
                                        int(128 * width_factor), 1, True, 4)
        self.conv5_3 = InvertedResidual(int(128 * width_factor),
                                        int(128 * width_factor), 1, True, 4)
        self.conv5_4 = InvertedResidual(int(128 * width_factor),
                                        int(128 * width_factor), 1, True, 4)
        self.conv5_5 = InvertedResidual(int(128 * width_factor),
                                        int(128 * width_factor), 1, True, 4)
        self.conv5_6 = InvertedResidual(int(128 * width_factor),
                                        int(128 * width_factor), 1, True, 4)

        self.conv6 = InvertedResidual(int(128 * width_factor),
                                      int(16 * width_factor), 1, False, 2)
        self.conv7 = Conv_Block(int(16 * width_factor), int(32 * width_factor),
                                3, 2, 1)
        self.conv8 = Conv_Block(int(32 * width_factor),
                                int(128 * width_factor),
                                input_size // 16,
                                1,
                                0,
                                has_bn=False)

        self.avg_pool1 = AvgPool2d(input_size // 8)
        self.avg_pool2 = AvgPool2d(input_size // 16)
        self.fc = Linear(int(176 * width_factor), landmark_number * 2)
Example #7
 def __init__(self,
              kernel_size: Union[int, Tuple[int, int]],
              stride: Union[int, Tuple[int, int]] = None,
              trunc_quant: Union[AccQuantProxyProtocol,
                                 Type[Injector]] = TruncTo8bit,
              return_quant_tensor: bool = True,
              update_injector: Callable = update_trunc_quant_injector,
              **kwargs):
     AvgPool2d.__init__(self, kernel_size=kernel_size, stride=stride)
     QuantLayerMixin.__init__(self, return_quant_tensor)
     QuantTruncMixin.__init__(self,
                              trunc_quant=trunc_quant,
                              update_injector=update_injector,
                              **kwargs)
Example #8
    def __init__(self, input_nc=1, output_nc=1, depth=32, norm_type="batch", active_type="LeakyReLU"):
        super(Unet_G, self).__init__()
        self.norm = getNormLayer(norm_type)
        self.active = getActiveLayer(active_type)
        self.depth = depth
        self.d0 = Sequential(Conv2d(input_nc, depth, 7, 1, 3), self.active)  # 256, 256, 16
        self.d1 = Sequential(Conv2d(depth * 1, depth * 2, 5, 1, 2), self.norm(depth * 2, affine=True), self.active,
                             MaxPool2d(2, 2))  # 128, 128, 32
        self.d2 = Sequential(Conv2d(depth * 2, depth * 2, 3, 1, 1), self.norm(depth * 2, affine=True), self.active,
                             MaxPool2d(2, 2))  # 64, 64, 32
        self.d3 = Sequential(Conv2d(depth * 2, depth * 4, 3, 1, 1), self.norm(depth * 4, affine=True), self.active,
                             MaxPool2d(2, 2))  # 32, 32, 64
        self.d4 = Sequential(Conv2d(depth * 4, depth * 4, 3, 1, 1), self.norm(depth * 4, affine=True), self.active,
                             MaxPool2d(2, 2))  # 16, 16, 64
        self.d5 = Sequential(Conv2d(depth * 4, depth * 8, 3, 1, 1), self.norm(depth * 8, affine=True), self.active,
                             MaxPool2d(2, 2))  # 8, 8, 128
        self.d6 = Sequential(Conv2d(depth * 8, depth * 8, 3, 1, 1), self.norm(depth * 8, affine=True), self.active,
                             AvgPool2d(2, 2))  # 4, 4, 128
        self.d7 = Sequential(Conv2d(depth * 8, depth * 16, 3, 1, 1), self.norm(depth * 16, affine=True), self.active,
                             AvgPool2d(2, 2))  # 2, 2, 256
        self.d8 = Sequential(Conv2d(depth * 16, depth * 16, 3, 1, 1), self.norm(depth * 16, affine=True),
                             self.active)  # 2, 2, 256

        self.u0 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 16, depth * 16, 3, 1, 1), self.norm(depth * 16, affine=True), self.active)
        self.u1 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 8, depth * 8, 3, 1, 1), self.norm(depth * 8, affine=True), self.active)
        self.u2 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 4, depth * 4, 3, 1, 1), self.norm(depth * 4, affine=True), self.active)
        self.u3 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 4, depth * 4, 3, 1, 1), self.norm(depth * 4, affine=True), self.active)
        self.u4 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 2, depth * 2, 3, 1, 1), self.norm(depth * 2, affine=True), self.active)
        self.u5 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 2, depth * 2, 3, 1, 1), self.norm(depth * 2, affine=True), self.active)
        self.u6 = Sequential(Upsample(scale_factor=2),
                             Conv2d(depth * 1, depth * 1, 5, 1, 2), self.norm(depth * 1, affine=True), self.active)

        self.u7 = Sequential(Conv2d(depth * 1, output_nc, 7, 1, 3), Tanh())

        self.leaky_relu = self.active
        self.conv_32_8 = Conv2d(depth * 16 * 2, depth * 8, 3, 1, 1)
        self.conv_16_8 = Conv2d(depth * 8 * 2, depth * 8, 3, 1, 1)
        self.conv_16_4 = Conv2d(depth * 8 * 2, depth * 4, 3, 1, 1)
        self.conv_8_4 = Conv2d(depth * 4 * 2, depth * 4, 3, 1, 1)
        self.conv_8_2 = Conv2d(depth * 4 * 2, depth * 2, 3, 1, 1)
        self.conv_4_2 = Conv2d(depth * 2 * 2, depth * 2, 3, 1, 1)
        self.conv_4_1 = Conv2d(depth * 2 * 2, depth * 1, 3, 1, 1)
        self.conv_2_1 = Conv2d(depth * 1 * 2, depth * 1, 3, 1, 1)
Example #9
    def __progressive_downsampling(self, real_batch, current_depth, alpha):
        down_sample_factor = 2 ** (self.depth - current_depth - 1)
        assert down_sample_factor <= (real_batch.shape[-1] / 4), \
            "Image size is too small for downsampling at this depth"
        prior_downsample_factor = max(2 ** (self.depth - current_depth), 0)

        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)

        if current_depth > 0:
            prior_ds_real_samples = interpolate(AvgPool2d(prior_downsample_factor)(real_batch),
                                                scale_factor=2)
        else:
            prior_ds_real_samples = ds_real_samples

        return (alpha * ds_real_samples) + ((1 - alpha) * prior_ds_real_samples)
Example #10
 def __init__(self, in_channels: int, pool_features: int):
     super().__init__()
     self.branch_1x1 = Sequential(
         _ConvBNRelu(in_channels=in_channels,
                     out_channels=64,
                     kernel_size=1))
     self.branch_3x3_3x3 = Sequential(
         _ConvBNRelu(in_channels=in_channels,
                     out_channels=64,
                     kernel_size=1),
         _ConvBNRelu(in_channels=64,
                     out_channels=96,
                     kernel_size=3,
                     padding=1),
         _ConvBNRelu(in_channels=96,
                     out_channels=96,
                     kernel_size=3,
                     padding=1),
     )
     self.branch_5x5 = Sequential(
         _ConvBNRelu(in_channels=in_channels,
                     out_channels=48,
                     kernel_size=1),
         _ConvBNRelu(in_channels=48,
                     out_channels=64,
                     kernel_size=5,
                     padding=2),
     )
     self.branch_pool = Sequential(
         AvgPool2d(kernel_size=3, stride=1, padding=1),
         _ConvBNRelu(in_channels=in_channels,
                     out_channels=pool_features,
                     kernel_size=1),
     )
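
Worth noting about the pooling branch above: with kernel_size=3, stride=1, padding=1, an AvgPool2d leaves the spatial dimensions unchanged, which is what allows this branch to be concatenated channel-wise with the 1x1, 3x3 and 5x5 branches. A quick check with an assumed input shape:

import torch
from torch.nn import AvgPool2d

x = torch.randn(2, 192, 35, 35)                       # assumed Inception-style feature map
pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
print(pool(x).shape)                                  # torch.Size([2, 192, 35, 35]) -- H and W preserved
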
Example #11
    def __init__(self):
        super(SGM_NET, self).__init__()
        self.conv_1 = Conv2d(in_channels=1,
                             out_channels=32,
                             kernel_size=3,
                             stride=1,
                             padding=0)
        self.conv_2 = Conv2d(in_channels=32,
                             out_channels=64,
                             kernel_size=3,
                             stride=1,
                             padding=0)
        self.conv_3 = Conv2d(in_channels=64,
                             out_channels=128,
                             kernel_size=3,
                             stride=1,
                             padding=0)
        self.fc1 = nn.Linear(in_features=128, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=8)
        self.relu = ReLU()
        self.avgpool = AvgPool2d(kernel_size=2, stride=2)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_normal_(m.weight.data)
                init.constant_(m.bias.data, 0.01)
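
One detail about AvgPool2d(kernel_size=2, stride=2) as used for self.avgpool here: with the default ceil_mode=False the output size is floored, so an odd-sized feature map silently loses its last row and column. A quick illustration with assumed shapes:

import torch
from torch.nn import AvgPool2d

pool = AvgPool2d(kernel_size=2, stride=2)          # as in self.avgpool above
print(pool(torch.randn(1, 32, 28, 28)).shape)      # torch.Size([1, 32, 14, 14])
print(pool(torch.randn(1, 32, 29, 29)).shape)      # torch.Size([1, 32, 14, 14]) -- last row/col dropped

# ceil_mode=True keeps the partial window instead
print(AvgPool2d(2, 2, ceil_mode=True)(torch.randn(1, 32, 29, 29)).shape)  # torch.Size([1, 32, 15, 15])
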
Example #12
 def __init__(self,
              kernel_size: int,
              stride: int = None,
              trunc_quant: Union[AccQuantProxyProtocol,
                                 Type[Injector]] = DefaultTruncQI,
              return_quant_tensor: bool = True,
              update_injector: Callable = update_trunc_quant_injector,
              **kwargs):
     AvgPool2d.__init__(self, kernel_size=kernel_size, stride=stride)
     QuantLayerMixin.__init__(self, return_quant_tensor)
     QuantTruncMixin.__init__(self,
                              trunc_quant=trunc_quant,
                              update_injector=update_injector,
                              ls_bit_width_to_trunc=math.ceil(
                                  math.log2(kernel_size * kernel_size)),
                              **kwargs)
Example #13
 def __init__(self, num_classes=2):
     super(CovidRENet, self).__init__()
     self.name = 'CovidRENet'
     self.num_classes = num_classes
     self.encoder_1 = Sequential(
         Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),
         BatchNorm2d(64), ReLU(inplace=True), LocalResponseNorm(5),
         MaxPool2d(kernel_size=2, stride=2))
     self.encoder_2 = Sequential(
         Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
         BatchNorm2d(128), ReLU(inplace=True), LocalResponseNorm(5),
         AvgPool2d(kernel_size=2, stride=2))
     self.encoder_3 = Sequential(
         Conv2d(in_channels=128, out_channels=256, kernel_size=3,
                padding=1), BatchNorm2d(256), ReLU(inplace=True),
         LocalResponseNorm(5), MaxPool2d(kernel_size=2, stride=2))
     self.encoder_4 = Sequential(
         Conv2d(in_channels=256, out_channels=256, kernel_size=3,
                padding=1), BatchNorm2d(256), ReLU(inplace=True),
         MaxPool2d(kernel_size=2, stride=2))
     self.dropout_1_n_relu = Sequential(Dropout(0.5), ReLU(inplace=True))
     self.fc_1 = Linear(8 * 8 * 256, 4096)
     self.dropout_2_n_relu = Sequential(Dropout(0.2), ReLU(inplace=True))
     self.fc_2 = Linear(4096, 256)
     self.dropout_3_n_relu = Sequential(Dropout(0.2), ReLU(inplace=True))
     self.fc_3 = Linear(256, 64)
     self.dropout_4_n_relu = Sequential(Dropout(0.2), ReLU(inplace=True))
     self.fc_4 = Linear(64, self.num_classes)
Example #14
    def __init__(self, num_layers, drop_ratio, mode='ir_se'):
        super(Backbone_work, self).__init__()
        assert num_layers in [18, 50, 100,
                              152], 'num_layers should be 18, 50, 100 or 152'
        blocks = get_blocks(num_layers)
        unit_module = bottleneck_IR_SE_NEW
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), ReLU(64))
        # self.output_layer1 = Sequential(BatchNorm2d(128),
        #                                 Conv2d(128, 128, (7, 7), 1, 0, bias=False),
        #                                 ReLU(128))
        #                                 #Dropout(drop_ratio))   # 128 * 1 * 1

        self.output_layer2 = Sequential(
            AvgPool2d(7, 1),
            BatchNorm2d(128),
            #Flatten(),
            #Linear(128, 512),
            Conv2d(128, 512, 1, 1, 0, bias=False),
            BatchNorm2d(512))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride, bottleneck.feature_shape))
        self.body = Sequential(*modules)
Example #15
        def initBlocks(self, params, countFlopsFlag):
            widthRatioList, nClasses, input_size, partition = params

            blocksPlanes = self.initBlocksPlanes()

            # init parameters
            kernel_size = 3
            stride = 1

            # create list of blocks from blocksPlanes
            blocks = ModuleList()
            prevLayer = Input(3, input_size)

            for i, (blockType, out_planes) in enumerate(blocksPlanes):
                layerWidthRatioList = widthRatioList.copy()
                # build layer
                l = blockType(layerWidthRatioList, out_planes, kernel_size,
                              stride, prevLayer, countFlopsFlag)
                # add layer to blocks list
                blocks.append(l)
                # update previous layer
                prevLayer = l.outputLayer()

            self.avgpool = AvgPool2d(8)
            self.fc = Linear(64, nClasses).cuda()

            return blocks
Example #16
    def __init__(self, in_channels, out_channels):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        """
        from torch.nn import AvgPool2d, LeakyReLU
        from torch.nn import Conv2d

        super().__init__()

        # convolutional modules
        self.self_attention = SelfAttention(in_channels, squeeze_factor=8)
        self.conv_1 = Conv2d(in_channels,
                             in_channels, (3, 3),
                             padding=1,
                             bias=True)
        self.conv_2 = Conv2d(in_channels,
                             out_channels, (3, 3),
                             padding=1,
                             bias=True)
        self.downSampler = AvgPool2d(2)  # downsampler

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
Example #17
    def __init__(self, resolution=224, num_classes=1000, multiplier=1):

        super(MobileNetV1, self).__init__()
        self.name = "MobileNetV1_%d_%03d" % (resolution, int(multiplier * 100))
        assert (resolution % 32 == 0)
        self.first_in_channel = _make_divisible(32 * multiplier, 8)
        self.last_out_channel = _make_divisible(1024 * multiplier, 8)
        self.features = nn.Sequential(
            Conv2d(3,
                   self.first_in_channel,
                   kernel_size=3,
                   stride=2,
                   padding=1),
            DepthSepConv(32, 64, stride=1, multiplier=multiplier),
            DepthSepConv(64, 128, stride=2, multiplier=multiplier),
            DepthSepConv(128, 128, stride=1, multiplier=multiplier),
            DepthSepConv(128, 256, stride=2, multiplier=multiplier),
            DepthSepConv(256, 256, stride=1, multiplier=multiplier),
            DepthSepConv(256, 512, stride=2, multiplier=multiplier),
            DepthSepConv(512, 512, stride=1, multiplier=multiplier),
            DepthSepConv(512, 512, stride=1, multiplier=multiplier),
            DepthSepConv(512, 512, stride=1, multiplier=multiplier),
            DepthSepConv(512, 512, stride=1, multiplier=multiplier),
            DepthSepConv(512, 512, stride=1, multiplier=multiplier),
            DepthSepConv(512, 1024, stride=2, multiplier=multiplier),
            DepthSepConv(1024, 1024, stride=1, multiplier=multiplier))

        self.classifier = nn.Sequential(
            # 7 x 7 x 1024
            AvgPool2d(kernel_size=resolution // 32),
            # 1 x 1 x 1024
            Conv2d(self.last_out_channel, num_classes, kernel_size=1),
            # 1 x 1 x num_classes
            Softmax2d())
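
The classifier head above uses AvgPool2d(kernel_size=resolution // 32) as a global average pool: after the five stride-2 stages the feature map is resolution/32 on a side (7x7 for the default 224 input), so the pool reduces it to 1x1 before the 1x1 convolution that produces class scores. A sketch of just that head under the default-argument assumptions:

import torch
from torch.nn import AvgPool2d, Conv2d, Sequential, Softmax2d

resolution, num_classes, last_channels = 224, 1000, 1024   # default-config assumptions

head = Sequential(
    AvgPool2d(kernel_size=resolution // 32),               # 7x7x1024 -> 1x1x1024
    Conv2d(last_channels, num_classes, kernel_size=1),
    Softmax2d(),
)

features = torch.randn(1, last_channels, resolution // 32, resolution // 32)
print(head(features).shape)                                # torch.Size([1, 1000, 1, 1])
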
Example #18
    def __init__(self, channels_in):
        super(InceptionE, self).__init__()
        self.branch1x1 = Conv2d_BN(channels_in, 320, 1, stride=1,
                                   padding=0)  # 320 channels

        self.branch3x3_1 = Conv2d_BN(channels_in, 384, 1, stride=1, padding=0)
        self.branch3x3_2a = Conv2d_BN(384,
                                      384, (1, 3),
                                      stride=1,
                                      padding=(0, 1))
        self.branch3x3_2b = Conv2d_BN(384,
                                      384, (3, 1),
                                      stride=1,
                                      padding=(1, 0))
        # 768 channels

        self.branch3x3dbl_1 = Sequential(
            Conv2d_BN(channels_in, 448, 1, stride=1, padding=0),
            Conv2d_BN(448, 384, 3, stride=1, padding=1))
        self.branch3x3dbl_2a = Conv2d_BN(384,
                                         384, (1, 3),
                                         stride=1,
                                         padding=(0, 1))
        self.branch3x3dbl_2b = Conv2d_BN(384,
                                         384, (3, 1),
                                         stride=1,
                                         padding=(1, 0))
        # 768 channels

        self.branch_pool = Sequential(AvgPool2d(3, stride=1, padding=1),
                                      Conv2d_BN(channels_in,
                                                192,
                                                1,
                                                stride=1,
                                                padding=0))  # 192 channels
Example #19
    def __init__(self, in_channels, out_channels, use_eql=True):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import LeakyReLU, Conv2d, AvgPool2d

        super().__init__()

        if use_eql:
            self.conv_1 = _equalized_conv2d(in_channels, in_channels, (3, 3),
                                            pad=1, bias=True)
            self.conv_2 = _equalized_conv2d(in_channels, out_channels, (3, 3),
                                            pad=1, bias=True)
        else:
            # convolutional modules
            self.conv_1 = Conv2d(in_channels, in_channels, (3, 3),
                                 padding=1, bias=True)
            self.conv_2 = Conv2d(in_channels, out_channels, (3, 3),
                                 padding=1, bias=True)

        # downsampler
        self.downsampler = AvgPool2d(2)
 
        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
Example #20
    def __init__(self,
                 in_channel,
                 out_channel,
                 img_channel=3,
                 dimension_reduction=2,
                 kernel_size=(3, 3),
                 stride=(1, 1),
                 padding=(1, 1),
                 activation=LeakyReLU(0.2),
                 scheme="simple") -> Tensor:
        super().__init__()

        self.activation = activation
        self.concat = PhiScheme(img_channels=img_channel,
                                in_channels=in_channel,
                                scheme=scheme)
        self.conv_first = Conv2d(in_channels=img_channel + in_channel + 1,
                                 out_channels=in_channel,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 padding=padding)
        self.conv_second = Conv2d(in_channels=in_channel,
                                  out_channels=out_channel,
                                  kernel_size=kernel_size,
                                  stride=stride,
                                  padding=padding)
        self.avg_pool = AvgPool2d(kernel_size=dimension_reduction,
                                  stride=dimension_reduction)
Example #21
 def __init__(self, input_nc=1, output_nc=1, depth=64, use_sigmoid=True, use_liner=True, norm_type="batch",
              active_type="ReLU"):
     super(NLayer_D, self).__init__()
     self.norm = getNormLayer(norm_type)
     self.active = getActiveLayer(active_type)
     self.use_sigmoid = use_sigmoid
     self.use_liner = use_liner
     # 256 x 256
     self.layer1 = Sequential(Conv2d(input_nc + output_nc, depth, kernel_size=7, stride=1, padding=3),
                              LeakyReLU(0.2))
     # 128 x 128
     self.layer2 = Sequential(Conv2d(depth, depth * 2, kernel_size=3, stride=1, padding=1),
                              self.norm(depth * 2, affine=True),
                              LeakyReLU(0.2), MaxPool2d(2, 2))
     # 64 x 64
     self.layer3 = Sequential(Conv2d(depth * 2, depth * 4, kernel_size=3, stride=1, padding=1),
                              self.norm(depth * 4, affine=True),
                              LeakyReLU(0.2), MaxPool2d(2, 2))
     # 32 x 32
     self.layer4 = Sequential(Conv2d(depth * 4, depth * 8, kernel_size=3, stride=1, padding=1),
                              self.norm(depth * 8, affine=True),
                              LeakyReLU(0.2), AvgPool2d(2, 2))
     # 16 x 16
     self.layer5 = Sequential(Conv2d(depth * 8, output_nc, kernel_size=7, stride=1, padding=3))
     # 16 x 16 ,1
     self.liner = Linear(256, 1)
     self.sigmoid = Sigmoid()
Example #22
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = ReLU(inplace=True)
        self.maxpool = MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = AvgPool2d(4, stride=1)
        self.fc = Linear(512 * block.expansion, num_classes)
        self.bn2 = BatchNorm1d(num_classes)

        for m in self.modules():
            if isinstance(m, Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, sqrt(2. / n))
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
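
Because self.avgpool uses a fixed AvgPool2d(4, stride=1) kernel, this ResNet variant only flattens cleanly into the Linear layer when layer4 yields a 4x4 map (for instance a 128x128 input with the strides above and a BasicBlock-style backbone, expansion 1). A common input-size-agnostic substitution, not what this snippet does, is AdaptiveAvgPool2d((1, 1)); a small comparison under assumed shapes:

import torch
from torch.nn import AdaptiveAvgPool2d, AvgPool2d

feat = torch.randn(1, 512, 4, 4)                     # assumed layer4 output for a 128x128 input
print(AvgPool2d(4, stride=1)(feat).shape)            # torch.Size([1, 512, 1, 1])

# drop-in alternative (not used above) that works for any spatial size
feat_large = torch.randn(1, 512, 7, 7)               # e.g. from a 224x224 input
print(AdaptiveAvgPool2d((1, 1))(feat_large).shape)   # torch.Size([1, 512, 1, 1])
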
Example #23
    def __init__(self, in_channels, out_channels, use_coord_conv=True):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param use_coord_conv: whether to use coord_conv [default=True]
        """
        from torch.nn import AvgPool2d, LeakyReLU

        if use_coord_conv:
            Conv = CoordConv
        else:
            from torch.nn import Conv2d as Conv

        super().__init__()

        # convolutional modules
        self.conv_1 = Conv(in_channels,
                           in_channels, (3, 3),
                           padding=1,
                           bias=True)
        self.conv_2 = Conv(in_channels,
                           out_channels, (3, 3),
                           padding=1,
                           bias=True)
        self.downSampler = AvgPool2d(2)  # downsampler

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
Example #24
    def __init__(
        self,
        in_channel,
        out_channel,
        img_channel=3,
        dimension_reduction=2,
        kernel_size=(3, 3),
        stride=(1, 1),
        padding=(1, 1),
        activation=LeakyReLU(0.2)
    ) -> Tensor:
        super().__init__()

        self.activation = activation
        self.from_rgb = FromRGB(img_channels=img_channel,
                                out_channels=in_channel)
        self.conv_first = Conv2d(in_channels=in_channel + 1,
                                 out_channels=in_channel,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 padding=padding)
        self.conv_second = Conv2d(in_channels=in_channel,
                                  out_channels=out_channel,
                                  kernel_size=kernel_size,
                                  stride=stride,
                                  padding=padding)
        self.avg_pool = AvgPool2d(kernel_size=dimension_reduction,
                                  stride=dimension_reduction)
Example #25
    def __init__(self, in_channels, out_channels, dilation=1):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        """
        from torch.nn import AvgPool2d, LeakyReLU
        from torch.nn import Conv2d
        super().__init__()
        self.batch_discriminator = MinibatchStdDev()

        # convolutional modules
        self.conv_1 = Conv2d(in_channels + 1,
                             in_channels, (3, 3),
                             dilation=dilation,
                             padding=dilation,
                             bias=True)
        self.conv_2 = Conv2d(in_channels,
                             out_channels, (3, 3),
                             dilation=dilation,
                             padding=dilation,
                             bias=True)
        self.downSampler = AvgPool2d(2)  # downsampler

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
Example #26
    def __init__(self, in_channels, out_channels, use_eql):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import AvgPool2d, LeakyReLU

        super(DisGeneralConvBlock, self).__init__()

        if use_eql:
            self.conv_1 = _equalized_conv2d(in_channels,
                                            in_channels, (3, 3),
                                            pad=1,
                                            bias=True)
            self.conv_2 = _equalized_conv2d(in_channels,
                                            out_channels, (3, 3),
                                            pad=1,
                                            bias=True)
        else:
            from torch.nn import Conv2d
            self.conv_1 = Conv2d(in_channels,
                                 in_channels, (3, 3),
                                 padding=1,
                                 bias=True)
            self.conv_2 = Conv2d(in_channels,
                                 out_channels, (3, 3),
                                 padding=1,
                                 bias=True)

        self.downSampler = AvgPool2d(2)

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
Example #27
 def __init__(self, channels_in, channels_7x7):
     super(InceptionC, self).__init__()
     self.branch1x1 = Conv2d_BN(channels_in, 192, 1, stride=1,
                                padding=0)  # 192 channels
     self.branch7x7 = Sequential(
         Conv2d_BN(channels_in, channels_7x7, 1, stride=1, padding=0),
         Conv2d_BN(channels_7x7,
                   channels_7x7, (1, 7),
                   stride=1,
                   padding=(0, 3)),
         Conv2d_BN(channels_7x7, 192, (7, 1), stride=1,
                   padding=(3, 0)))  # 192 channels
     self.branch7x7dbl = Sequential(
         Conv2d_BN(channels_in, channels_7x7, 1, stride=1, padding=0),
         Conv2d_BN(channels_7x7,
                   channels_7x7, (7, 1),
                   stride=1,
                   padding=(3, 0)),
         Conv2d_BN(channels_7x7,
                   channels_7x7, (1, 7),
                   stride=1,
                   padding=(0, 3)),
         Conv2d_BN(channels_7x7,
                   channels_7x7, (7, 1),
                   stride=1,
                   padding=(3, 0)),
         Conv2d_BN(channels_7x7, 192, (1, 7), stride=1,
                   padding=(0, 3)))  # 192 channels
     self.branch_pool = Sequential(AvgPool2d(3, stride=1, padding=1),
                                   Conv2d_BN(channels_in,
                                             192,
                                             1,
                                             stride=1,
                                             padding=0))  # 192 channels
Example #28
    def initLayers(self, params):
        bitwidths, kernel_sizes, nClasses = params
        bitwidths = bitwidths.copy()

        layersPlanes = self.initLayersPlanes()

        # init previous layer
        prevLayer = None

        # create list of layers from layersPlanes
        # supports bitwidth as list of ints, i.e. same bitwidths to all layers
        layers = ModuleList()
        for i, (layerType, in_planes, out_planes,
                input_size) in enumerate(layersPlanes):
            # build layer
            l = layerType(bitwidths, in_planes, out_planes, kernel_sizes, 1,
                          input_size, prevLayer)
            # add layer to layers list
            layers.append(l)

        self.avgpool = AvgPool2d(8)
        # self.fc = MixedLinear(bitwidths, 64, 10)
        self.fc = Linear(64, nClasses).cuda()

        # # turn off gradients in Linear layer
        # for p in self.fc.parameters():
        #     p.requires_grad = False

        return layers
Example #29
 def _iterate(input_, of):
     outs = list(model(input_))
     loss = []
     for i, out in enumerate(outs):
         factor = of.shape[2] // out.shape[2]
         gt = AvgPool2d(factor, factor)(of).detach().data
         loss += [criterion(out, gt) * loss_weight[i]]
     return sum(loss).item(), outs[-1]
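
The loop above average-pools the full-resolution target of down to each intermediate output's resolution before applying the criterion. A minimal sketch of that shape-matching step, with assumed tensor sizes and an L1Loss standing in for the actual criterion:

import torch
from torch.nn import AvgPool2d, L1Loss

criterion = L1Loss()                        # placeholder for the real criterion

of = torch.randn(2, 2, 256, 256)            # assumed full-resolution target (e.g. optical flow)
out = torch.randn(2, 2, 64, 64)             # assumed intermediate network output

factor = of.shape[2] // out.shape[2]        # 4
gt = AvgPool2d(factor, factor)(of)          # pooled target now matches out: (2, 2, 64, 64)
print(gt.shape, criterion(out, gt).item())
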
Example #30
    def __init__(self, height=7, feature_size=512, use_eql=True):
        """
        constructor for the class
        :param height: total height of the discriminator (Must be equal to the Generator depth)
        :param feature_size: size of the deepest features extracted
                             (Must be equal to Generator latent_size)
        :param use_eql: whether to use equalized learning rate
        """
        from torch.nn import ModuleList, AvgPool2d
        from pro_gan_pytorch.CustomLayers import DisGeneralConvBlock, DisFinalBlock

        super(Discriminator, self).__init__()

        assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
            "latent size not a power of 2"
        if height >= 4:
            assert feature_size >= np.power(2, height - 4), "feature size cannot be produced"

        # create state of the object
        self.use_eql = use_eql
        self.height = height
        self.feature_size = feature_size

        self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)

        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([])  # initialize to empty list

        # create the fromRGB layers for various inputs:
        if self.use_eql:
            from pro_gan_pytorch.CustomLayers import _equalized_conv2d
            self.fromRGB = lambda out_channels: \
                _equalized_conv2d(3, out_channels, (1, 1), bias=True)
        else:
            from torch.nn import Conv2d
            self.fromRGB = lambda out_channels: Conv2d(3, out_channels, (1, 1), bias=True)

        self.rgb_to_features = ModuleList([self.fromRGB(self.feature_size)])

        # create the remaining layers
        for i in range(self.height - 1):
            if i > 2:
                layer = DisGeneralConvBlock(
                    int(self.feature_size // np.power(2, i - 2)),
                    int(self.feature_size // np.power(2, i - 3)),
                    use_eql=self.use_eql
                )
                rgb = self.fromRGB(int(self.feature_size // np.power(2, i - 2)))
            else:
                layer = DisGeneralConvBlock(self.feature_size,
                                            self.feature_size, use_eql=self.use_eql)
                rgb = self.fromRGB(self.feature_size)

            self.layers.append(layer)
            self.rgb_to_features.append(rgb)

        # register the temporary downSampler
        self.temporaryDownsampler = AvgPool2d(2)