Example #1
 def __init__(self, opt):
     super(BaseEncoder, self).__init__()
     self.enc_channels = opt.enc_channels
     self.num_downsamples = opt.num_downsamples
     self.padding_layer = get_padding(opt.enc_padding)
     self.activ_layer = get_activ(opt.enc_activ)
     self.norm_layer = get_norm(opt.enc_norm)
     self.p_dropout = opt.enc_dropout
     self._build_layers(opt)
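Every example here calls get_norm, get_activ and get_padding to turn an option string into a layer constructor; those helpers are not shown on this page. A minimal sketch of what they could look like (the exact string-to-layer mappings below are an assumption, not the original implementation):

 import functools
 import torch.nn as nn

 def get_norm(name):
     # map an option string to a normalization-layer constructor
     return {'instance': nn.InstanceNorm2d,
             'batch': nn.BatchNorm2d,
             'none': nn.Identity}[name]

 def get_activ(name):
     # map an option string to an activation-layer constructor
     return {'gelu': nn.GELU,
             'relu': nn.ReLU,
             'lrelu': functools.partial(nn.LeakyReLU, 0.2)}[name]

 def get_padding(name):
     # map an option string to a padding-layer constructor
     return {'reflection': nn.ReflectionPad2d,
             'replication': nn.ReplicationPad2d,
             'zero': nn.ZeroPad2d}[name]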
Example #2
 def __init__(self, opt):
     super(BasePatchDiscriminator, self).__init__()
     self.dsc_channels = opt.dsc_channels
     self.dsc_layers = opt.dsc_layers
     self.dsc_scales = opt.dsc_scales
     self.norm_layer = get_norm(opt.dsc_norm)
     self.activ_layer = get_activ(opt.dsc_activ)
     self.padding_layer = get_padding(opt.dsc_padding)
     self.p_dropout = opt.dsc_dropout
     self.num_scales = len(self.dsc_scales)
     self._build_layers(opt)
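The opt argument threaded through these constructors is a flat options object (e.g. produced by argparse). A hypothetical set of the discriminator fields the constructor above reads; the values are illustrative only:

 from argparse import Namespace

 opt = Namespace(
     dsc_channels=64,          # base channel width
     dsc_layers=3,             # conv layers per discriminator
     dsc_scales=[1, 2, 4],     # one discriminator per input scale
     dsc_norm='instance',
     dsc_activ='lrelu',
     dsc_padding='reflection',
     dsc_dropout=0.0,
 )
 model = BasePatchDiscriminator(opt)  # assumes the class defined above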
Example #3
 def __init__(self, in_channels, out_channels, num_blocks, num_layers, opt):
     super(ScalingResidualBlock, self).__init__()
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.num_blocks = num_blocks
     self.num_layers = num_layers
     self.num_channels = max(self.in_channels, self.out_channels)
     self.padding_layer = get_padding(opt.latent_padding)
     self.activ_layer = get_activ(opt.latent_activ)
     self.norm_layer = get_norm(opt.latent_norm)
     self.p_dropout = opt.latent_dropout
     self._build_layers(opt)
Example #4
 def __init__(self,
              in_channels,
              out_channels,
              dropout=0.,
              activ='gelu',
              norm='instance',
              padding='reflection'):
     super().__init__()
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.norm_layer = get_norm(norm)
     self.activ_layer = get_activ(activ)
     self.padding_layer = get_padding(padding)
     self.p_dropout = dropout
     self._build_layers()
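Unlike the opt-driven classes, this block is configured through plain keyword arguments with defaults, so it can be constructed directly; ConvBlock below is a placeholder name, since the snippet does not show the class header:

 # ConvBlock is a stand-in for the unnamed class above
 block = ConvBlock(in_channels=64,
                   out_channels=128,
                   dropout=0.1)  # activ/norm/padding keep their defaults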
Example #5
 def __init__(self, opt, out_channels, type='continuous'):
     super(BaseDecoder, self).__init__()
     self.out_channels = out_channels
     self.enc_channels = opt.enc_channels
     self.dec_channels = opt.dec_channels
     self.num_upsamples = opt.num_downsamples  # mirror the encoder's downsample count
     # these are layer constructors, not layer instances
     self.norm_layer = get_norm(opt.dec_norm)
     self.activ_layer = get_activ(opt.dec_activ)
     self.padding_layer = get_padding(opt.dec_padding)
     self.p_dropout = opt.dec_dropout
     if type == 'continuous':
         self.activ_final = nn.Tanh()
     elif type == 'probabilities':
         self.activ_final = nn.Sigmoid()
     else:
         raise ValueError(f'unknown output type: {type}')
     self._build_layers(opt)
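The type flag only chooses the output squashing: Tanh maps the raw decoder output into (-1, 1) for continuous signals such as normalized images, Sigmoid into (0, 1) for per-pixel probabilities. A quick check of the ranges:

 import torch
 import torch.nn as nn

 x = torch.randn(1, 3, 4, 4) * 5        # raw, unbounded decoder features
 t = nn.Tanh()(x)                       # 'continuous' mode
 s = nn.Sigmoid()(x)                    # 'probabilities' mode
 print(t.min().item(), t.max().item())  # within (-1, 1)
 print(s.min().item(), s.max().item())  # within (0, 1)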
Example #6
 def __init__(self, opt):
     super(AggregatedLargeDilationEncoder, self).__init__()
     self.enc_channels = opt.enc_channels
     self.num_downsamples = opt.num_downsamples
     self.dilations = opt.enc_dilations
     final_dilation = self.dilations[-1]
     for i in range(1, self.num_downsamples + 1):
         # extend the dilation schedule: one extra, larger
         # dilation per downsampling stage
         self.dilations = self.dilations + [final_dilation + i * 2]
     self.dil_channels = opt.dil_channels
     self.padding_layer = get_padding(opt.enc_padding)
     self.activ_layer = get_activ(opt.enc_activ)
     self.norm_layer = get_norm(opt.enc_norm)
     self.p_dropout = opt.enc_dropout
     self._build_layers(opt)
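The loop above grows the configured dilation list by one larger dilation per downsampling stage. With illustrative values enc_dilations = [1, 2, 4] and num_downsamples = 3:

 dilations = [1, 2, 4]                  # stand-in for opt.enc_dilations
 num_downsamples = 3
 final_dilation = dilations[-1]
 for i in range(1, num_downsamples + 1):
     dilations = dilations + [final_dilation + i * 2]
 print(dilations)                       # [1, 2, 4, 6, 8, 10]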
Example #7
 def __init__(self, opt, out_channels, type='continuous'):
     super(AggregatedLargeDilationDecoder, self).__init__()
     self.out_channels = out_channels
     self.enc_channels = opt.enc_channels
     self.dec_channels = opt.dec_channels
     self.num_upsamples = opt.num_downsamples  # mirror the encoder's downsample count
     self.dil_channels = opt.dil_channels
     self.dilations = opt.dec_dilations
     final_dilation = self.dilations[-1]
     for i in range(1, self.num_upsamples + 1):
         # extend the dilation schedule, mirroring the encoder
         self.dilations = self.dilations + [final_dilation + i * 2]
     # these are layer constructors, not layer instances
     self.norm_layer = get_norm(opt.dec_norm)
     self.activ_layer = get_activ(opt.dec_activ)
     if type == 'continuous':
         self.activ_final = nn.Tanh()
     elif type == 'probabilities':
         self.activ_final = nn.Sigmoid()
     else:
         raise ValueError(f'unknown output type: {type}')
     self.padding_layer = get_padding(opt.dec_padding)
     self.p_dropout = opt.dec_dropout
     self._build_layers(opt)
 def __init__(self,
              in_channels,
              out_channels,
              dil_channels,
              dilations,
              dropout=0.,
              activ='gelu',
              norm='instance',
              padding='reflection',
              residual=False):
     super().__init__()
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.dil_channels = dil_channels
     self.dilations = dilations
     self.num_dilations = len(dilations)
     self.residual = residual
     self.norm_layer = get_norm(norm)
     self.activ_layer = get_activ(activ)
     self.padding_layer = get_padding(padding)
     self.p_dropout = dropout
     self._build_layers()
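Judging by its parameters, this last block runs one dilated convolution per entry in dilations, each dil_channels wide, and aggregates the parallel outputs. A minimal sketch of that pattern; this is an assumption about what _build_layers constructs, not the original code:

 import torch
 import torch.nn as nn

 class DilatedAggregation(nn.Module):
     # Hypothetical sketch: parallel dilated 3x3 convs, concatenated
     # and projected back to out_channels with a 1x1 conv.
     def __init__(self, in_channels, out_channels, dil_channels, dilations):
         super().__init__()
         self.branches = nn.ModuleList(
             nn.Conv2d(in_channels, dil_channels, kernel_size=3,
                       padding=d, dilation=d)  # keeps spatial size
             for d in dilations)
         self.project = nn.Conv2d(dil_channels * len(dilations),
                                  out_channels, kernel_size=1)

     def forward(self, x):
         y = torch.cat([branch(x) for branch in self.branches], dim=1)
         return self.project(y)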