Example #1
    def __init__(self):
        super(ImgEncoderFg, self).__init__()

        assert arch.G in [4, 8, 16]
        # Adjust stride such that the output dimension of the volume matches (G, G, ...)
        last_stride = 2 if arch.G in [8, 4] else 1
        second_to_last_stride = 2 if arch.G in [4] else 1

        # Foreground Image Encoder in the paper
        # Encoder: (B, C, Himg, Wimg) -> (B, E, G, G)
        # G is H=W in the paper
        self.enc = nn.Sequential(
            nn.Conv2d(3, 16, 4, 2, 1), nn.CELU(), nn.GroupNorm(4, 16),
            nn.Conv2d(16, 32, 4, 2, 1), nn.CELU(), nn.GroupNorm(8, 32),
            nn.Conv2d(32, 64, 4, 2, 1), nn.CELU(), nn.GroupNorm(8, 64),
            nn.Conv2d(64, 128, 3, second_to_last_stride, 1), nn.CELU(),
            nn.GroupNorm(16, 128), nn.Conv2d(128, 256, 3, last_stride, 1),
            nn.CELU(), nn.GroupNorm(32, 256),
            nn.Conv2d(256, arch.img_enc_dim_fg, 1), nn.CELU(),
            nn.GroupNorm(16, arch.img_enc_dim_fg))

        # Residual Connection in the paper
        # Remark: this residual connection is not important
        # Lateral connection (B, E, G, G) -> (B, E, G, G)
        self.enc_lat = nn.Sequential(
            nn.Conv2d(arch.img_enc_dim_fg, arch.img_enc_dim_fg, 3, 1, 1),
            nn.CELU(), nn.GroupNorm(16, arch.img_enc_dim_fg),
            nn.Conv2d(arch.img_enc_dim_fg, arch.img_enc_dim_fg, 3, 1, 1),
            nn.CELU(), nn.GroupNorm(16, arch.img_enc_dim_fg))

        # Residual Encoder in the paper
        # Remark: also not important
        # enc + lateral -> enc (B, 2*E, G, G) -> (B, 128, G, G)
        self.enc_cat = nn.Sequential(
            nn.Conv2d(arch.img_enc_dim_fg * 2, 128, 3, 1, 1), nn.CELU(),
            nn.GroupNorm(16, 128))

        # Image encoding -> latent distribution parameters (B, 128, G, G) -> (B, D, G, G)
        self.z_scale_net = nn.Conv2d(128, (arch.z_where_scale_dim) * 2, 1)
        self.z_shift_net = nn.Conv2d(128, (arch.z_where_shift_dim) * 2, 1)
        self.z_pres_net = nn.Conv2d(128, arch.z_pres_dim, 1)
        self.z_depth_net = nn.Conv2d(128, arch.z_depth_dim * 2, 1)

        # (G, G). Grid center offset. (offset_x[i, j], offset_y[i, j]) is the center for cell (i, j)
        offset_y, offset_x = torch.meshgrid(
            [torch.arange(arch.G), torch.arange(arch.G)])

        # (2, G, G). I do this just to ensure that device is correct.
        self.register_buffer('offset',
                             torch.stack((offset_x, offset_y), dim=0).float())
Example #2
 def __init__(self, c_in, c_out, kernel_size, stride, padding, dilation, affine=True):
     super(DilConv, self).__init__()
     self.op = nn.Sequential(
         nn.CELU(0.075),
         nn.Conv2d(c_in, c_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,
                   groups=c_in, bias=False),
         nn.Conv2d(c_in, c_out, kernel_size=1, padding=0, bias=False),
         nn.BatchNorm2d(c_out, affine=affine),
     )
Example #3
 def __init__(self, inp_chnl, out_chnl):
     super(ConvCEluGrNorm, self).__init__()
     self.conv = nn.Conv2d(in_channels=inp_chnl,
                           out_channels=out_chnl,
                           kernel_size=3,
                           padding=1,
                           bias=False)
     self.norm = nn.GroupNorm(num_groups=16, num_channels=out_chnl)
     self.celu = nn.CELU(inplace=True)
Example #4
    def __init__(self,
                 input_shape,
                 num_layers,
                 neurons,
                 activator_id=5,
                 optimizer_id=0):
        super().__init__()
        self.num_layers = num_layers  # number of layers
        self.neurons = neurons  # number of neurons in each layer e.g. for 2 layers, neurons=[10,20]
        self.activator_id = activator_id  # activation function, can be one of the following: ELU, Hardshrink, LeakyReLU, LogSigmoid, PReLU, ReLU, ReLU6, RReLU, SELU, CELU, Sigmoid
        self.optimizer_id = optimizer_id  # optimizer id, can be one of the following: Adadelta, Adagrad, Adam, Adamax, ASGD, RMSprop, Rprop, SGD

        # set activation function
        if (activator_id == 0):
            self.activator = nn.ELU()
        elif (activator_id == 1):
            self.activator = nn.Hardshrink()
        elif (activator_id == 2):
            self.activator = nn.LeakyReLU()
        elif (activator_id == 3):
            self.activator = nn.LogSigmoid()
        elif (activator_id == 4):
            self.activator = nn.PReLU()
        elif (activator_id == 5):
            self.activator = nn.ReLU()
        elif (activator_id == 6):
            self.activator = nn.ReLU6()
        elif (activator_id == 7):
            self.activator = nn.RReLU()
        elif (activator_id == 8):
            self.activator = nn.SELU()
        elif (activator_id == 9):
            self.activator = nn.CELU()

        # network architecture
        if (num_layers == 1):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], 1))
        elif (num_layers == 2):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], self.neurons[1]), self.activator,
                nn.Linear(self.neurons[1], 1))
        elif (num_layers == 3):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], self.neurons[1]), self.activator,
                nn.Linear(self.neurons[1], self.neurons[2]), self.activator,
                nn.Linear(self.neurons[2], 1))
        elif (num_layers == 4):
            self.layers = nn.Sequential(
                nn.Linear(input_shape, self.neurons[0]), self.activator,
                nn.Linear(self.neurons[0], self.neurons[1]), self.activator,
                nn.Linear(self.neurons[1], self.neurons[2]), self.activator,
                nn.Linear(self.neurons[2], self.neurons[3]), self.activator,
                nn.Linear(self.neurons[3], 1))
Example #5
 def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, padding=None):
     if padding is None:
         padding = (kernel_size - 1) // 2
     super(ConvBNCELU, self).__init__(
         nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups,
                   bias=False),
         nn.BatchNorm2d(out_planes),
         nn.CELU(inplace=True),
     )
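The layers are handed straight to super().__init__, so ConvBNCELU presumably subclasses nn.Sequential and can be called like any module. A minimal usage sketch under that assumption:

import torch
import torch.nn as nn

class ConvBNCELU(nn.Sequential):  # assumed base class, matching the super() call above
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, padding=None):
        if padding is None:
            padding = (kernel_size - 1) // 2  # "same" padding for odd kernel sizes
        super(ConvBNCELU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                      padding=padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.CELU(inplace=True))

y = ConvBNCELU(3, 16, stride=2)(torch.randn(1, 3, 32, 32))
print(y.shape)  # torch.Size([1, 16, 16, 16]) -- stride 2 halves H and W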
Example #6
 def __init__(self, dim, low_dim=16, kernel_size=3, padding=1, stride=1):
     super(Low_ResBlock, self).__init__()
     self.conv = nn.Sequential(
         nn.Conv2d(dim, low_dim, kernel_size=1, stride=1, padding=0),
         nn.BatchNorm2d(low_dim),
         nn.CELU(),
         nn.Conv2d(low_dim,
                   low_dim,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   groups=low_dim),
         nn.BatchNorm2d(low_dim),
         nn.CELU(),
         nn.Conv2d(low_dim, dim, kernel_size=1, stride=1, padding=0),
         nn.BatchNorm2d(dim),
         nn.CELU(),
     )
Example #7
    def __init__(self,
                 in_planes,
                 planes,
                 stride=1,
                 activation='ReLU',
                 softplus_beta=1):
        super(PreActBlock, self).__init__()
        self.bn1 = normal_func(in_planes,
                               track_running_stats=track_running_stats,
                               affine=affine)
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = normal_func(planes,
                               track_running_stats=track_running_stats,
                               affine=affine)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)

        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False))
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
            print('ReLU')
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
            print('Softplus')
        elif activation == 'GELU':
            self.relu = nn.GELU()
            print('GELU')
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
            print('ELU')
        elif activation == 'LeakyReLU':
            self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
            print('LeakyReLU')
        elif activation == 'SELU':
            self.relu = nn.SELU(inplace=True)
            print('SELU')
        elif activation == 'CELU':
            self.relu = nn.CELU(alpha=1.2, inplace=True)
            print('CELU')
        elif activation == 'Tanh':
            self.relu = nn.Tanh()
            print('Tanh')
Example #8
 def function(self):
     if self.value == 0:
         return nn.ReLU()
     elif self.value == 1:
         return nn.GELU()
     elif self.value == 2:
         return nn.CELU()
     else:
         raise ValueError("Invalid activation function type.")
Example #9
    def __init__(self,
                 atom_vertex_dim,
                 atom_edge_dim,
                 orbital_vertex_dim=NotImplemented,
                 orbital_edge_dim=NotImplemented,
                 output_dim=NotImplemented,
                 mp_step=6,
                 s2s_step=6):
        super(MultiNet, self).__init__()
        self.atom_vertex_dim = atom_vertex_dim
        self.atom_edge_dim = atom_edge_dim
        self.orbital_vertex_dim = orbital_vertex_dim
        self.orbital_edge_dim = orbital_edge_dim
        self.output_dim = output_dim
        self.mp_step = mp_step
        self.s2s_step = s2s_step

        # atom net
        atom_edge_gc = nn.Sequential(nn.Linear(atom_edge_dim[1], atom_vertex_dim[1] ** 2), nn.Dropout(0.2))

        self.atom_vertex_conv = NNConv(atom_vertex_dim[1], atom_vertex_dim[1], atom_edge_gc, aggr="mean", root_weight=True)
        self.atom_vertex_gru = nn.GRU(atom_vertex_dim[1], atom_vertex_dim[1])

        self.atom_s2s = Set2Set(atom_vertex_dim[1], processing_steps=s2s_step)
        self.atom_lin0 = nn.Sequential(nn.Linear(atom_vertex_dim[0], 2 * atom_vertex_dim[0]), nn.CELU(),
                                       nn.Linear(2 * atom_vertex_dim[0], atom_vertex_dim[1]), nn.CELU())
        self.atom_lin1 = nn.Sequential(nn.Linear(atom_edge_dim[0], 2 * atom_edge_dim[0]), nn.CELU(),
                                       nn.Linear(2 * atom_edge_dim[0], atom_edge_dim[1]), nn.CELU())
        self.atom_lin2 = nn.Sequential(nn.Linear(2 * atom_vertex_dim[1], 4 * atom_vertex_dim[1]), nn.CELU())

        # orbital net
        orbital_edge_gc = nn.Sequential(nn.Linear(orbital_edge_dim[1], orbital_vertex_dim[1] ** 2), nn.Dropout(0.2))

        self.orbital_vertex_conv = NNConv(orbital_vertex_dim[1], orbital_vertex_dim[1], orbital_edge_gc, aggr="mean", root_weight=True)
        self.orbital_vertex_gru = nn.GRU(orbital_vertex_dim[1], orbital_vertex_dim[1])

        self.orbital_s2s = Set2Set(orbital_vertex_dim[1], processing_steps=s2s_step)
        self.orbital_lin0 = nn.Sequential(nn.Linear(orbital_vertex_dim[0], 2 * orbital_vertex_dim[0]), nn.CELU(),
                                          nn.Linear(2 * orbital_vertex_dim[0], orbital_vertex_dim[1]), nn.CELU())
        self.orbital_lin1 = nn.Sequential(nn.Linear(orbital_edge_dim[0], 2 * orbital_edge_dim[0]), nn.CELU(),
                                          nn.Linear(2 * orbital_edge_dim[0], orbital_edge_dim[1]), nn.CELU())
        self.orbital_lin2 = nn.Sequential(nn.Linear(2 * orbital_vertex_dim[1], 4 * orbital_vertex_dim[1]), nn.CELU())

        # cross net
        self.cross_lin0 = nn.Sequential(
            nn.Linear(4 * atom_vertex_dim[1] + 4 * orbital_vertex_dim[1], 4 * output_dim),
            nn.CELU(),
            nn.Linear(4 * output_dim, output_dim)
        )
        self.cross_o2a_lin = nn.Sequential(nn.Linear(orbital_vertex_dim[1], 2 * orbital_vertex_dim[1]), nn.CELU(),
                                           nn.Linear(2 * orbital_vertex_dim[1], int(atom_vertex_dim[1] / 2)), nn.CELU())
        self.cross_o2a_s2s = Set2Set(int(atom_vertex_dim[1] / 2), processing_steps=s2s_step)
        self.cross_o2a_gru = nn.GRU(atom_vertex_dim[1], atom_vertex_dim[1])
        self.cross_a2o_lin = nn.Sequential(nn.Linear(atom_vertex_dim[1], 2 * atom_vertex_dim[1]), nn.CELU(),
                                           nn.Linear(2 * atom_vertex_dim[1], orbital_vertex_dim[1]), nn.CELU())
        self.cross_a2o_gru = nn.GRU(orbital_vertex_dim[1], orbital_vertex_dim[1])
Example #10
    def __init__(self):
        nn.Module.__init__(self)
        embed_size = ARCH.GLIMPSE_SIZE // 16
        self.enc = nn.Sequential(
            nn.Conv2d(3, 16, 3, 2, 1),
            nn.CELU(),
            nn.GroupNorm(1, 16),
            nn.Conv2d(16, 32, 3, 2, 1),
            nn.CELU(),
            nn.GroupNorm(2, 32),
            nn.Conv2d(32, 64, 3, 2, 1),
            nn.CELU(),
            nn.GroupNorm(4, 64),
            nn.Conv2d(64, 128, 3, 2, 1),
            nn.CELU(),
            nn.GroupNorm(8, 128),
        )

        self.enc_what = nn.Linear(128 * embed_size**2, ARCH.BG_PROPOSAL_DIM)
Example #11
def conv_bn_relu(c_in, c_out, kernel_size=(3, 3), padding=(1, 1)):
    return nn.Sequential(
        nn.Conv2d(c_in,
                  c_out,
                  kernel_size=kernel_size,
                  padding=padding,
                  bias=False),
        GhostBatchNorm(c_out, num_splits=16),
        nn.CELU(alpha=0.3),
    )
Example #12
def get_act(act="relu"):
    if act == "relu": return nn.ReLU()
    elif act == "elu": return nn.ELU()
    elif act == "celu": return nn.CELU()
    elif act == "leaky_relu": return nn.LeakyReLU()
    elif act == "sigmoid": return nn.Sigmoid()
    elif act == "tanh": return nn.Tanh()
    elif act == "linear": return nn.Identity()
    elif act == 'softplus': return nn.modules.activation.Softplus()
    else: return None
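A short usage sketch (assuming the function above is in scope and torch is imported): the returned object is a plain module instance, e.g. nn.CELU with its default alpha=1.0.

import torch

act = get_act("celu")
print(act(torch.tensor([-2.0, 0.0, 2.0])))
# tensor([-0.8647,  0.0000,  2.0000])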
Example #13
 def __init__(self, dimensions):
     super(sdae, self).__init__()
     self.dimensions = dimensions
     encoder_units = build_units(dimensions)
     self.encoder = nn.Sequential(*encoder_units)
     decoder_units = build_units(list(reversed(dimensions)))
     decoder_units[-1].add_module('activation', nn.CELU())
     # unit = [('activation', nn.Softplus())]
     # decoder_units.append(nn.Sequential(OrderedDict(unit)))
     self.decoder = nn.Sequential(*decoder_units)
Example #14
def str2act(act):
    activations = {
        'relu': nn.ReLU(),
        'selu': nn.SELU(),
        'celu': nn.CELU(),
        'softplus': nn.Softplus(),
        'softmax': nn.Softmax(),
        'sigmoid': nn.Sigmoid()
    }
    return activations[act]
Example #15
    def __init__(self, inplanes, planes, stride=1, norm_layer=None):
        super().__init__()

        if norm_layer is None: norm_layer = nn.BatchNorm2d
        self.stride = stride

        self.conv1 = conv3x3(inplanes, planes, stride=1)
        self.pool = nn.MaxPool2d(2)
        self.bn = norm_layer(planes)
        self.relu = nn.CELU(inplace=True, alpha=.075)
Example #16
 def block(input_dim, output_dim):
     return nn.Sequential(
         nn.ConvTranspose2d(input_dim,
                            output_dim,
                            5,
                            stride=2,
                            padding=2,
                            output_padding=1,
                            bias=False), nn.BatchNorm2d(output_dim),
         nn.CELU())
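A minimal shape check (a sketch with arbitrary channel counts): the ConvTranspose2d settings above (kernel 5, stride 2, padding 2, output_padding 1) exactly double the spatial size.

import torch
import torch.nn as nn

up = nn.Sequential(
    nn.ConvTranspose2d(64, 32, 5, stride=2, padding=2, output_padding=1, bias=False),
    nn.BatchNorm2d(32),
    nn.CELU())
print(up(torch.randn(1, 64, 8, 8)).shape)  # torch.Size([1, 32, 16, 16])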
Example #17
    def __init__(self, in_channel, out_channel, kernel_size, ratio, groups=1):
        super(resblock, self).__init__()
        self.layers = nn.Sequential(
            # nn.GroupNorm(num_groups=groups, num_channels=in_channel),
            nn.CELU(),
            nn.Conv1d(in_channel,
                      out_channel,
                      kernel_size,
                      padding=(kernel_size - 1) // 2,
                      groups=groups),
            # nn.GroupNorm(num_groups=groups, num_channels=out_channel),
            nn.CELU(),
            nn.Conv1d(out_channel,
                      out_channel,
                      kernel_size,
                      padding=(kernel_size - 1) // 2,
                      groups=groups))

        self.ratio = ratio
Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 ks=4,
                 stride=2,
                 padding=1,
                 dilation=1,
                 bn_enable=False,
                 upsample=False,
                 act_enable=True,
                 act_type="relu"):
        super(DecoderBlockM1, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ks = ks
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.bn_enable = bn_enable
        self.upsample = upsample
        self.act_enable = act_enable
        self.act_type = act_type

        assert self.act_type in [
            "relu", "celu", "fts+"
        ], "Error. Unknown activation function: {}".format(self.act_type)

        self.deconv = nn.Upsample(
            scale_factor=2,
            mode='bilinear') if self.upsample else nn.ConvTranspose2d(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=self.ks,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation)

        if self.upsample:
            self.conv = nn.Conv2d(in_channels=self.in_channels,
                                  out_channels=self.out_channels,
                                  kernel_size=1)

        if self.bn_enable:
            self.bn = nn.BatchNorm2d(self.out_channels)

        if self.act_enable:
            if self.act_type == "relu":
                self.act = nn.ReLU(inplace=True)
            elif self.act_type == "celu":
                self.act = nn.CELU(inplace=True)
            elif self.act_type == "fts+":
                self.act = FTSwishPlus()
            else:
                raise ValueError("Unknown value: {}".format(self.act_type))
Example #19
def get_act(name: str,
            inplace: bool = True,
            params: Optional[dict] = None) -> activation:
    """
    get activation module from pytorch
    must be one of: relu, lrelu, prelu, elu, celu, selu, linear, tanh, sigmoid

    Args:
        name (str): name of activation function desired
        inplace (bool): flag activation to do operations in-place (if option available)
        params (dict): dictionary of parameters (as per pytorch documentation)

    Returns:
        act (activation): instance of activation class
    """
    if name.lower() == 'relu':
        act = nn.ReLU(inplace=inplace)
    elif name.lower() == 'lrelu':
        act = nn.LeakyReLU(
            inplace=inplace) if params is None else nn.LeakyReLU(
                inplace=inplace, **params)
    elif name.lower() == 'prelu':
        act = nn.PReLU() if params is None else nn.PReLU(**params)
    elif name.lower() == 'elu':
        act = nn.ELU(inplace=inplace) if params is None else nn.ELU(
            inplace=inplace, **params)
    elif name.lower() == 'celu':
        act = nn.CELU(inplace=inplace) if params is None else nn.CELU(
            inplace=inplace, **params)
    elif name.lower() == 'selu':
        act = nn.SELU(inplace=inplace)
    elif name.lower() == 'linear':
        act = nn.LeakyReLU(1, inplace=inplace)  # hack to get linear output
    elif name.lower() == 'tanh':
        act = nn.Tanh()
    elif name.lower() == 'sigmoid':
        act = nn.Sigmoid()
    else:
        raise SynthNNError(
            f'Activation: "{name}" not a valid activation function or not supported.'
        )
    return act
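A short usage sketch (assuming the function above is importable): extra constructor arguments such as CELU's alpha can be forwarded through the params dict.

import torch

act = get_act('celu', inplace=False, params={'alpha': 0.5})
print(act(torch.tensor([-1.0, 1.0])))
# tensor([-0.4323,  1.0000])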
Example #20
 def __init__(self, in_plane, out_plane):
     super(HourGlass_upBlock, self).__init__()
     self.conv = nn.Sequential(
         nn.ConvTranspose2d(in_plane,
                            out_plane,
                            kernel_size=4,
                            stride=2,
                            padding=1),
         nn.BatchNorm2d(out_plane),
         nn.CELU(inplace=True),
     )
Example #21
    def __init__(self, config):
        """
        in_channels      : usually the word embedding dimension, or the hidden size
        out_channels     : int
        kernel_sizes     : list; to keep the output length equal to the input length, each size must be odd: 3, 5, 7...
        activation       : [relu, lrelu, prelu, selu, celu, gelu, sigmoid, tanh]
        pooling_strategy : [max, avg, cls]
        dropout          : float
        """
        super(CNN, self).__init__()

        # self.xxx = config.xxx
        self.in_channels = config.in_channels
        self.out_channels = config.out_channels
        self.kernel_sizes = config.kernel_sizes
        self.activation = config.activation
        self.pooling_strategy = config.pooling_strategy
        self.dropout = config.dropout
        self.keep_length = config.keep_length
        for kernel_size in self.kernel_sizes:
            assert kernel_size % 2 == 1, "kernel size has to be an odd number."

        # convolution
        self.convs = nn.ModuleList([
            nn.Conv1d(in_channels=self.in_channels,
                      out_channels=self.out_channels,
                      kernel_size=k,
                      stride=1,
                      padding=k // 2 if self.keep_length else 0,
                      dilation=1,
                      groups=1,
                      bias=False) for k in self.kernel_sizes
        ])

        # activation function
        assert self.activation in ['relu', 'lrelu', 'prelu', 'selu', 'celu', 'gelu', 'sigmoid', 'tanh'], \
            'activation function must choose from [relu, lrelu, prelu, selu, celu, gelu, sigmoid, tanh]'
        self.activations = nn.ModuleDict([
            ['relu', nn.ReLU()],
            ['lrelu', nn.LeakyReLU()],
            ['prelu', nn.PReLU()],
            ['selu', nn.SELU()],
            ['celu', nn.CELU()],
            ['gelu', GELU()],
            ['sigmoid', nn.Sigmoid()],
            ['tanh', nn.Tanh()],
        ])

        # pooling
        assert self.pooling_strategy in [
            'max', 'avg', 'cls'
        ], 'pooling strategy must choose from [max, avg, cls]'

        self.dropout = nn.Dropout(self.dropout)
Example #22
    def __init__(self, args):
        self.args = args
        super(AttEncoder, self).__init__()

        self.temporal_img_conv_net = nn.Sequential(
            nn.Conv2d(img_encode_dim, temporal_img_enc_hid_dim, 1), nn.CELU(),
            nn.GroupNorm(8, temporal_img_enc_hid_dim))

        self.temporal_img_enc_net = nn.Linear(
            temporal_img_enc_hid_dim * self.args.num_cell_h // 2 *
            self.args.num_cell_w // 2, temporal_img_enc_dim)
Example #23
    def __init__(self, config, device='cpu'):
        super(DSSMFour, self).__init__()

        # self.device = device
        # # This part of the config still needs to be sorted out
        # self.latent_out = config.latent_out_1
        # self.hidden_size = config.hidden_size
        # self.kernel_out = config.kernel_out_1
        # self.kernel_size = config.kernel_size
        # self.max_len = config.max_len
        # self.kmax = config.kmax
        #
        # self.embeddings = nn.Embedding(config.vocab_size, self.hidden_size)
        # # layers for query
        # self.query_conv = nn.Conv1d(self.hidden_size, self.kernel_out, self.kernel_size)  # 16* 64 * 1
        # self.pool_1 = nn.MaxPool1d(2)
        # self.query_conv_2 = nn.Conv1d(16, 32, self.kernel_size)
        # self.query_sem = nn.Linear(self.max_len, self.latent_out)  ## config.latent_out_1: the intermediate semantic output dimension
        #
        # # learning gamma
        # if config.loss == 'bce':
        #     self.learn_gamma = nn.Conv1d(self.latent_out * 2, 1, 1)
        # else:
        #     self.learn_gamma = nn.Linear(2 * self.latent_out, 2)
        # # self.soft = nn.Softmax(dim=1)
        #
        # self.norm = nn.BatchNorm1d(2 * self.latent_out)

        # This part of the config still needs to be sorted out
        self.latent_out = config.latent_out_1
        self.hidden_size = config.hidden_size
        self.kernel_out = config.kernel_out_1
        self.kernel_size = config.kernel_size
        self.max_len = config.max_len

        self.embeddings = nn.Embedding(config.vocab_size, self.hidden_size)

        self.convs = nn.Sequential(
            nn.Conv1d(in_channels=self.hidden_size,
                      out_channels=self.kernel_out,
                      kernel_size=self.kernel_size), nn.LeakyReLU(),
            nn.BatchNorm1d(self.kernel_out),
            nn.Conv1d(in_channels=self.kernel_out,
                      out_channels=16,
                      kernel_size=1), nn.LeakyReLU(), nn.MaxPool1d(2),
            nn.BatchNorm1d(16))
        #                              nn.BatchNorm1d(num_features=config.feature_size),
        # nn.ReLU(),
        # nn.MaxPool1d(kernel_size=self.max_len + 1))]))

        self.learn_gamma = nn.Conv1d(32, 16, 32)
        self.lrelu = nn.CELU()
        self.norm = nn.BatchNorm1d(16)
        self.fc = nn.Linear(in_features=16, out_features=2)
Example #24
    def __init__(self):
        super(ZWhatEnc, self).__init__()

        if glimpse_size == 32:
            self.enc_cnn = nn.Sequential(
                nn.Conv2d(3, 16, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(4, 16),
                nn.Conv2d(16, 32, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(8, 32),
                nn.Conv2d(32, 64, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(8, 64),
                nn.Conv2d(64, 64, 3, 1, 1),
                nn.CELU(),
                nn.GroupNorm(8, 64),
                nn.Conv2d(64, z_what_enc_dim, 4),
                nn.CELU(),
                nn.GroupNorm(16, z_what_enc_dim)
            )
        elif glimpse_size == 64:
            self.enc_cnn = nn.Sequential(
                nn.Conv2d(3, 16, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(4, 16),
                nn.Conv2d(16, 32, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(8, 32),
                nn.Conv2d(32, 64, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(8, 64),
                nn.Conv2d(64, 128, 4, 2, 1),
                nn.CELU(),
                nn.GroupNorm(16, 128),
                nn.Conv2d(128, 128, 4),
                nn.CELU(),
                nn.GroupNorm(16, 128),
            )

        self.enc_what = nn.Linear(128, z_what_dim * 2)
Example #25
    def __init__(self, z_size=16):
        super(encoder, self).__init__()
        self.contract_layers = nn.Sequential(
            nn.Conv2d(4, 32, 3, stride=2, bias=False),
            nn.CELU(),
            # nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3, stride=2, bias=False),
            nn.CELU(),
            # nn.BatchNorm2d(32),
            nn.Conv2d(32, 64, 3, stride=2, bias=False),
            nn.CELU(),
            # nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3, stride=2, bias=False),
            nn.CELU(),
            # nn.BatchNorm2d(64)
        )

        self.linear1 = nn.Linear(64 * 7 * 7, 256)
        self.linear2_logvar = nn.Linear(256, z_size)
        self.linear2_mu = nn.Linear(256, z_size)
        self.relu = nn.ReLU()
Example #26
    def __init__(self,
                 input_size,
                 output_size,
                 use_multiply=True,
                 linear_block=nn.Linear):
        super().__init__()

        self._activation = nn.CELU()
        self._linear = linear_block(input_size, output_size)
        self._use_multiply = use_multiply
        if self._use_multiply:
            self._to_multiplier = linear_block(output_size, output_size)
Example #27
    def __init__(self, in_out_nz, mapper_inter_nz, mapper_inter_layer):
        super(Mapping, self).__init__()
        linear = nn.ModuleList()
        linear.append(nn.BatchNorm1d(in_out_nz))
        if mapper_inter_layer >= 2:
            linear.append(
                nn.Linear(in_features=in_out_nz, out_features=mapper_inter_nz))
            linear.append(nn.BatchNorm1d(mapper_inter_nz))
            linear.append(nn.CELU())
            for i in range(mapper_inter_layer - 2):
                linear.append(
                    nn.Linear(in_features=mapper_inter_nz,
                              out_features=mapper_inter_nz))
                linear.append(nn.CELU())

            linear.append(
                nn.Linear(in_features=mapper_inter_nz, out_features=in_out_nz))
        else:
            linear.append(
                nn.Linear(in_features=in_out_nz, out_features=in_out_nz))
        self.linear = linear
Example #28
    def __init__(self):
        super(ZWhatEnc, self).__init__()

        self.enc_cnn = nn.Sequential(
            nn.Conv2d(3, 16, 3, 1, 1),
            nn.CELU(),
            nn.GroupNorm(4, 16),
            nn.Conv2d(16, 32, 4, 2, 1),
            nn.CELU(),
            nn.GroupNorm(8, 32),
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.CELU(),
            nn.GroupNorm(4, 32),
            nn.Conv2d(32, 64, 4, 2, 1),
            nn.CELU(),
            nn.GroupNorm(8, 64),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.CELU(),
            nn.GroupNorm(8, 128),
            nn.Conv2d(128, 256, 4),
            nn.CELU(),
            nn.GroupNorm(16, 256),
        )

        self.enc_what = nn.Linear(256, arch.z_what_dim * 2)
Example #29
 def __init__(self, cfg):
     super(Deeplabv3plus, self).__init__()
     self.backbone = resnet50(pretrained=True, os=cfg.OUTPUT_STRIDE)
     in_plane = 2048
     self.aspp = ASPP(in_plane, cfg.ASPP_OUTDIM)
     self.dropout = nn.Dropout(0.5)
     self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=4)
     self.upsample_aspp = nn.UpsamplingBilinear2d(
         scale_factor=cfg.OUTPUT_STRIDE // 4)
     in_dim = 256
     self.shortconv = nn.Sequential(
         nn.Conv2d(in_dim, cfg.SHORTCUT_DIM, 1, 1, 0, bias=False),
         nn.BatchNorm2d(cfg.SHORTCUT_DIM), nn.ReLU(inplace=True))
     self.cat_conv = nn.Sequential(
         nn.Conv2d(cfg.ASPP_OUTDIM + cfg.SHORTCUT_DIM,
                   cfg.ASPP_OUTDIM,
                   kernel_size=3,
                   stride=1,
                   padding=2,
                   dilation=2,
                   bias=False), nn.BatchNorm2d(cfg.ASPP_OUTDIM),
         nn.CELU(alpha=0.075, inplace=False), nn.Dropout(0.5),
         nn.Conv2d(cfg.ASPP_OUTDIM, cfg.ASPP_OUTDIM, 3, 1, 1, bias=False),
         nn.BatchNorm2d(cfg.ASPP_OUTDIM), nn.CELU(alpha=0.075,
                                                  inplace=False),
         nn.Dropout(0.1))
     self.clt_conv = nn.Conv2d(cfg.ASPP_OUTDIM,
                               cfg.NUM_CLASS,
                               1,
                               1,
                               0,
                               bias=False)
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             nn.init.kaiming_normal_(m.weight,
                                     mode='fan_out',
                                     nonlinearity='relu')
         if isinstance(m, nn.BatchNorm2d):
             nn.init.constant_(m.weight, 1)
             nn.init.constant_(m.bias, 0)
Example #30
    def __init__(self):
        super(Anti_spoof_net_CNN, self).__init__()

        self.resize_32 = nn.Upsample(size=32, mode='nearest')
        self.resize_64 = nn.Upsample(size=64, mode='nearest')

        self.cnn0 = nn.Conv2d(in_channels=3,
                              out_channels=64,
                              kernel_size=3,
                              stride=1,
                              padding=1)
        nn.init.xavier_normal_(self.cnn0.weight)
        self.bn0 = nn.BatchNorm2d(64)
        self.non_linearity0 = nn.CELU(alpha=1.0, inplace=False)

        self.block1 = block(True)
        self.block2 = block(False)
        self.block3 = block(False)

        #Feature map:
        self.cnn4 = nn.Conv2d(in_channels=384,
                              out_channels=128,
                              kernel_size=3,
                              stride=1,
                              padding=1)
        self.cnn5 = nn.Conv2d(in_channels=128,
                              out_channels=3,
                              kernel_size=3,
                              stride=1,
                              padding=1)
        self.cnn6 = nn.Conv2d(in_channels=3,
                              out_channels=1,
                              kernel_size=3,
                              stride=1,
                              padding=1)

        #Depth map:
        self.cnn7 = nn.Conv2d(in_channels=384,
                              out_channels=128,
                              kernel_size=3,
                              stride=1,
                              padding=1)
        self.cnn8 = nn.Conv2d(in_channels=128,
                              out_channels=64,
                              kernel_size=3,
                              stride=1,
                              padding=1)
        self.cnn9 = nn.Conv2d(in_channels=64,
                              out_channels=1,
                              kernel_size=3,
                              stride=1,
                              padding=1)