Example #1
    def __init__(self, in_channels, nclass):
        super().__init__()
        self.nclass = nclass
        inter_channels = in_channels // 4
        self.inp = paddle.zeros(shape=(nclass, 300), dtype='float32')
        self.inp = paddle.create_parameter(
            shape=self.inp.shape,
            dtype=str(self.inp.numpy().dtype),
            default_initializer=paddle.nn.initializer.Assign(self.inp))
        self.inp.stop_gradient = True

        self.fc1 = nn.Sequential(nn.Linear(300, 128), nn.BatchNorm1D(128),
                                 nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(128, 256), nn.BatchNorm1D(256),
                                 nn.ReLU())
        self.conv5 = layers.ConvBNReLU(in_channels,
                                       inter_channels,
                                       3,
                                       padding=1,
                                       bias_attr=False,
                                       stride=1)

        self.gloru = GlobalReasonUnit(in_channels=inter_channels,
                                      num_state=256,
                                      num_node=84,
                                      nclass=nclass)
        self.conv6 = nn.Sequential(nn.Dropout(0.1),
                                   nn.Conv2D(inter_channels, nclass, 1))
Example #2
    def __init__(self,
                 num_layers,
                 emb_dim,
                 drop_ratio=0.5,
                 JK="last",
                 residual=False,
                 gnn_type='gin'):
        '''
            emb_dim (int): node embedding dimensionality
        '''

        super(GNN_node_Virtualnode, self).__init__()
        self.num_layers = num_layers
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layers < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### set the initial virtual node embedding to 0.
        #  self.virtualnode_embedding = paddle.nn.Embedding(1, emb_dim)
        self.virtualnode_embedding = self.create_parameter(
            shape=[1, emb_dim],
            dtype='float32',
            default_initializer=nn.initializer.Constant(value=0.0))

        ### List of GNNs
        self.convs = []
        ### batch norms applied to node embeddings
        self.batch_norms = []

        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = []

        for layer in range(num_layers):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))

            self.batch_norms.append(paddle.nn.BatchNorm1D(emb_dim))

        for layer in range(num_layers - 1):
            self.mlp_virtualnode_list.append(
                nn.Sequential(nn.Linear(emb_dim, emb_dim),
                              nn.BatchNorm1D(emb_dim), nn.ReLU(),
                              nn.Linear(emb_dim, emb_dim),
                              nn.BatchNorm1D(emb_dim), nn.ReLU()))

        self.pool = gnn.GraphPool(pool_type="sum")

        self.convs = nn.LayerList(self.convs)
        self.batch_norms = nn.LayerList(self.batch_norms)
        self.mlp_virtualnode_list = nn.LayerList(self.mlp_virtualnode_list)
Example #3
def batch_norm_1d(num_channels):
    """tbd"""
    if dist.get_world_size() > 1:
        return nn.SyncBatchNorm.convert_sync_batchnorm(
            nn.BatchNorm1D(num_channels))
    else:
        return nn.BatchNorm1D(num_channels)
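A minimal usage sketch for the helper above (assuming import paddle plus the nn and dist imports the original module already relies on): in a single-process run it returns a plain BatchNorm1D, while under multi-GPU training the layer is converted to SyncBatchNorm.

import paddle

norm = batch_norm_1d(64)             # plain BatchNorm1D when dist.get_world_size() == 1
x = paddle.randn([8, 64])            # [batch, channels]
y = norm(x)                          # normalized output, same shape
print(type(norm).__name__, y.shape)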
Example #4
 def __init__(self,
              name_scope='PointNet2_SSG_Clas_',
              num_classes=16,
              normal_channel=False):
     super(PointNet2_SSG_Clas, self).__init__()
     in_channel = 6 if normal_channel else 3
     self.normal_channel = normal_channel
     self.sa1 = PointNetSetAbstraction(npoint=512,
                                       radius=0.2,
                                       nsample=32,
                                       in_channel=in_channel,
                                       mlp=[64, 64, 128],
                                       group_all=False)
     self.sa2 = PointNetSetAbstraction(npoint=128,
                                       radius=0.4,
                                       nsample=64,
                                       in_channel=128 + 3,
                                       mlp=[128, 128, 256],
                                       group_all=False)
     self.sa3 = PointNetSetAbstraction(npoint=None,
                                       radius=None,
                                       nsample=None,
                                       in_channel=256 + 3,
                                       mlp=[256, 512, 1024],
                                       group_all=True)
     self.fc1 = nn.Linear(1024, 512)
     self.bn1 = nn.BatchNorm1D(512)
     self.drop1 = nn.Dropout(0.4)
     self.fc2 = nn.Linear(512, 256)
     self.bn2 = nn.BatchNorm1D(256)
     self.drop2 = nn.Dropout(0.4)
     self.fc3 = nn.Linear(256, num_classes)
Example #5
    def __init__(self,
                 vocab_size,
                 emb_dim=256,
                 hidden_size=512,
                 kernel_size=9,
                 n_layers=32,
                 padding_idx=0,
                 dropout_rate=0.1,
                 epsilon=1e-6):
        super(ResnetEncoderModel, self).__init__()

        self.hidden_size = hidden_size
        self.n_layers = n_layers

        self.token_embedding = nn.Embedding(vocab_size,
                                            emb_dim,
                                            padding_idx=padding_idx)
        max_pos_len = 3000
        self.pos_embedding = nn.Embedding(max_pos_len,
                                          emb_dim,
                                          padding_idx=padding_idx)

        self.padded_conv = nn.Sequential(
            nn.BatchNorm1D(emb_dim, data_format="NLC"),
            nn.ReLU(),
            nn.Conv1D(in_channels=emb_dim, out_channels=hidden_size, kernel_size=kernel_size, padding="same", \
                      data_format="NLC", weight_attr=nn.initializer.KaimingNormal()),
            nn.Dropout(p=dropout_rate)
        )

        self.residual_block_1 = nn.Sequential(
            nn.BatchNorm1D(hidden_size, data_format="NLC"),
            nn.ReLU(),
            nn.Conv1D(in_channels=hidden_size, out_channels=hidden_size, kernel_size=kernel_size, padding="same", \
                      data_format="NLC", weight_attr=nn.initializer.KaimingNormal()),
            nn.Dropout(p=dropout_rate),
            nn.BatchNorm1D(hidden_size, data_format="NLC"),
            nn.ReLU(),
            nn.Conv1D(in_channels=hidden_size, out_channels=hidden_size, kernel_size=kernel_size, padding="same", \
                      data_format="NLC", weight_attr=nn.initializer.KaimingNormal()),
            nn.Dropout(p=dropout_rate)
        )

        self.residual_block_n = nn.Sequential(
            nn.BatchNorm1D(hidden_size, data_format="NLC"),
            nn.ReLU(),
            nn.Conv1D(in_channels=hidden_size, out_channels=hidden_size, kernel_size=kernel_size, dilation=2, \
                      padding="same", data_format="NLC", weight_attr=nn.initializer.KaimingNormal()),
            nn.Dropout(p=dropout_rate),
            nn.BatchNorm1D(hidden_size, data_format="NLC"),
            nn.ReLU(),
            nn.Conv1D(in_channels=hidden_size, out_channels=hidden_size, kernel_size=kernel_size, dilation=2, \
                      padding="same", data_format="NLC", weight_attr=nn.initializer.KaimingNormal()),
            nn.Dropout(p=dropout_rate)
        )
Example #6
    def __init__(self, label_list: list = None, load_checkpoint: str = None):
        super(SpinalNet_ResNet101, self).__init__()

        if label_list is not None:
            self.labels = label_list
            class_dim = len(self.labels)
        else:
            label_list = []
            label_file = os.path.join(self.directory, 'label_list.txt')
            with open(label_file) as files:
                for line in files.readlines():
                    label_list.append(line.strip('\n'))
            self.labels = label_list
            class_dim = len(self.labels)

        self.backbone = ResNet()

        half_in_size = round(2048 / 2)
        layer_width = 20

        self.half_in_size = half_in_size

        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(half_in_size, layer_width),
            nn.BatchNorm1D(layer_width), nn.ReLU())
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(half_in_size + layer_width,
                                         layer_width),
            nn.BatchNorm1D(layer_width), nn.ReLU())
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(half_in_size + layer_width,
                                         layer_width),
            nn.BatchNorm1D(layer_width), nn.ReLU())
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(half_in_size + layer_width,
                                         layer_width),
            nn.BatchNorm1D(layer_width), nn.ReLU())
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(layer_width * 4, class_dim),
        )

        if load_checkpoint is not None:
            self.model_dict = paddle.load(load_checkpoint)[0]
            self.set_dict(self.model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory,
                                      'spinalnet_res101.pdparams')
            self.model_dict = paddle.load(checkpoint)
            self.set_dict(self.model_dict)
            print("load pretrained checkpoint success")
Example #7
 def __init__(self, dim, conv_type="gin"):
     super(VNAgg, self).__init__()
     self.conv_type = conv_type
     if "gin" in conv_type:
         self.mlp = nn.Sequential(
             MLP(dim, dim), nn.BatchNorm1D(dim), nn.ReLU())
     elif "gcn" in conv_type:
         self.W0 = nn.Linear(dim, dim)
         self.W1 = nn.Linear(dim, dim)
         self.nl_bn = nn.Sequential(nn.BatchNorm1D(dim), nn.ReLU())
     else:
         raise NotImplementedError('Unrecognised model conv : {}'.format(
             conv_type))
Example #8
 def __init__(self,
              name_scope='PointNet2_MSG_Seg_',
              num_classes=16,
              num_parts=50,
              normal_channel=False):
     super(PointNet2_MSG_Seg, self).__init__()
     if normal_channel:
         additional_channel = 3
     else:
         additional_channel = 0
     self.num_classes = num_classes
     self.normal_channel = normal_channel
     self.sa1 = PointNetSetAbstractionMsg(
         512, [0.1, 0.2, 0.4], [32, 64, 128], 3 + additional_channel,
         [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
     self.sa2 = PointNetSetAbstractionMsg(
         128, [0.4, 0.8], [64, 128], 128 + 128 + 64,
         [[128, 128, 256], [128, 196, 256]])
     self.sa3 = PointNetSetAbstraction(npoint=None,
                                       radius=None,
                                       nsample=None,
                                       in_channel=512 + 3,
                                       mlp=[256, 512, 1024],
                                       group_all=True)
     self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])
     self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])
     self.fp1 = PointNetFeaturePropagation(in_channel=150 +
                                           additional_channel,
                                           mlp=[128, 128])
     self.conv1 = nn.Conv1D(128, 128, 1)
     self.bn1 = nn.BatchNorm1D(128)
     self.drop1 = nn.Dropout(0.5)
     self.conv2 = nn.Conv1D(128, num_parts, 1)
Example #9
 def __init__(self,
              inp_dim,
              hidden_dim,
              num_layers,
              batch_norm=False,
              dropout=0.):
     super(MLP, self).__init__()
     layer_list = OrderedDict()
     in_dim = inp_dim
     for l in range(num_layers):
         layer_list['fc{}'.format(l)] = nn.Linear(in_dim, hidden_dim)
         if l < num_layers - 1:
             if batch_norm:
                 layer_list['norm{}'.format(l)] = nn.BatchNorm1D(
                     num_features=hidden_dim)
             layer_list['relu{}'.format(l)] = nn.LeakyReLU()
             if dropout > 0:
                 layer_list['drop{}'.format(l)] = nn.Dropout(p=dropout)
         in_dim = hidden_dim
     if num_layers > 0:
         self.network = nn.Sequential()
         for i in layer_list:
             self.network.add_sublayer(i, layer_list[i])
     else:
         self.network = nn.Identity()
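A brief sketch exercising the MLP above with hypothetical sizes (assumes import paddle and the from collections import OrderedDict that the snippet relies on, and that the full class forwards through self.network):

mlp = MLP(inp_dim=16, hidden_dim=32, num_layers=3, batch_norm=True, dropout=0.1)
x = paddle.randn([4, 16])       # [batch, inp_dim]
out = mlp.network(x)            # [4, 32]: fc0 -> norm0 -> relu0 -> drop0 -> fc1 -> ... -> fc2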
Example #10
    def __init__(self, config):
        super(GINEncoder, self).__init__()
        self.hidden_size = config['hidden_size']
        self.num_layers = config['num_layers']
        self.embed_dim = config['embed_dim']
        self.atom_type_num = config['atom_type_num']
        self.chirality_tag_num = config['chirality_tag_num']
        self.bond_type_num = config['bond_type_num']
        self.bond_direction_num = config['bond_direction_num']
        self.readout = config['readout']
        self.activation = config['activation']

        self.atom_names = config['atom_names']
        self.bond_names = config['bond_names']

        self.atom_embedding = AtomEmbedding(self.atom_names, self.embed_dim)
        self.gin_list = nn.LayerList()
        self.norm_list = nn.LayerList()

        for layer_id in range(self.num_layers):
            self.gin_list.append(
                pgl.nn.GINConv(self.embed_dim,
                               self.embed_dim,
                               activation=self.activation))
            self.norm_list.append(nn.BatchNorm1D(self.embed_dim))

        if self.readout == 'mean':
            self.graph_pool = MeanPool()
        else:
            self.graph_pool = pgl.nn.GraphPool(pool_type=self.readout)
Example #11
    def __init__(self,
                 dim,
                 dropout=0.5,
                 activation=F.relu,
                 virtual_node=False,
                 virtual_node_agg=True,
                 k=4,
                 last_layer=False,
                 conv_type='gin',
                 edge_embedding=None):
        super().__init__()
        self.edge_embed = edge_embedding
        self.conv_type = conv_type
        if conv_type == 'gin+':
            self.conv = GINEPLUS(MLP(dim, dim), dim, k=k)
        elif conv_type == 'gcn':
            self.conv = gnn.GCNConv(dim, dim)
        self.norm = nn.BatchNorm1D(dim)
        self.act = activation or None
        self.last_layer = last_layer

        self.dropout_ratio = dropout

        self.virtual_node = virtual_node
        self.virtual_node_agg = virtual_node_agg
        if self.virtual_node and self.virtual_node_agg:
            self.vn_aggregator = VNAgg(dim, conv_type=conv_type)
Example #12
 def __init__(self, num_classes=751):
     super(Net, self).__init__()
     # 3 128 64
     self.conv = nn.Sequential(
         nn.Conv2D(3, 64, 3, stride=1, padding=1),
         nn.BatchNorm2D(64),
         nn.ReLU(),
         # nn.Conv2d(32,32,3,stride=1,padding=1),
         # nn.BatchNorm2d(32),
         # nn.ReLU(inplace=True),
         nn.MaxPool2D(3, 2, padding=1),
     )
     # 32 64 32
     self.layer1 = make_layers(64, 64, 2, False)
     # 32 64 32
     self.layer2 = make_layers(64, 128, 2, True)
     # 64 32 16
     self.layer3 = make_layers(128, 256, 2, True)
     # 128 16 8
     self.layer4 = make_layers(256, 512, 2, True)
     # 256 8 4
     self.avgpool = nn.AvgPool2D((8, 4), 1)
     # 256 1 1
     self.classifier = nn.Sequential(
         nn.Linear(512, 256),
         nn.BatchNorm1D(256),
         nn.ReLU(),
         nn.Dropout(),
         nn.Linear(256, num_classes),
     )
Example #13
    def __init__(self, block, depth):
        super(ResNet, self).__init__()
        layer_cfg = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3]
        }
        layers = layer_cfg[depth]
        self._norm_layer = nn.BatchNorm2D

        self.inplanes = 64
        self.dilation = 1

        self.conv1 = nn.Conv2D(1,
                               self.inplanes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias_attr=False)
        self.bn1 = self._norm_layer(self.inplanes)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.pool = nn.AdaptiveMaxPool2D((1, 1))
        self.bn4 = nn.BatchNorm2D(512 * block.expansion)
        self.dropout = nn.Dropout()
        self.fc5 = nn.Linear(512 * block.expansion, 512)
        self.bn5 = nn.BatchNorm1D(512)
Example #14
    def __init__(self,
                 block,
                 layers,
                 num_filters,
                 feature_dim,
                 encoder_type='SAP',
                 n_mels=40,
                 log_input=True,
                 **kwargs):
        super(ResNetSE, self).__init__()

        print('Embedding size is %d, encoder %s.' %
              (feature_dim, encoder_type))

        self.inplanes = num_filters[0]
        self.encoder_type = encoder_type
        self.n_mels = n_mels
        self.log_input = log_input

        self.conv1 = nn.Conv2D(1,
                               num_filters[0],
                               kernel_size=3,
                               stride=1,
                               padding=1)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2D(num_filters[0])

        self.layer1 = self._make_layer(block, num_filters[0], layers[0])
        self.layer2 = self._make_layer(block,
                                       num_filters[1],
                                       layers[1],
                                       stride=(2, 2))
        self.layer3 = self._make_layer(block,
                                       num_filters[2],
                                       layers[2],
                                       stride=(2, 2))
        self.layer4 = self._make_layer(block,
                                       num_filters[3],
                                       layers[3],
                                       stride=(2, 2))

        outmap_size = int(self.n_mels / 8)

        self.attention = nn.Sequential(
            nn.Conv1D(num_filters[3] * outmap_size, 128, kernel_size=1),
            nn.ReLU(),
            nn.BatchNorm1D(128),
            nn.Conv1D(128, num_filters[3] * outmap_size, kernel_size=1),
            nn.Softmax(axis=2),
        )

        if self.encoder_type == "SAP":
            out_dim = num_filters[3] * outmap_size
        elif self.encoder_type == "ASP":
            out_dim = num_filters[3] * outmap_size * 2
        else:
            raise ValueError('Undefined encoder')

        self.fc = nn.Linear(out_dim, feature_dim)
Example #15
    def __init__(self, model_config={}):
        super(PretrainGNNModel, self).__init__()

        self.embed_dim = model_config.get('embed_dim', 300)
        self.dropout_rate = model_config.get('dropout_rate', 0.5)
        self.norm_type = model_config.get('norm_type', 'batch_norm')
        self.graph_norm = model_config.get('graph_norm', False)
        self.residual = model_config.get('residual', False)
        self.layer_num = model_config.get('layer_num', 5)
        self.gnn_type = model_config.get('gnn_type', 'gin')
        self.JK = model_config.get('JK', 'last')
        self.readout = model_config.get('readout', 'mean')

        self.atom_names = model_config['atom_names']
        self.bond_names = model_config['bond_names']

        self.atom_embedding = AtomEmbedding(self.atom_names, self.embed_dim)
        self.bond_embedding_list = nn.LayerList()
        self.gnn_list = nn.LayerList()
        self.norm_list = nn.LayerList()
        self.graph_norm_list = nn.LayerList()
        self.dropout_list = nn.LayerList()
        for layer_id in range(self.layer_num):
            self.bond_embedding_list.append(BondEmbedding(self.bond_names, self.embed_dim))

            if self.gnn_type == 'gin':
                self.gnn_list.append(GIN(self.embed_dim))
            else:
                raise ValueError(self.gnn_type)

            if self.norm_type == 'batch_norm':
                self.norm_list.append(nn.BatchNorm1D(self.embed_dim))
            elif self.norm_type == 'layer_norm':
                self.norm_list.append(nn.LayerNorm(self.embed_dim))
            else:
                raise ValueError(self.norm_type)

            if self.graph_norm:
                self.graph_norm_list.append(GraphNorm())    # TODO: pgl.nn.GraphNorm not implemented in pgl==2.1.2

            self.dropout_list.append(nn.Dropout(self.dropout_rate))
        
        # TODO: use self-implemented MeanPool due to pgl bug.
        if self.readout == 'mean':
            self.graph_pool = MeanPool()
        else:
            self.graph_pool = pgl.nn.GraphPool(pool_type=self.readout)

        print('[PretrainGNNModel] embed_dim:%s' % self.embed_dim)
        print('[PretrainGNNModel] dropout_rate:%s' % self.dropout_rate)
        print('[PretrainGNNModel] norm_type:%s' % self.norm_type)
        print('[PretrainGNNModel] graph_norm:%s' % self.graph_norm)
        print('[PretrainGNNModel] residual:%s' % self.residual)
        print('[PretrainGNNModel] layer_num:%s' % self.layer_num)
        print('[PretrainGNNModel] gnn_type:%s' % self.gnn_type)
        print('[PretrainGNNModel] JK:%s' % self.JK)
        print('[PretrainGNNModel] readout:%s' % self.readout)
        print('[PretrainGNNModel] atom_names:%s' % str(self.atom_names))
        print('[PretrainGNNModel] bond_names:%s' % str(self.bond_names))
Example #16
 def __init__(self, a, b, bias=True, std=0.02):
     super().__init__()
     self.add_sublayer('bn', nn.BatchNorm1D(a))
     l = nn.Linear(a, b, bias_attr=bias)
     trunc_normal_(l.weight)
     if bias:
         zeros_(l.bias)
     self.add_sublayer('l', l)
Example #17
 def __init__(self,
              inplanes=256,
              planes=256,
              kernel_size=9,
              dilation=1,
              dropout_rate=0.1):
     super(ResnetBasicBlock, self).__init__()
     self.conv1 = nn.Conv1D(in_channels=inplanes, out_channels=planes, kernel_size=kernel_size, dilation=dilation, \
                            padding="same", data_format="NLC", weight_attr=nn.initializer.KaimingNormal())
     self.bn1 = nn.BatchNorm1D(planes, data_format="NLC")
     self.gelu1 = nn.GELU()
     self.dropout1 = nn.Dropout(p=dropout_rate)
     self.conv2 = nn.Conv1D(in_channels=planes, out_channels=planes, kernel_size=kernel_size, dilation=dilation, \
                            padding="same", data_format="NLC", weight_attr=nn.initializer.KaimingNormal())
     self.bn2 = nn.BatchNorm1D(planes, data_format="NLC")
     self.gelu2 = nn.GELU()
     self.dropout2 = nn.Dropout(p=dropout_rate)
Example #18
 def __init__(self, model_name='ResNet50', last_stride=1):
     super(ResNetEmbedding, self).__init__()
     assert model_name in ['ResNet50', 'ResNet101'
                           ], "Unsupported ReID arch: {}".format(model_name)
     self.base = eval(model_name)(last_conv_stride=last_stride)
     self.gap = nn.AdaptiveAvgPool2D(output_size=1)
     self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
     self.bn = nn.BatchNorm1D(self.in_planes, bias_attr=False)
Example #19
 def __init__(self, in_dim, out_dim):
     super(MLP, self).__init__()
     self.main = [
         nn.Linear(in_dim, 2 * in_dim), nn.BatchNorm1D(2 * in_dim),
         nn.ReLU()
     ]
     self.main.append(nn.Linear(2 * in_dim, out_dim))
     self.main = nn.Sequential(*self.main)
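A small usage sketch for this MLP (assuming import paddle and that the class's forward simply applies self.main), mapping 16-d inputs to 8-d outputs through the widened hidden layer:

mlp = MLP(in_dim=16, out_dim=8)
x = paddle.randn([4, 16])
y = mlp.main(x)                 # [4, 8]: Linear(16->32) -> BatchNorm1D(32) -> ReLU -> Linear(32->8)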
Example #20
 def __init__(self, in_channel, mlp):
     super(PointNetFeaturePropagation, self).__init__()
     self.mlp_convs = []
     self.mlp_bns = []
     last_channel = in_channel
     for out_channel in mlp:
         self.mlp_convs.append(nn.Conv1D(last_channel, out_channel, 1))
         self.mlp_bns.append(nn.BatchNorm1D(out_channel))
         last_channel = out_channel
Example #21
    def __init__(self, i_size: int, h_size: int):
        super().__init__()
        hidden_size = h_size * 3

        self.fw_fc = nn.Linear(i_size, hidden_size, bias_attr=False)
        self.fw_bn = nn.BatchNorm1D(hidden_size,
                                    bias_attr=None,
                                    data_format='NLC')
        self.bw_fc = nn.Linear(i_size, hidden_size, bias_attr=False)
        self.bw_bn = nn.BatchNorm1D(hidden_size,
                                    bias_attr=None,
                                    data_format='NLC')

        self.fw_cell = nn.GRUCell(input_size=hidden_size, hidden_size=h_size)
        self.bw_cell = nn.GRUCell(input_size=hidden_size, hidden_size=h_size)
        self.fw_rnn = nn.RNN(self.fw_cell, is_reverse=False,
                             time_major=False)  # [B, T, D]
        self.bw_rnn = nn.RNN(self.bw_cell, is_reverse=True,
                             time_major=False)  # [B, T, D]
Example #22
 def __init__(self, a, b, bn_weight_init=1):
     super().__init__()
     self.add_sublayer('c', nn.Linear(a, b, bias_attr=False))
     bn = nn.BatchNorm1D(b)
     if bn_weight_init == 0:
         zeros_(bn.weight)
     else:
         ones_(bn.weight)
     zeros_(bn.bias)
     self.add_sublayer('bn', bn)
Example #23
    def __init__(self, a, b, bn_weight_init=1, resolution=-100000):
        super().__init__()
        self.add_sublayer("c", nn.Linear(a, b, bias_attr=False))

        bn = nn.BatchNorm1D(b)

        Constant(bn_weight_init)(bn.weight)
        zeros_(bn.bias)

        self.add_sublayer("bn", bn)
Example #24
 def __init__(self, in_channels=3, out_channels=3, kernel_size=3, *args):
     super(Block, self).__init__()
     self.nn = nn.Sequential(
         nn.Conv1D(in_channels=in_channels,
                   out_channels=out_channels,
                   kernel_size=kernel_size,
                   padding=0,
                   bias_attr=False),
         nn.BatchNorm1D(num_features=out_channels), nn.LeakyReLU(.2),
         nn.Dropout(p=.2))
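A hypothetical usage sketch for the Block above (assuming import paddle and that the class's forward applies self.nn), run on a 3-channel signal of length 32:

blk = Block(in_channels=3, out_channels=8, kernel_size=3)
x = paddle.randn([4, 3, 32])    # [batch, channels, length]
y = blk.nn(x)                   # [4, 8, 30]: padding=0 shortens the length by kernel_size - 1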
Example #25
    def __init__(self,
                 vocab_size,
                 emb_dim=128,
                 hidden_size=256,
                 kernel_size=9,
                 n_layers=35,
                 padding_idx=0,
                 dropout_rate=0.1,
                 epsilon=1e-6):
        super(ResnetEncoderModel, self).__init__()

        self.hidden_size = hidden_size
        self.n_layers = n_layers

        self.token_embedding = nn.Embedding(vocab_size,
                                            emb_dim,
                                            padding_idx=padding_idx)
        max_pos_len = 3000
        self.pos_embedding = nn.Embedding(max_pos_len,
                                          emb_dim,
                                          padding_idx=padding_idx)

        self.layer_norm = nn.BatchNorm1D(emb_dim, data_format="NLC")
        self.dropout = nn.Dropout(dropout_rate)

        self.padded_conv = nn.Sequential(
            nn.Conv1D(in_channels=emb_dim, out_channels=hidden_size, kernel_size=kernel_size, padding="same", \
                      data_format="NLC", weight_attr=nn.initializer.KaimingNormal()),
            nn.BatchNorm1D(hidden_size, data_format="NLC"),
            nn.GELU(),
            nn.Dropout(p=dropout_rate)
        )
        self.residual_block_1 = ResnetBasicBlock(inplanes=hidden_size,
                                                 planes=hidden_size,
                                                 kernel_size=kernel_size,
                                                 dropout_rate=dropout_rate)
        self.residual_block_n = nn.Sequential()
        for i in range(1, n_layers):
            self.residual_block_n.add_sublayer("residual_block_%d" % i, \
                ResnetBasicBlock(inplanes=hidden_size, planes=hidden_size, kernel_size=kernel_size, dilation=2, dropout_rate=dropout_rate))

        self.apply(self.init_weights)
Example #26
    def __init__(self,
                 in_channels=2,
                 edge_importance_weighting=True,
                 data_bn=True,
                 layout='fsd10',
                 strategy='spatial',
                 **kwargs):
        super(STGCN, self).__init__()
        self.data_bn = data_bn
        # load graph
        self.graph = Graph(
            layout=layout,
            strategy=strategy,
        )
        A = paddle.to_tensor(self.graph.A, dtype='float32')
        self.register_buffer('A', A)

        # build networks
        spatial_kernel_size = A.shape[0]
        temporal_kernel_size = 9
        kernel_size = (temporal_kernel_size, spatial_kernel_size)
        self.data_bn = nn.BatchNorm1D(in_channels *
                                      A.shape[1]) if self.data_bn else iden
        kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
        self.st_gcn_networks = nn.LayerList((
            st_gcn_block(in_channels,
                         64,
                         kernel_size,
                         1,
                         residual=False,
                         **kwargs0),
            st_gcn_block(64, 64, kernel_size, 1, **kwargs),
            st_gcn_block(64, 64, kernel_size, 1, **kwargs),
            st_gcn_block(64, 64, kernel_size, 1, **kwargs),
            st_gcn_block(64, 128, kernel_size, 2, **kwargs),
            st_gcn_block(128, 128, kernel_size, 1, **kwargs),
            st_gcn_block(128, 128, kernel_size, 1, **kwargs),
            st_gcn_block(128, 256, kernel_size, 2, **kwargs),
            st_gcn_block(256, 256, kernel_size, 1, **kwargs),
            st_gcn_block(256, 256, kernel_size, 1, **kwargs),
        ))

        # initialize parameters for edge importance weighting
        if edge_importance_weighting:
            self.edge_importance = nn.ParameterList([
                self.create_parameter(
                    shape=self.A.shape,
                    default_initializer=nn.initializer.Constant(1))
                for i in self.st_gcn_networks
            ])
        else:
            self.edge_importance = [1] * len(self.st_gcn_networks)

        self.pool = nn.AdaptiveAvgPool2D(output_size=(1, 1))
Example #27
    def __init__(self, a, b, bias_attr=True, std=0.02):
        super().__init__()
        self.add_sublayer("bn", nn.BatchNorm1D(a))

        l = nn.Linear(a, b, bias_attr=bias_attr)

        TruncatedNormal(std=std)(l.weight)

        if bias_attr:
            zeros_(l.bias)

        self.add_sublayer("l", l)
Example #28
    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 with_avg_pool=True):
        super(NonLinearNeckV2, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2D((1, 1))

        self.mlp = nn.Sequential(nn.Linear(in_channels, hid_channels),
                                 nn.BatchNorm1D(hid_channels), nn.ReLU(),
                                 nn.Linear(hid_channels, out_channels))
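A minimal sketch of the projection head above with hypothetical sizes (assumes import paddle; only __init__ is shown here, so the pooling and MLP are invoked directly):

neck = NonLinearNeckV2(in_channels=2048, hid_channels=512, out_channels=256)
feat = paddle.randn([8, 2048, 7, 7])                       # backbone feature map
pooled = paddle.flatten(neck.avgpool(feat), start_axis=1)  # [8, 2048]
emb = neck.mlp(pooled)                                     # [8, 256]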
Example #29
 def __init__(self,
              name_scope='PointNet2_MSG_Clas_',
              num_classes=16,
              normal_channel=False):
     super(PointNet2_MSG_Clas, self).__init__()
     in_channel = 3 if normal_channel else 0
     self.normal_channel = normal_channel
     self.sa1 = PointNetSetAbstractionMsg(
         512, [0.1, 0.2, 0.4], [16, 32, 128], in_channel,
         [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
     self.sa2 = PointNetSetAbstractionMsg(
         128, [0.2, 0.4, 0.8], [32, 64, 128], 320,
         [[64, 64, 128], [128, 128, 256], [128, 128, 256]])
     self.sa3 = PointNetSetAbstraction(None, None, None, 640 + 3,
                                       [256, 512, 1024], True)
     self.fc1 = nn.Linear(1024, 512)
     self.bn1 = nn.BatchNorm1D(512)
     self.drop1 = nn.Dropout(0.4)
     self.fc2 = nn.Linear(512, 256)
     self.bn2 = nn.BatchNorm1D(256)
     self.drop2 = nn.Dropout(0.5)
     self.fc3 = nn.Linear(256, num_classes)
Example #30
 def __init__(self, in_channels, num_codes):
     super().__init__()
     self.encoding_project = layers.ConvBNReLU(
         in_channels,
         in_channels,
         1,
     )
     self.encoding = nn.Sequential(
         Encoding(channels=in_channels, num_codes=num_codes),
         nn.BatchNorm1D(num_codes),
         nn.ReLU(),
     )
     self.fc = nn.Sequential(
         nn.Linear(in_channels, in_channels),
         nn.Sigmoid(),
     )