Code example #1
File: test_module.py Project: MegEngine/MegEngine
def __init__(self):
    super().__init__()
    self.conv1 = Conv2d(3, 128, 3, padding=1, bias=False)
    self.conv2 = Conv2d(3, 128, 3, dilation=2, bias=False)
    self.bn1 = BatchNorm1d(128)
    self.bn2 = BatchNorm2d(128)
    self.pooling = MaxPool2d(kernel_size=2, padding=0)
    # Depthwise-separable convolution: groups=256 with 256 in/out channels
    # makes the first layer depthwise; the 1x1 layer is the pointwise step.
    modules = OrderedDict()
    modules["depthwise"] = Conv2d(
        256,
        256,
        3,
        1,
        0,
        groups=256,
        bias=False,
    )
    modules["pointwise"] = Conv2d(
        256,
        256,
        kernel_size=1,
        stride=1,
        padding=0,
        bias=True,
    )
    self.submodule1 = Sequential(modules)
    # Modules nested inside plain Python containers (list, tuple, dict),
    # exercising the module-discovery logic this test targets.
    self.list1 = [Dropout(drop_prob=0.1), [Softmax(axis=100)]]
    self.tuple1 = (
        Dropout(drop_prob=0.1),
        (Softmax(axis=100), Dropout(drop_prob=0.2)),
    )
    self.dict1 = {"Dropout": Dropout(drop_prob=0.1)}
    self.fc1 = Linear(512, 1024)
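
These test snippets omit their imports. A minimal sketch of what they appear to assume, given that every bare class name used here exists in MegEngine's megengine.module namespace (the actual test file may organize its imports differently):

from collections import OrderedDict

from megengine.module import (
    BatchNorm1d,
    BatchNorm2d,
    Conv2d,
    Dropout,
    Linear,
    MaxPool2d,
    Sequential,
    Softmax,
)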
Code example #2
File: test_module.py Project: wenming2014/MegEngine
def __init__(self):
    super().__init__()
    self.conv1 = Conv2d(3, 128, 3, stride=2, bias=False)
    self.conv2 = Conv2d(3, 128, 3, padding=1, bias=False)
    self.conv3 = Conv2d(3, 128, 3, dilation=2, bias=False)
    self.bn1 = BatchNorm2d(128)
    self.bn2 = BatchNorm1d(128)
    self.dropout = Dropout(drop_prob=0.1)
    self.softmax = Softmax(axis=100)
    self.pooling = MaxPool2d(kernel_size=2, padding=0)
    self.submodule1 = Sequential(Dropout(drop_prob=0.1), Softmax(axis=100))
    self.fc1 = Linear(512, 1024)
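
A hedged usage sketch, not from the source: wrapping a few of example #2's layers in a Module subclass (the class name TinyNet and the forward body are illustrative only) and listing the parameters MegEngine registers automatically:

import megengine.module as M

class TinyNet(M.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = M.Conv2d(3, 128, 3, stride=2, bias=False)
        self.bn1 = M.BatchNorm2d(128)

    def forward(self, x):
        # conv -> batch norm, just enough to make the module runnable
        return self.bn1(self.conv1(x))

net = TinyNet()
for name, param in net.named_parameters():
    print(name, param.shape)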
Code example #3
File: model.py Project: zhangll1990/Models-1
def __init__(self, config, num_labels, bert=None):
    super().__init__()  # required so the Module machinery tracks submodules
    if bert is None:
        self.bert = BertModel(config)
    else:
        self.bert = bert
    self.num_labels = num_labels
    self.dropout = Dropout(config.hidden_dropout_prob)
    self.classifier = Linear(config.hidden_size, num_labels)
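
The snippet shows only the constructor. A hedged sketch of the standard BERT classification-head forward these layers imply (the project's actual forward may differ, e.g. by also computing a loss):

def forward(self, pooled_output):
    # Regularize the pooled [CLS] representation, then project it
    # to one logit per label.
    pooled_output = self.dropout(pooled_output)
    logits = self.classifier(pooled_output)
    return logits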
Code example #4
File: model.py Project: zhangll1990/Models-1
def __init__(self, config):
    super(BertEmbeddings, self).__init__()
    self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
    self.position_embeddings = Embedding(config.max_position_embeddings,
                                         config.hidden_size)
    self.token_type_embeddings = Embedding(config.type_vocab_size,
                                           config.hidden_size)

    # self.LayerNorm is not snake-cased, to stay aligned with the TensorFlow
    # model's variable names so that any TensorFlow checkpoint file can be loaded.
    self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
    self.dropout = Dropout(config.hidden_dropout_prob)
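
Again only the constructor is shown. A hedged sketch of the conventional BERT embedding forward: sum the three tables, normalize, regularize. Passing position_ids explicitly is an assumption of this sketch; real implementations usually build them from the sequence length:

def forward(self, input_ids, token_type_ids, position_ids):
    # Sum word, position, and segment embeddings, then normalize
    # and apply dropout, the standard BERT embedding combination.
    embeddings = (
        self.word_embeddings(input_ids)
        + self.position_embeddings(position_ids)
        + self.token_type_embeddings(token_type_ids)
    )
    embeddings = self.LayerNorm(embeddings)
    return self.dropout(embeddings)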
Code example #5
def __init__(self, config):
    super().__init__()
    if config.hidden_size % config.num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of attention "
            "heads (%d)" % (config.hidden_size, config.num_attention_heads)
        )
    self.num_attention_heads = config.num_attention_heads
    self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
    self.all_head_size = self.num_attention_heads * self.attention_head_size

    self.query = Linear(config.hidden_size, self.all_head_size)
    self.key = Linear(config.hidden_size, self.all_head_size)
    self.value = Linear(config.hidden_size, self.all_head_size)

    self.dropout = Dropout(config.attention_probs_dropout_prob)
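
The divisibility check above guarantees the hidden vector splits evenly across heads. A worked example of the arithmetic, using BERT-base's conventional sizes (assumed here purely for illustration):

hidden_size = 768
num_attention_heads = 12
attention_head_size = hidden_size // num_attention_heads   # 64
all_head_size = num_attention_heads * attention_head_size  # 768, equals hidden_size
# A (batch, seq, 768) query/key/value projection can therefore be
# reshaped to (batch, seq, 12, 64) for per-head attention.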
Code example #6
File: model.py Project: zhangll1990/Models-1
def __init__(self, config):
    super(BertOutput, self).__init__()
    self.dense = Linear(config.intermediate_size, config.hidden_size)
    self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
    self.dropout = Dropout(config.hidden_dropout_prob)
Code example #7
def __init__(self, config):
    super().__init__()
    self.dense = Linear(config.hidden_size, config.hidden_size)
    self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
    self.dropout = Dropout(config.hidden_dropout_prob)
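
Examples #6 and #7 build the same projection block; their forward is not shown, but in BERT-style models this block conventionally applies a residual connection before layer normalization. A hedged sketch:

def forward(self, hidden_states, input_tensor):
    # Project, regularize, then add the block input back and normalize.
    hidden_states = self.dense(hidden_states)
    hidden_states = self.dropout(hidden_states)
    return self.LayerNorm(hidden_states + input_tensor)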