Example #1
 def __init__(self, **kwargs):
     super(FeatureBlock1, self).__init__(**kwargs)
     self.gru = rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2)
     self.conv3 = nn.Conv1D(channels=128, kernel_size=3, padding=1, strides=1, activation='relu')
     self.conv5 = nn.Conv1D(channels=128, kernel_size=5, padding=2, strides=1, activation='relu')
     self.conv7 = nn.Conv1D(channels=128, kernel_size=7, padding=3, strides=1, activation='relu')
     # self.gru_post_max = nn.MaxPool1D(pool_size=2)
     # self.gru_post_ave = nn.AvgPool1D(pool_size=2)
     self.gru_maxpool = nn.GlobalMaxPool1D()
     self.conv_maxpool = nn.GlobalMaxPool1D()
     '''
     self.gru_avepool = nn.GlobalAvgPool1D()
     self.conv_avepool = nn.GlobalAvgPool1D()
     '''
     self.conv_drop = nn.Dropout(0.5)
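The constructor above only defines the layers; the block's forward pass is not part of this listing. A hypothetical sketch of how they could be wired together, assuming the input batch is in NTC layout (batch, time, features) and `from mxnet import nd` is available:

 def forward(self, x):
     # Hypothetical sketch, not taken from the source project
     h = self.gru(x)                      # BiGRU keeps the NTC layout
     h = h.transpose((0, 2, 1))           # switch to NCW for Conv1D / pooling
     g = self.gru_maxpool(h).flatten()    # max over time of the GRU states
     convs = [self.conv3(h), self.conv5(h), self.conv7(h)]
     c = [self.conv_maxpool(self.conv_drop(ci)).flatten() for ci in convs]
     return nd.concat(g, *c, dim=1)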
Example #2
File: actor.py Project: leferrad/powrl3
    def __init__(self, n_dims, n_actions, hidden_dims=(32, 32), **kwargs):
        super(Net, self).__init__(**kwargs)

        self.n_dims = n_dims
        self.n_actions = n_actions
        self.hidden_dims = hidden_dims

        with self.name_scope():

            self.embedding = nn.Embedding(
                256, output_dim=16)  # suggestion: not greater than 16
            self.bn = nn.BatchNorm()  # TODO: is this necessary?
            self.conv1 = nn.Conv1D(channels=32,
                                   kernel_size=3,
                                   activation='relu',
                                   padding=0,
                                   strides=1)
            self.conv2 = nn.Conv1D(channels=32,
                                   kernel_size=3,
                                   activation='relu',
                                   padding=0,
                                   strides=1)

            self.pool = nn.GlobalMaxPool1D()

            self.h1 = nn.Dense(32, activation='relu')
            self.h2 = nn.Dense(32, activation='relu')

            #for h_dim in self.hidden_dims:
            #    x = Dense(h_dim, activation='relu')(x)
            # x = Dropout(0.2)(x)

            self.output = nn.Dense(self.n_actions, use_bias=False)
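Only the constructor is shown; the matching forward pass is not included in this listing. A hedged sketch, assuming the input is a batch of integer-encoded observations of shape (batch, length):

    def forward(self, x):
        # Hypothetical sketch, not taken from leferrad/powrl3
        e = self.embedding(x)                 # (batch, length, 16)
        e = self.bn(e.transpose((0, 2, 1)))   # move to NCW layout for Conv1D
        c = self.conv2(self.conv1(e))
        p = self.pool(c).flatten()            # global max over the time axis
        return self.output(self.h2(self.h1(p)))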
Example #3
 def __init__(self):
     super(WordContextNet, self).__init__()
     with self.name_scope():
         self.embedding = nn.Embedding(vocab_size, region_size * emb_size)
         self.embedding_region = nn.Embedding(vocab_size, emb_size)
         self.max_pool = nn.GlobalMaxPool1D()
         self.dense = nn.Dense(n_classes)
Example #4
 def __init__(self):
     super(Net, self).__init__()
     with self.name_scope():
         self.embedding = nn.Embedding(vocab_size, region_size * emb_size)
         self.embedding_region = nn.Embedding(vocab_size, emb_size)
         self.max_pool = nn.GlobalMaxPool1D()
         self.dense = nn.Dense(n_classes)
         self.dense1 = nn.Dense(max_sequence_length * 2, activation='relu')
         self.dense2 = nn.Dense(1)
Example #5
 def __init__(self, emb_size, **kwargs):
     super(GatedBlock, self).__init__(**kwargs)
     with self.name_scope():
         self.W_a = self.params.get('gated_block_W_a',
                                    shape=(emb_size, emb_size))
         self.W_b = self.params.get('gated_block_W_b',
                                    shape=(emb_size, emb_size))
         self.b = self.params.get('gated_block_b', shape=(emb_size,))
         self.maxpooling = nn.GlobalMaxPool1D(layout='NWC')
Example #6
 def __init__(self, config, kernel_idx=0, **kwargs):
     super(ParaConvpool, self).__init__(**kwargs)
     with self.name_scope():
         self.conv = nn.Conv2D(
             channels=config['feature_map'] * config['num_neighbor'],
             kernel_size=(config['kernel_size'][kernel_idx],
                          config['embedding_dim']),
             strides=(1, config['embedding_dim']),
             groups=config['num_neighbor'])
         self.pooling = nn.GlobalMaxPool1D()
         self.norm = nn.BatchNorm(axis=1)
Example #7
 def __init__(self, voc, w2v_size, k_sizes, n_channels, **kwargs):
     super(TextCNN, self).__init__(**kwargs)
     self.embedding = nn.Embedding(len(voc), w2v_size)
     self.convs = nn.HybridSequential()
     for k, c in zip(k_sizes, n_channels):
         self.convs.add(nn.Conv1D(c, k, activation='relu'))
     self.pooling = nn.GlobalMaxPool1D()
     self.dropout_1 = nn.Dropout(0.3)
     self.dense_h = nn.Dense(6, activation='relu')
     self.dropout_2 = nn.Dropout(0.6)
     self.dense = nn.Dense(2)
     self.flat = nn.Flatten()
Example #8
 def __init__(self, vocab, embed_size, kernel_sizes, num_channels,
              **kwargs):
     super(Weibo_TextCNN, self).__init__(**kwargs)
     self.embedding = nn.Embedding(len(vocab), embed_size)  # embed_size is set by the fastText model
     self.constant_embedding = nn.Embedding(len(vocab), embed_size)  # kept constant (not trained)
     self.convs = create_con_1d(num_channels, kernel_sizes)
     self.maxpool = nn.GlobalMaxPool1D()  # global max-over-time pooling after the convs
     self.dropout = nn.Dropout(0.5)
     self.decoder = nn.Dense(2)  # two output classes
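The helper create_con_1d is not shown in this listing; presumably it builds the stack of Conv1D layers from the channel and kernel-size lists, along the lines of this hypothetical sketch:

def create_con_1d(num_channels, kernel_sizes):
    # Hypothetical helper, not shown in the source listing
    convs = nn.HybridSequential()
    for c, k in zip(num_channels, kernel_sizes):
        convs.add(nn.Conv1D(c, k, activation='relu'))
    return convs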
Example #9
 def __init__(self, vocab, embed_size, kernel_sizes, num_channels,
              **kwargs):
     super(TextCNN, self).__init__(**kwargs)
     self.embedding = nn.Embedding(len(vocab), embed_size)
     # Embedding layer that is not updated during training
     self.constant_embedding = nn.Embedding(len(vocab), embed_size)
     self.dropout = nn.Dropout(0.5)
     self.decoder = nn.Dense(2)
     # The max-over-time pooling layer has no weights, so one instance can be shared
     self.pool = nn.GlobalMaxPool1D()
     self.convs = nn.Sequential()  # create multiple one-dimensional convolutional layers
     for c, k in zip(num_channels, kernel_sizes):
         self.convs.add(nn.Conv1D(c, k, activation='relu'))
Example #10
 def __init__(self, vocab, embed_size, kernel_sizes, num_channels,
              **kwargs):
     super(TextCNN, self).__init__(**kwargs)
     self.embedding = nn.Embedding(len(vocab), embed_size)
     # Embedding layer that is not updated during training
     self.constant_embedding = nn.Embedding(len(vocab), embed_size)
     self.dropout = nn.Dropout(0.5)
     self.decoder = nn.Dense(2)
     # Max-over-time pooling layer
     self.pool = nn.GlobalMaxPool1D()
     self.convs = nn.Sequential()  # create multiple one-dimensional convolutional layers
     for c, k in zip(num_channels, kernel_sizes):
         self.convs.add(nn.Conv1D(c, k, activation='relu'))
Example #11
 def __init__(self, vocab, embed_size, kernel_sizes, num_channels,
              **kwargs):
     super(TextCNN, self).__init__(**kwargs)
     self.embedding = nn.Embedding(len(vocab), embed_size)
     # The embedding layer does not participate in training
     self.constant_embedding = nn.Embedding(len(vocab), embed_size)
     self.dropout = nn.Dropout(0.5)
     self.decoder = nn.Dense(2)
     # The max-over-time pooling layer has no weight, so it can share an
     # instance
     self.pool = nn.GlobalMaxPool1D()
     # Create multiple one-dimensional convolutional layers
     self.convs = nn.Sequential()
     for c, k in zip(num_channels, kernel_sizes):
         self.convs.add(nn.Conv1D(c, k, activation='relu'))
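Examples #9 to #11 show the same TextCNN constructor; the pooling layer is only exercised in the forward pass, which is not reproduced here. A sketch of what that pass typically looks like, assuming `from mxnet import nd`:

 def forward(self, inputs):
     # Hedged sketch of the usual TextCNN forward pass, not copied from the source
     # Concatenate the trainable and the frozen embeddings along the feature axis
     embeddings = nd.concat(self.embedding(inputs),
                            self.constant_embedding(inputs), dim=2)
     # Conv1D expects (batch, channels, width), so move the feature axis forward
     embeddings = embeddings.transpose((0, 2, 1))
     # Apply each conv, max-pool over time, flatten, and concatenate the branches
     encoding = nd.concat(*[nd.flatten(self.pool(conv(embeddings)))
                            for conv in self.convs], dim=1)
     return self.decoder(self.dropout(encoding))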
Example #12
def net_define_eu():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))  # presumably a project-defined block: NTC -> NCW before pooling
        net.add(nn.GlobalMaxPool1D())
        '''
        net.add(FeatureBlock1())
        '''
        net.add(extendDim(axes=3))
        net.add(PrimeConvCap(16, 32, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1)))
        net.add(CapFullyNGBlock(16, num_cap=12, input_units=32, units=16, route_num=3))
        net.add(nn.Dropout(0.2))
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
Example #13
def SequentialTextCNN(config):

    net = nn.Sequential()
    with net.name_scope():
        net.add(
            nn.Embedding(input_dim=config['vocab_size'],
                         output_dim=config['embedding_dim']))
        net.add(nn.Lambda(lambda x: x.transpose((0, 2, 1))))
        net.add(
            nn.Conv1D(channels=config['feature_map'],
                      kernel_size=config['kernel_size'][0],
                      strides=1))
        net.add(nn.BatchNorm(axis=1))
        net.add(nn.Activation('relu'))
        net.add(nn.GlobalMaxPool1D())
        net.add(nn.Dropout(rate=config['dropout_rate']))
        net.add(nn.Dense(units=2))
    return net
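Because this variant is a plain nn.Sequential, it can be exercised end to end with dummy data. A small usage sketch with an illustrative config (the keys match the ones read above; the values are made up):

import mxnet as mx

config = {'vocab_size': 10000, 'embedding_dim': 128, 'feature_map': 64,
          'kernel_size': [3], 'dropout_rate': 0.5}
net = SequentialTextCNN(config)
net.initialize()
x = mx.nd.random.uniform(0, config['vocab_size'], shape=(8, 50))  # 8 sequences of 50 token ids
print(net(x).shape)  # (8, 2)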
Example #14
 def __init__(self, vocab, embedding_size, ngram_kernel_sizes,
              nums_channels, num_outputs, **kwargs):
     super(TextCNN, self).__init__(**kwargs)
     self.ngram_kernel_sizes = ngram_kernel_sizes
     self.embedding_static = nn.Embedding(len(vocab), embedding_size)
     self.embedding_non_static = nn.Embedding(len(vocab), embedding_size)
     for i in range(len(ngram_kernel_sizes)):
         # One-dimensional convolutional layer.
         conv = nn.Conv1D(nums_channels[i],
                          kernel_size=ngram_kernel_sizes[i],
                          strides=1,
                          activation='relu')
         # Max-over-time pooling layer.
         bn = nn.BatchNorm()
         pool = nn.GlobalMaxPool1D()
         # Register the i-th conv as self.conv_{i}.
         setattr(self, f'conv_{i}', conv)
         setattr(self, f'bn_{i}', bn)
         # Register the i-th pool as self.pool_{i}.
         setattr(self, f'pool_{i}', pool)
     self.dropout = nn.Dropout(0.5)
     self.decoder = nn.Dense(num_outputs)
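With the blocks registered per index via setattr above, a matching forward pass would fetch them back with getattr. A hypothetical sketch, again assuming `from mxnet import nd`:

 def forward(self, inputs):
     # Hypothetical sketch, not taken from the source
     # Two embedding channels, concatenated along the channel axis after moving
     # to the (batch, channels, width) layout expected by Conv1D
     emb_static = self.embedding_static(inputs).transpose((0, 2, 1))
     emb_non_static = self.embedding_non_static(inputs).transpose((0, 2, 1))
     x = nd.concat(emb_static, emb_non_static, dim=1)
     # Retrieve the per-index conv/bn/pool blocks registered in __init__
     branches = [nd.flatten(getattr(self, f'pool_{i}')(
         getattr(self, f'bn_{i}')(getattr(self, f'conv_{i}')(x))))
         for i in range(len(self.ngram_kernel_sizes))]
     return self.decoder(self.dropout(nd.concat(*branches, dim=1)))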
Example #15
    def __init__(self, ctx=mx.cpu(), warmup=5, runs=25, inputs=None):
        # Set the default Inputs
        default_parameters = {
            "data": (32, 3, 256),
            "data_initializer": nd.normal,
            "layout": "NCW",
            "run_backward": True,
            "dtype": "float32"
        }

        super().__init__(ctx=ctx,
                         warmup=warmup,
                         runs=runs,
                         default_parameters=default_parameters,
                         custom_parameters=inputs)

        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=self.inputs["data"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["data_initializer"],
                                   attach_grad=self.inputs["run_backward"])

        self.block = nn.GlobalMaxPool1D(layout=self.inputs["layout"])
        self.block.initialize(ctx=self.ctx)
Example #16
 def __init__(self, channels, kernel_size, **kwargs):
     super(Convpool, self).__init__(**kwargs)
     with self.name_scope():
         self.conv = nn.Conv1D(channels, kernel_size, strides=1)
         self.pooling = nn.GlobalMaxPool1D()
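Only the constructor is shown here. A plausible hybrid_forward for a block like this, assuming inputs in the default NCW layout, simply chains the two layers:

 def hybrid_forward(self, F, x):
     # Hypothetical completion: convolve, then take the max over the time axis
     return F.flatten(self.pooling(self.conv(x)))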