Code example #1
File: embeddings.py  Project: dpressel/baseline
 def __init__(self, _, **kwargs):
     super(LookupTableEmbeddings, self).__init__(_, **kwargs)
     self.vsz = kwargs.get('vsz')
     self.dsz = kwargs.get('dsz')
     self.finetune = kwargs.get('finetune', True)
     weights = kwargs.get('weights')
     if weights is None:
         self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=0)
     else:
         self.embeddings = pytorch_embedding(weights, self.finetune)
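
The branch above either builds a randomly initialized table or wraps pretrained vectors. A minimal standalone sketch of that choice, assuming build_lookup_table is a hypothetical helper and nn.Embedding.from_pretrained stands in for the project's pytorch_embedding (names and defaults here are assumptions, not baseline's API):

import torch
import torch.nn as nn

def build_lookup_table(vsz, dsz, weights=None, finetune=True):
    # No pretrained weights: random table, index 0 reserved for padding,
    # mirroring the if-branch above.
    if weights is None:
        return nn.Embedding(vsz, dsz, padding_idx=0)
    # Pretrained weights: load them, and freeze the table unless finetuning.
    return nn.Embedding.from_pretrained(
        torch.as_tensor(weights, dtype=torch.float),
        freeze=not finetune)
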
Code example #2
File: embeddings.py  Project: byfaith/baseline
    def __init__(self, name, **kwargs):
        super(CharConvEmbeddings, self).__init__()
        self.vsz = kwargs.get('vsz')
        self.dsz = kwargs.get('dsz')
        self.finetune = kwargs.get('finetune', True)
        weights = kwargs.get('weights')
        if weights is None:
            self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=0)
        else:
            self.embeddings = pytorch_embedding(weights)
        char_filtsz = kwargs.get('cfiltsz', [3])
        if is_sequence(char_filtsz[0]):
            char_hsz = [pair[1] for pair in char_filtsz]
            char_filtsz = [pair[0] for pair in char_filtsz]
        else:
            char_hsz = kwargs.get('wsz', 30)

        activation_type = kwargs.get('activation', 'tanh')
        pdrop = kwargs.get('pdrop', 0.5)
        self.char_comp = ParallelConv(self.dsz, char_hsz, char_filtsz,
                                      activation_type, pdrop)
        wchsz = self.char_comp.outsz
        self.linear = pytorch_linear(wchsz, wchsz)
        gating = kwargs.get('gating', 'skip')
        GatingConnection = SkipConnection if gating == 'skip' else Highway
        num_gates = kwargs.get('num_gates', 1)
        self.gating_seq = nn.Sequential(
            OrderedDict([('gate-{}'.format(i), GatingConnection(wchsz))
                         for i in range(num_gates)]))
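
The cfiltsz handling above accepts either a flat list of filter widths (sharing a single hidden size from wsz) or a list of (filter width, hidden size) pairs. A small standalone illustration of that split, where split_filtsz is a hypothetical name rather than part of baseline and is_sequence is approximated with isinstance:

def split_filtsz(cfiltsz, default_hsz=30):
    # (filter width, hidden size) pairs -> two parallel lists;
    # a flat list of widths -> one shared hidden size.
    if isinstance(cfiltsz[0], (list, tuple)):
        return [p[0] for p in cfiltsz], [p[1] for p in cfiltsz]
    return list(cfiltsz), default_hsz

print(split_filtsz([(1, 32), (2, 64), (3, 128)]))  # ([1, 2, 3], [32, 64, 128])
print(split_filtsz([3, 4, 5]))                     # ([3, 4, 5], 30)
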
Code example #3
File: embeddings.py  Project: byfaith/baseline
 def __init__(self, name, **kwargs):
     super(CharLSTMEmbeddings, self).__init__(name, **kwargs)
     self.vsz = kwargs.get('vsz')
     self.dsz = kwargs.get('dsz')
     self.finetune = kwargs.get('finetune', True)
     weights = kwargs.get('weights')
     if weights is None:
         self.embeddings = nn.Embedding(self.vsz,
                                        self.dsz,
                                        padding_idx=Offsets.PAD)
     else:
         self.embeddings = pytorch_embedding(weights)
     self.lstmsz = kwargs.get('lstmsz', 50)
     layers = kwargs.get('layers', 1)
     pdrop = kwargs.get('pdrop', 0.5)
     rnn_type = kwargs.get('rnn_type', 'blstm')
     unif = kwargs.get('unif', 0)
     weight_init = kwargs.get('weight_init', 'uniform')
     self.char_comp = BiRNNWrapper(
         pytorch_lstm(self.dsz,
                      self.lstmsz,
                      rnn_type,
                      layers,
                      pdrop,
                      unif=unif,
                      initializer=weight_init,
                      batch_first=False), layers)
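
Here char_comp is a bidirectional LSTM over character embeddings. The sketch below is a minimal stand-in using plain nn.LSTM, assuming lstmsz is the total output size split across the two directions; baseline's pytorch_lstm and BiRNNWrapper add weight initialization, dropout, and multi-layer handling that are omitted here:

import torch
import torch.nn as nn

char_emb = nn.Embedding(100, 30, padding_idx=0)                 # dsz=30
char_lstm = nn.LSTM(30, 25, num_layers=1, bidirectional=True)   # lstmsz=50 in total

chars = torch.randint(1, 100, (7, 4))             # (char_len, batch), batch_first=False
_, (h_n, _) = char_lstm(char_emb(chars))
word_vec = torch.cat([h_n[-2], h_n[-1]], dim=-1)  # final fwd+bwd states
print(word_vec.shape)                             # torch.Size([4, 50])
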
Code example #4
    def __init__(self, _, **kwargs):
        super(PositionalLookupTableEmbeddings, self).__init__()
        self.vsz = kwargs.get('vsz')
        self.dsz = kwargs.get('dsz')
        self.dropout = nn.Dropout(kwargs.get('dropout', 0.1))
        self.finetune = kwargs.get('finetune', True)
        # This could get us in trouble, if in doubt, pick something big
        mxlen = kwargs.get('mxlen', 1000)
        max_timescale = kwargs.get('max_timescale', 1.0e4)

        weights = kwargs.get('weights')
        if weights is None:
            self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=0)
        else:
            self.embeddings = pytorch_embedding(weights, self.finetune)

        log_timescale_increment = math.log(max_timescale) / self.dsz
        inv_timescales = torch.exp(
            torch.arange(0, self.dsz, 2).float() * -log_timescale_increment)

        pe = torch.zeros(mxlen, self.dsz)
        position = torch.arange(0, mxlen).float().unsqueeze(1)
        pe[:, 0::2] = torch.sin(position * inv_timescales)
        pe[:, 1::2] = torch.cos(position * inv_timescales)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)
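
The sinusoidal table registered as pe above is typically added to the scaled token embeddings in the forward pass. That forward method is not part of the snippet, so the following is a standalone sketch of the usual pattern; scaling by sqrt(dsz) follows the standard sinusoidal-embedding recipe and is an assumption here:

import math
import torch
import torch.nn as nn

dsz, mxlen, vsz = 8, 50, 100
embeddings = nn.Embedding(vsz, dsz, padding_idx=0)

inv_timescales = torch.exp(torch.arange(0, dsz, 2).float() * -(math.log(1.0e4) / dsz))
position = torch.arange(0, mxlen).float().unsqueeze(1)
pe = torch.zeros(mxlen, dsz)
pe[:, 0::2] = torch.sin(position * inv_timescales)
pe[:, 1::2] = torch.cos(position * inv_timescales)
pe = pe.unsqueeze(0)                        # (1, mxlen, dsz), broadcast over batch

token_ids = torch.randint(1, vsz, (2, 10))  # (batch, seq_len)
x = embeddings(token_ids) * math.sqrt(dsz)  # scale, then add the positional signal
x = x + pe[:, :token_ids.size(1)]
print(x.shape)                              # torch.Size([2, 10, 8])
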
Code example #5
File: embeddings.py  Project: byfaith/baseline
 def __init__(self, _, **kwargs):
     super(LookupTableEmbeddings, self).__init__(_, **kwargs)
     self.vsz = kwargs.get('vsz')
     self.dsz = kwargs.get('dsz')
     self.finetune = kwargs.get('finetune', True)
     weights = kwargs.get('weights')
     if weights is None:
         self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=0)
     else:
         self.embeddings = pytorch_embedding(weights, self.finetune)
Code example #6
File: embeddings.py  Project: dpressel/baseline
 def __init__(self, name, **kwargs):
     super(CharLSTMEmbeddings, self).__init__(name, **kwargs)
     self.vsz = kwargs.get('vsz')
     self.dsz = kwargs.get('dsz')
     self.finetune = kwargs.get('finetune', True)
     weights = kwargs.get('weights')
     if weights is None:
         self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=Offsets.PAD)
     else:
         self.embeddings = pytorch_embedding(weights)
     self.lstmsz = kwargs.get('lstmsz', 50)
     layers = kwargs.get('layers', 1)
     pdrop = kwargs.get('pdrop', 0.5)
     rnn_type = kwargs.get('rnn_type', 'blstm')
     unif = kwargs.get('unif', 0)
     weight_init = kwargs.get('weight_init', 'uniform')
     self.char_comp = BiRNNWrapper(
         pytorch_lstm(self.dsz,
                      self.lstmsz,
                      rnn_type,
                      layers,
                      pdrop,
                      unif=unif,
                      initializer=weight_init,
                      batch_first=False), layers)
Code example #7
File: embeddings.py  Project: dpressel/baseline
 def __init__(self, name, **kwargs):
     super(CharConvEmbeddings, self).__init__()
     self.vsz = kwargs.get('vsz')
     self.dsz = kwargs.get('dsz')
     self.finetune = kwargs.get('finetune', True)
     weights = kwargs.get('weights')
     if weights is None:
         self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=0)
     else:
         self.embeddings = pytorch_embedding(weights)
     char_filtsz = kwargs.get('cfiltsz', [3])
     char_hsz = kwargs.get('wsz', 30)
     activation_type = kwargs.get('activation', 'tanh')
     pdrop = kwargs.get('pdrop', 0.5)
     self.char_comp = ParallelConv(self.dsz, char_hsz, char_filtsz, activation_type, pdrop)
     wchsz = self.char_comp.outsz
     self.linear = pytorch_linear(wchsz, wchsz)
     gating = kwargs.get('gating', 'skip')
     GatingConnection = SkipConnection if gating == 'skip' else Highway
     num_gates = kwargs.get('num_gates', 1)
     self.gating_seq = nn.Sequential(OrderedDict(
         [('gate-{}'.format(i), GatingConnection(wchsz)) for i in range(num_gates)]
     ))