def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
    super(BilstmCrf, self).__init__()
    self.embedding_dim = embedding_dim
    self.hidden_dim = hidden_dim
    self.vocab_size = vocab_size
    self.tag_to_ix = tag_to_ix
    self.tagset_size = len(tag_to_ix)

    self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
    self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                        num_layers=1, bidirectional=True)

    # Maps the output of the LSTM into tag space.
    self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)

    # Transition matrix: entry i, j is the score of
    # transitioning *to* i *from* j.
    param = torch.randn(self.tagset_size, self.tagset_size)
    self.transitions = nn.Parameter(param)

    # These two statements enforce the constraint that we never transition
    # to the start tag and never transition from the stop tag.
    self.transitions.data[tag_to_ix[START_TAG], :] = -10000
    self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000

    self.hidden = self.init_hidden()
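For context, a minimal, hypothetical usage sketch. It assumes the __init__ above sits inside an nn.Module subclass named BilstmCrf (as its super() call indicates) and borrows the init_hidden definition from the standard PyTorch BiLSTM-CRF tutorial, since that helper is called above but not shown; the toy tag set and sizes are invented for illustration.

import torch

START_TAG, STOP_TAG = "<START>", "<STOP>"

def init_hidden(self):
    # Fresh hidden/cell states for a single-layer bidirectional LSTM:
    # shape (num_layers * num_directions, batch, hidden_dim // 2).
    return (torch.randn(2, 1, self.hidden_dim // 2),
            torch.randn(2, 1, self.hidden_dim // 2))

BilstmCrf.init_hidden = init_hidden  # attach the helper referenced above

tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}
model = BilstmCrf(vocab_size=100, tag_to_ix=tag_to_ix,
                  embedding_dim=32, hidden_dim=16)
print(model.transitions.shape)  # torch.Size([5, 5])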
def __new__(cls: Type['AnalogContext'],
            analog_tile: 'BaseTile',
            parameter: Optional[Parameter] = None) -> 'AnalogContext':
    # pylint: disable=signature-differs
    if parameter is None:
        return Parameter.__new__(cls,
                                 data=ones((), device=analog_tile.device),
                                 requires_grad=True)

    # Rebind the existing Parameter's class in place instead of copying it.
    parameter.__class__ = cls
    return parameter
def __new__(cls: Type['AnalogContext'],
            analog_tile: 'BaseTile') -> 'AnalogContext':
    # pylint: disable=signature-differs
    return Parameter.__new__(cls,
                             data=ones((), device=analog_tile.device),
                             requires_grad=True)
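These two overrides share one pattern worth noting: Parameter.__new__ allocates a fresh instance directly as the subclass, while assigning to __class__ (the first variant's parameter branch) converts an existing Parameter in place without copying its storage. A self-contained sketch of that pattern, with FakeTile and MyContext invented here as stand-ins for BaseTile and AnalogContext:

from typing import Optional

import torch
from torch import ones
from torch.nn import Parameter

class FakeTile:
    """Stand-in for BaseTile; only the .device attribute is used here."""
    device = torch.device('cpu')

class MyContext(Parameter):
    def __new__(cls, analog_tile, parameter: Optional[Parameter] = None):
        if parameter is None:
            # Fresh scalar parameter, allocated directly as the subclass.
            return Parameter.__new__(cls,
                                     data=ones((), device=analog_tile.device),
                                     requires_grad=True)
        # Convert an existing Parameter in place: same storage, new class.
        parameter.__class__ = cls
        return parameter

tile = FakeTile()
fresh = MyContext(tile)
existing = Parameter(torch.zeros(3))
converted = MyContext(tile, existing)
print(type(fresh).__name__, type(converted).__name__)  # MyContext MyContext
print(converted is existing)  # True: the original object was reused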
def __new__(cls, data, requires_grad, manifold, c):
    # Only data and requires_grad participate in tensor allocation;
    # manifold and c play no role in __new__ itself.
    return Parameter.__new__(cls, data, requires_grad)
def __new__(cls, data=None, requires_grad=True, name=None):
    """Wrap __new__ of torch Parameter."""
    return Torch_Parameter.__new__(cls, data, requires_grad)
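The last two overrides illustrate the usual division of labor when subclassing Parameter: __new__ forwards only data and requires_grad to tensor allocation, while extra constructor arguments (manifold, c, name) are recorded as plain attributes elsewhere, typically in __init__. A minimal sketch under that assumption; NamedParameter and its name field are invented for illustration:

import torch
from torch.nn import Parameter

class NamedParameter(Parameter):
    """Parameter carrying an extra, non-tensor attribute."""

    def __new__(cls, data=None, requires_grad=True, name=None):
        # Tensor allocation only needs data and requires_grad;
        # the extra argument is handled in __init__ below.
        return Parameter.__new__(cls, data, requires_grad)

    def __init__(self, data=None, requires_grad=True, name=None):
        super().__init__()
        self.name = name  # plain Python attribute alongside the tensor data

p = NamedParameter(torch.zeros(2, 2), name='weights')
print(p.name, p.requires_grad, tuple(p.shape))  # weights True (2, 2)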