def __init__(self, size_vocab, size, depth=1, recur_depth=1,
             bidirectional=False, filter_length=6, filter_size=64, stride=2,
             drop_i=0.75, drop_s=0.25):
    super(Encoder, self).__init__()
    util.autoassign(locals())
    # Initial hidden state template with a batch dimension of 1.
    self.h0 = torch.autograd.Variable(torch.zeros(self.depth, 1, self.size))
    # 1D convolution over the input; subsamples the time axis by `stride`.
    self.Conv = conv.Convolution1D(self.size_vocab, self.filter_length,
                                   self.filter_size, stride=self.stride)
    # self.RNN = nn.GRU(self.filter_size, self.size, self.depth,
    #                   batch_first=True)
    self.RNN = stacked_gru.StackedGRU(self.filter_size, self.size, self.depth,
                                      bidirectional=self.bidirectional,
                                      residual=True, batch_first=True)
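# Hedged sketch: the constructors in this listing call
# util.autoassign(locals()) and then immediately read attributes such as
# self.depth and self.size, so the helper presumably binds each constructor
# argument to an attribute of the same name. The function below is an
# illustration inferred from that usage, not the actual util module:

def autoassign(args):
    """Bind every entry of `args` (except 'self') as an attribute on self."""
    obj = args.pop('self')
    for name, value in args.items():
        setattr(obj, name, value)

# Usage inside a constructor: util.autoassign(locals()) makes
# self.size_vocab, self.size, self.depth, ... available immediately.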
def __init__(self, size_vocab, size, depth=1, recur_depth=1, filter_length=6,
             filter_size=64, stride=2, drop_i=0.75, drop_s=0.25,
             residual=False, seed=1):
    super(Encoder, self).__init__()
    util.autoassign(locals())
    # self.h0 = torch.autograd.Variable(torch.zeros(self.depth, 1, self.size))
    self.Conv = conv.Convolution1D(self.size_vocab, self.filter_length,
                                   self.filter_size, stride=self.stride)
    # Stack of Recurrent Highway Network layers, with dropout applied to the
    # inputs (drop_i) and to the recurrent state (drop_s).
    self.RHN = rhn.StackedRHNH0(self.filter_size, self.size,
                                depth=self.depth,
                                recur_depth=self.recur_depth,
                                drop_i=self.drop_i, drop_s=self.drop_s,
                                residual=self.residual, seed=self.seed)
def __init__(self, size_vocab, size, nb_conv_layer=1, depth=1,
             filter_length=6, filter_size=[64], stride=2, dropout_p=0.0,
             relu=False, maxpool=False, bidirectional=False):
    super(SpeechEncoderBottom, self).__init__()
    util.autoassign(locals())
    # Stack of nb_conv_layer 1D convolutions, each optionally followed by a
    # ReLU; filter_size lists the number of output channels per layer.
    layers = []
    size_in = self.size_vocab
    for i_conv in range(self.nb_conv_layer):
        layers.append(conv.Convolution1D(size_in, self.filter_length,
                                         self.filter_size[i_conv],
                                         stride=self.stride,
                                         maxpool=self.maxpool))
        if self.relu:
            layers.append(nn.ReLU(True))
        size_in = self.filter_size[i_conv]
    self.Conv = nn.Sequential(*layers)
    if self.depth > 0:
        # A bidirectional RNN needs initial states for both directions,
        # hence the doubled first dimension.
        num_directions = 2 if self.bidirectional else 1
        self.h0 = torch.autograd.Variable(
            torch.zeros(self.depth * num_directions, 1, self.size))
        self.c0 = torch.autograd.Variable(
            torch.zeros(self.depth * num_directions, 1, self.size))
        self.Dropout = nn.Dropout(p=self.dropout_p)
        # TODO: LSTM/GRU?
        # self.RNN = nn.GRU(self.filter_size[self.nb_conv_layer - 1],
        #                   self.size, self.depth, batch_first=True,
        #                   bidirectional=self.bidirectional)
        self.RNN = nn.LSTM(self.filter_size[self.nb_conv_layer - 1],
                           self.size, self.depth, batch_first=True,
                           bidirectional=self.bidirectional)
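# Hedged sketch of the temporal downsampling done by the conv stack in
# SpeechEncoderBottom above, using plain torch.nn.Conv1d as a stand-in for
# conv.Convolution1D (whose implementation is not shown here; it presumably
# also handles the (batch, time, features) <-> (batch, channels, time)
# transpose and the optional maxpool). The feature size 13 and
# filter_size=[64, 128] are illustrative values, not values from the source:

import torch
import torch.nn as nn

conv_stack = nn.Sequential(
    nn.Conv1d(13, 64, kernel_size=6, stride=2),
    nn.ReLU(True),
    nn.Conv1d(64, 128, kernel_size=6, stride=2),
    nn.ReLU(True),
)
x = torch.randn(8, 13, 1000)  # (batch, channels, time)
y = conv_stack(x)             # each layer maps T -> floor((T - 6) / 2) + 1
print(y.shape)                # torch.Size([8, 128, 247])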
def __init__(self, size_vocab, size, depth=1, filter_length=6, filter_size=64,
             stride=2, dropout_p=0.0):
    super(SpeechEncoderBottomStack, self).__init__()
    util.autoassign(locals())
    self.Conv = conv.Convolution1D(self.size_vocab, self.filter_length,
                                   self.filter_size, stride=self.stride)
    if self.depth > 0:
        self.Dropout = nn.Dropout(p=self.dropout_p)
        self.RNN = GRUStack(self.filter_size, self.size, self.depth)
def __init__(self, size_vocab, size, depth=1, filter_length=6, filter_size=64,
             stride=2, dropout_p=0.0):
    super(SpeechEncoderBottom, self).__init__()
    util.autoassign(locals())
    self.Conv = conv.Convolution1D(self.size_vocab, self.filter_length,
                                   self.filter_size, stride=self.stride)
    if self.depth > 0:
        # Initial hidden state template with a batch dimension of 1.
        self.h0 = torch.autograd.Variable(
            torch.zeros(self.depth, 1, self.size))
        self.Dropout = nn.Dropout(p=self.dropout_p)
        self.RNN = nn.GRU(self.filter_size, self.size, self.depth,
                          batch_first=True)
def __init__(self, size_vocab, size, depth=1, filter_length=6, filter_size=64,
             stride=2, residual=False):
    super(Encoder, self).__init__()
    util.autoassign(locals())
    self.h0 = torch.autograd.Variable(torch.zeros(self.depth, 1, self.size))
    self.Conv = conv.Convolution1D(self.size_vocab, self.filter_length,
                                   self.filter_size, stride=self.stride,
                                   padding=0)
    self.RNN = nn.GRU(self.filter_size, self.size, self.depth,
                      batch_first=True)
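# Several constructors above create h0 (and c0) with a batch dimension of 1.
# PyTorch RNNs expect initial states shaped
# (num_layers * num_directions, batch, hidden_size), so the stored template
# is presumably expanded to the actual batch size in forward(). A minimal
# sketch of that pattern (the expansion itself is an assumption, not shown
# in these constructors):

import torch

depth, size, batch = 1, 512, 8
h0 = torch.zeros(depth, 1, size)
h0_batch = h0.expand(depth, batch, size).contiguous()
print(h0_batch.shape)  # torch.Size([1, 8, 512])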
def __init__(self, size_feature, size, depth=1, filter_length=6,
             filter_size=64, stride=2, size_attn=512, dropout_p=0.0):
    super(Encoder, self).__init__()
    util.autoassign(locals())
    self.h0 = torch.autograd.Variable(torch.zeros(self.depth, 1, self.size))
    self.Dropout = nn.Dropout(p=self.dropout_p)
    self.Conv = conv.Convolution1D(self.size_feature, self.filter_length,
                                   self.filter_size, stride=self.stride,
                                   padding=0)
    self.RNN = nn.GRU(self.filter_size, self.size, self.depth,
                      batch_first=True)
    # Self-attention pooling over the GRU outputs.
    self.Attn = attention.SelfAttention(self.size, size=self.size_attn)
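# Hedged end-to-end sketch of the Conv -> GRU -> SelfAttention pipeline that
# the last Encoder above composes, written with plain torch modules. Mean
# pooling stands in for attention.SelfAttention, whose implementation is not
# shown here (it presumably returns a weighted sum over time computed from a
# size_attn-dimensional projection). size_feature=39 and the input length are
# illustrative values:

import torch
import torch.nn as nn

conv = nn.Conv1d(39, 64, kernel_size=6, stride=2)
rnn = nn.GRU(64, 512, num_layers=1, batch_first=True)

x = torch.randn(4, 39, 500)   # (batch, features, time)
h = conv(x).transpose(1, 2)   # -> (batch, time', 64) for the batch_first GRU
out, _ = rnn(h)               # -> (batch, time', 512)
pooled = out.mean(dim=1)      # stand-in for self-attention pooling
print(pooled.shape)           # torch.Size([4, 512])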