def __init__(self):
    """Build the baseline bi-LSTM classifier: 60-dim input -> 32-dim encoding -> bi-LSTM -> 3-way logits."""
    super().__init__()
    # Per-timestep input projection (60 features -> 32), applied via SequenceWise.
    self.encoder_fc = layers.SequenceWise(nn.Linear(60, 32))
    self.encoder_dropout = layers.SequenceWise(nn.Dropout(0.3))
    # Single-layer bidirectional LSTM over the encoded sequence (batch_first).
    self.seq_model = nn.LSTM(32, 32, 1, bidirectional=True, batch_first=True)
    # NOTE(review): a bidirectional LSTM with hidden_size=32 emits 64 features per
    # step; final_fc expects 32 — presumably forward() slices or sums the two
    # directions. Confirm against the forward pass.
    self.final_fc = nn.Linear(32, 3)
def __init__(self):
    """Build the attention bi-LSTM classifier: 60-dim input -> 32-dim encoding -> bi-LSTM (128-dim out) -> 3-way logits."""
    super().__init__()
    # Per-timestep input projection (60 features -> 32), applied via SequenceWise.
    self.encoder_fc = layers.SequenceWise(nn.Linear(60, 32))
    self.encoder_dropout = layers.SequenceWise(nn.Dropout(0.3))
    # Single-layer bidirectional LSTM: hidden_size=64 -> 128 features per step.
    self.seq_model = nn.LSTM(32, 64, 1, bidirectional=True, batch_first=True)
    self.final_fc = nn.Linear(128, 3)
    # Attention score scaling factor.
    # NOTE(review): 1/sqrt(60) scales by the raw input dim; scaled dot-product
    # attention conventionally uses the key dim (64 or 128 here) — verify
    # against the attention computation in forward().
    self.scale = 1. / math.sqrt(60)
def __init__(self):
    """Build the CNN + bi-LSTM classifier: 60-dim input -> 32-dim encoding -> strided Conv1d (64 ch) -> bi-LSTM -> 3-way logits."""
    super().__init__()
    # Per-timestep input projection (60 features -> 32), applied via SequenceWise.
    self.encoder_fc = layers.SequenceWise(nn.Linear(60, 32))
    # Heavier dropout than the plain baseline (0.7 vs 0.3).
    self.encoder_dropout = layers.SequenceWise(nn.Dropout(0.7))
    # Single-layer bidirectional LSTM: hidden_size=64 -> 128 features per step.
    self.seq_model = nn.LSTM(64, 64, 1, bidirectional=True, batch_first=True)
    self.prefinal_fc = layers.SequenceWise(nn.Linear(128, 32))
    # NOTE(review): prefinal_fc outputs 32 features while final_fc expects 128 —
    # presumably forward() feeds the raw LSTM output to final_fc; confirm.
    self.final_fc = nn.Linear(128, 3)
    # Strided temporal conv (halves the sequence length) feeding the LSTM.
    # Kernel size candidates tried: 3, 5, 7.
    self.cnn = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, stride=2)