    def __init__(self, embedding_dim, output_dim, hidden_size, num_layers,
                 bidirectional, dropout, pretrained_embeddings):
        super(TextRCNN, self).__init__()

        self.embedding = nn.Embedding.from_pretrained(pretrained_embeddings,
                                                      freeze=False)
        self.rnn = nn.LSTM(embedding_dim,
                           hidden_size,
                           num_layers,
                           bidirectional=bidirectional,
                           dropout=dropout)
        self.W2 = Linear(2 * hidden_size + embedding_dim,  # assumes bidirectional=True
                         hidden_size * 2)
        self.fc = Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
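
The constructor above only wires the layers. A minimal forward sketch, assuming the usual TextRCNN recipe (concatenate the bidirectional LSTM output with the raw embedding, project through W2 with tanh, then max-pool over time); shapes follow nn.LSTM's default (seq_len, batch, features) layout:

import torch
import torch.nn.functional as F

def forward(self, text):
    # text: (seq_len, batch)
    embedded = self.dropout(self.embedding(text))    # (seq_len, batch, embedding_dim)
    outputs, _ = self.rnn(embedded)                  # (seq_len, batch, 2 * hidden_size)
    x = torch.cat((outputs, embedded), dim=2)        # context plus word vector
    y = torch.tanh(self.W2(x))                       # (seq_len, batch, 2 * hidden_size)
    y = y.permute(1, 2, 0)                           # (batch, 2 * hidden_size, seq_len)
    pooled = F.max_pool1d(y, y.shape[2]).squeeze(2)  # max over the time dimension
    return self.fc(pooled)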
Example 2
    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 hidden_size,
                 output_dim=2,
                 num_layers=1,
                 bidirectional=False):
        super(TextRCNN, self).__init__()

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.rnn = nn.LSTM(embedding_dim,
                           hidden_size,
                           num_layers,
                           bidirectional=bidirectional)
        self.W2 = Linear(hidden_size + embedding_dim,  # assumes bidirectional=False
                         hidden_size * 2)
        self.fc = Linear(hidden_size * 2, output_dim)
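
A hypothetical instantiation of this unidirectional variant (the sizes below are illustrative, not from the source):

model = TextRCNN(vocab_size=25000, embedding_dim=100, hidden_size=128)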
Example 3
    def __init__(self, word_dim, char_dim, output_dim, hidden_size, num_layers,
                 bidirectional, dropout, word_emb, char_emb, highway_layers):
        super(TextRCNNHighway, self).__init__()

        self.char_embedding = nn.Embedding.from_pretrained(char_emb,
                                                           freeze=False)
        self.word_embedding = nn.Embedding.from_pretrained(word_emb,
                                                           freeze=False)

        self.text_embedding = Embedding(highway_layers, word_dim, char_dim)

        self.rnn = nn.LSTM(word_dim + char_dim,
                           hidden_size,
                           num_layers,
                           bidirectional=bidirectional,
                           dropout=dropout)
        self.W2 = Linear(2 * hidden_size + word_dim + char_dim,
                         hidden_size * 2)
        self.fc = Linear(hidden_size * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
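
Embedding here is a project-local module, not nn.Embedding; given its (highway_layers, word_dim, char_dim) signature it presumably fuses the word and character vectors through a highway network. A minimal sketch of one highway layer under the standard formulation y = t * H(x) + (1 - t) * x:

import torch
import torch.nn as nn

class HighwayLayer(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.transform = nn.Linear(dim, dim)  # H(x)
        self.gate = nn.Linear(dim, dim)       # T(x)

    def forward(self, x):
        h = torch.relu(self.transform(x))
        t = torch.sigmoid(self.gate(x))
        return t * h + (1 - t) * x  # gated blend of transform and carry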
Example 4
    def __init__(self, embedding_dim, n_filters, filter_sizes, output_dim,
                 dropout, pretrained_embeddings):
        super().__init__()

        self.embedding = nn.Embedding.from_pretrained(pretrained_embeddings,
                                                      freeze=False)

        self.convs = Conv1d(embedding_dim, n_filters, filter_sizes)

        self.fc = Linear(len(filter_sizes) * n_filters, output_dim)

        self.dropout = nn.Dropout(dropout)
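
Conv1d here is likewise a local wrapper rather than torch's nn.Conv1d, since it accepts a list of filter sizes. A plausible sketch, assuming the standard Kim-style text CNN (one convolution per kernel size, ReLU, max-pool over time, concatenate), which also matches the len(filter_sizes) * n_filters input of the classifier:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Conv1d(nn.Module):
    def __init__(self, in_channels, n_filters, filter_sizes):
        super().__init__()
        self.convs = nn.ModuleList(
            [nn.Conv1d(in_channels, n_filters, fs) for fs in filter_sizes])

    def forward(self, x):
        # x: (batch, in_channels, seq_len)
        conved = [F.relu(conv(x)) for conv in self.convs]
        pooled = [F.max_pool1d(c, c.shape[2]).squeeze(2) for c in conved]
        return torch.cat(pooled, dim=1)  # (batch, len(filter_sizes) * n_filters)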
Example 5
    def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes,
                 output_dim, dropout, pad_idx):

        super().__init__()

        self.embedding = nn.Embedding(vocab_size,
                                      embedding_dim,
                                      padding_idx=pad_idx)

        self.convs = Conv(embedding_dim, n_filters, filter_sizes)

        self.fc = Linear(len(filter_sizes) * n_filters, output_dim)

        self.dropout = nn.Dropout(dropout)
Example 6
def get_model(args):
    if args.model == "mlp":
        return MLP(args.input_size * 2, args.hidden_size, args.dropout,
                   args.output_size)
    elif args.model == "attention":
        return Attention(args.input_size * 2,
                         args.hidden_size[0],
                         args.layers,
                         args.dropout,
                         args.output_size,
                         gpu=args.gpu)
    elif args.model == 'linear':
        return Linear(args.input_size * 2, args.output_size)
    else:
        raise ValueError(f"unknown model: {args.model}")
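
A hypothetical call, with argparse.Namespace standing in for the parsed command-line arguments (the field values are illustrative):

from argparse import Namespace

args = Namespace(model="mlp", input_size=300, hidden_size=64,
                 dropout=0.5, output_size=2)
model = get_model(args)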
Example 7
    def __init__(self, word_dim, char_dim, n_filters, filter_sizes, output_dim,
                 dropout, word_emb, char_emb, highway_layers):

        super().__init__()

        self.char_embedding = nn.Embedding.from_pretrained(char_emb,
                                                           freeze=False)
        self.word_embedding = nn.Embedding.from_pretrained(word_emb,
                                                           freeze=False)

        self.text_embedding = Embedding(highway_layers, word_dim, char_dim)

        self.convs = Conv1d(word_dim + char_dim, n_filters, filter_sizes)

        self.fc = Linear(len(filter_sizes) * n_filters, output_dim)

        self.dropout = nn.Dropout(dropout)
Example 8
import sys

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device  # echo the selected device in the notebook
SIND = int(sys.argv[2])  # stop index taken from the command line

# In[4]:

# SingleStop, SROUTE and LAG are assumed to come from earlier notebook cells
dset = SingleStop(SROUTE, SIND, 'train', 32, lag=LAG).generator()
evalset = SingleStop(SROUTE, SIND, 'test', 32, lag=LAG).generator()

# In[5]:

from models.Linear import Linear

# In[6]:

model = Linear(lag=5).to(device)  # note: hard-codes lag=5 rather than reusing LAG
model.device = device

# In[7]:

criterion, opt, sch = model.params(lr=0.001)

# In[8]:

evaluate(evalset,
         model,
         crit=lambda _y, y: mape(tonpy(_y)[:, 0],
                                 tonpy(y)[:, 0]))
evaluate(evalset,
         model,
         crit=lambda _y, y: criterion(_y[:, 0], y[:, 0]).item())
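
tonpy and mape are project helpers imported elsewhere in the notebook; a minimal sketch of what they plausibly do (detach to NumPy, then mean absolute percentage error):

import numpy as np

def tonpy(t):
    # torch tensor -> cpu numpy array
    return t.detach().cpu().numpy()

def mape(pred, true):
    # mean absolute percentage error, in percent
    return np.mean(np.abs((true - pred) / true)) * 100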
Example 9
import tensorflow as tf  # used below to clear the Keras session between models

for loc in ["EPO", "SIO"]:
    print(loc)
    for d_type in ["std_anomalies"]:
        print(d_type)
        file = "../data/" + d_type + "_" + loc + ".nc"
        d = data(file)  # data() is a project-local NetCDF loader imported elsewhere
        d_train, d_test, d_val = d.get_data()
        if loc == "EPO":
            input_dim = (40, 60)
        else:
            input_dim = (20, 120)
        n_features = 3

        from models.Linear import Linear
        l = Linear(input_dim, n_features)
        losses = l.train(d_train, d_test, num_epochs=40, lr=1e-3)
        l.save_weights('../models/saved_models/' + loc + '/' + d_type +
                       '/Linear/linear')
        tf.keras.backend.clear_session()

        from models.ANN import ANN
        l = ANN(input_dim, n_features, location=loc)
        losses = l.train(d_train, d_test, num_epochs=40, lr=1e-3)
        l.save_weights('../models/saved_models/' + loc + '/' + d_type +
                       '/ANN/ann')  # ANN weights get their own directory
        tf.keras.backend.clear_session()

        from models.CNN import CNN
        l = CNN(input_dim,
                n_features,