"""Train the text-classification model on the Weibo sentiment dataset."""
import torch
import torch.nn as nn
from torch import optim

from models import Model
from datasets import data_loader, text_CLS
from configs import Config

cfg = Config()

data_path = 'sources/weibo_senti_100k.csv'
data_stop_path = 'sources/hit_stopword'
dict_path = 'sources/dict'

dataset = text_CLS(dict_path, data_path, data_stop_path)
train_dataloader = data_loader(dataset, cfg)

# The model's pooling layer is sized by the padded sequence length, so the
# config must be updated from the dataset before the model is constructed.
cfg.pad_size = dataset.max_len_seq

model_text_cls = Model(config=cfg)
model_text_cls.to(cfg.device)

# NOTE(review): CrossEntropyLoss expects raw logits, but Model.forward
# appears to end in an explicit Softmax — confirm the model head; training
# quality may suffer from the double softmax.
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(model_text_cls.parameters(), lr=cfg.learn_rate)

for epoch in range(cfg.num_epochs):
    for i, batch in enumerate(train_dataloader):
        label, data = batch
        # Batches arrive as plain sequences; convert to int64 tensors for
        # the embedding lookup and the loss, and move to the target device.
        data = torch.tensor(data, dtype=torch.int64).to(cfg.device)
        label = torch.tensor(label, dtype=torch.int64).to(cfg.device)

        optimizer.zero_grad()
        pred = model_text_cls(data)  # idiomatic __call__ instead of .forward()
        loss_val = loss_func(pred, label)
        # Fix: the visible original computed the loss but never backpropagated
        # or stepped the optimizer, so the model parameters were never updated.
        loss_val.backward()
        optimizer.step()
# NOTE(review): this chunk begins mid-way through the nn.LSTM(...) constructor
# call inside Model.__init__; the call's opening and the class header are
# outside this view.
        bidirectional=True, batch_first=True, dropout=config.dropout)
        # Max-pool across the full padded sequence length (the time dimension).
        self.maxpool = nn.MaxPool1d(config.pad_size)
        # Classifier head over concatenated [embedding, BiLSTM] features:
        # hidden_size * 2 from the bidirectional LSTM plus embed_size.
        self.fc = nn.Linear(config.hidden_size * 2 + config.embed_size, config.num_classes)
        # NOTE(review): the training script pairs this model with
        # nn.CrossEntropyLoss, which already applies log-softmax to raw
        # logits — this explicit Softmax is likely redundant/harmful during
        # training; confirm against the loss before changing.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Classify a batch of token ids: embed -> BiLSTM -> concat -> pool -> fc.

        x: int64 tensor of token ids — presumably (batch, pad_size); TODO confirm.
        Returns softmax scores of shape (batch, num_classes).
        """
        embed = self.embeding(x)
        # batch_first=True, so LSTM output is (batch, seq, hidden_size * 2).
        out, _ = self.lstm(embed)
        # Concatenate the raw embeddings with the LSTM features along the
        # last (feature) dimension.
        out = torch.cat((embed, out), 2)
        out = F.relu(out)
        # Move features to the channel dim for MaxPool1d, pool over time,
        # then flatten to (batch, features) for the linear head.
        out = out.permute(0, 2, 1)
        out = self.maxpool(out).reshape(out.size()[0], -1)
        out = self.fc(out)
        out = self.softmax(out)
        return out


# Smoke test: build the model with a dummy config and push one fake batch
# of 640 token ids through it.
if __name__ == '__main__':
    from configs import Config

    cfg = Config()
    cfg.pad_size = 640
    model_textcls = Model(config=cfg)
    input_tensor = torch.tensor([i for i in range(640)]).reshape([1, 640])
    out_tensor = model_textcls.forward(input_tensor)
    print(out_tensor.size())
    print(out_tensor)