import datetime
import glob
import os
import time

import jieba
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import KFold

# Project-local helpers assumed importable from this repo: TextCNN,
# TextCNNConfig, StatsLogger, load, get_embedding_type, accuracy,
# compute_dataset_stats.


def import_models(dataset):
    """Load every trained CNN checkpoint for `dataset`, keyed by filename."""
    models = {}
    # The training matrix and vocab are identical for every checkpoint,
    # so load them once rather than once per file.
    X_train, y_train = load('{}_train'.format(dataset))
    vocab = load('{}_vocab'.format(dataset)).vocab
    embedding_dims = 300
    for f in glob.glob('checkpoints/cnn_{}_*'.format(dataset)):
        fname = os.path.split(f)[1]
        embedding_type = get_embedding_type(fname)
        model = TextCNN(dataset=dataset,
                        input_size=X_train.shape[1],
                        vocab_size=len(vocab) + 1,
                        embedding_dims=embedding_dims,
                        embedding_type=embedding_type)
        model.load_state_dict(torch.load(f))
        model.eval()
        models[fname] = model
    return models
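# Usage sketch (hypothetical): evaluate every imported checkpoint on the
# validation split. 'imdb' is an assumed dataset name, not from the source;
# compute_dataset_stats is the project helper used by train() below.
def _evaluate_imported_models(dataset='imdb'):
    models = import_models(dataset)
    X_valid, y_valid = load('{}_valid'.format(dataset))
    X_valid = torch.as_tensor(X_valid, dtype=torch.long)
    y_valid = torch.as_tensor(y_valid, dtype=torch.float)
    for fname, model in models.items():
        _, acc, _ = compute_dataset_stats(X_valid, y_valid, model,
                                          nn.BCEWithLogitsLoss(), 256)
        print('{}: valid acc={:.3f}'.format(fname, acc))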
def train(name, dataset, epochs, batch_size, learning_rate, regularization,
          embedding_dims, embedding_type):
    dirname, _ = os.path.split(os.path.abspath(__file__))
    run_uid = datetime.datetime.today().strftime('%Y-%m-%dT%H:%M:%S')
    logger = StatsLogger(dirname, 'stats', name, run_uid)

    print('Loading data')
    X_train, y_train = load('{}_train'.format(dataset))
    X_valid, y_valid = load('{}_valid'.format(dataset))
    vocab = load('{}_vocab'.format(dataset)).vocab

    X_train = torch.as_tensor(X_train, dtype=torch.long)
    y_train = torch.as_tensor(y_train, dtype=torch.float)
    X_valid = torch.as_tensor(X_valid, dtype=torch.long)
    y_valid = torch.as_tensor(y_valid, dtype=torch.float)

    prev_acc = 0
    model = TextCNN(dataset=dataset,
                    input_size=X_train.size()[1],
                    vocab_size=len(vocab) + 1,
                    embedding_dims=embedding_dims,
                    embedding_type=embedding_type)
    print(model)
    print('Parameters: {}'.format(sum(p.numel() for p in model.parameters()
                                      if p.requires_grad)))
    print('Training samples: {}'.format(len(X_train)))

    # Move data and model to the GPU once, up front, when one is available.
    if torch.cuda.is_available():
        X_train = X_train.cuda()
        y_train = y_train.cuda()
        X_valid = X_valid.cuda()
        y_valid = y_valid.cuda()
        model = model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=learning_rate,
                           weight_decay=regularization)
    criterion = nn.BCEWithLogitsLoss()

    print('Starting training')
    for epoch in range(epochs):
        epoch_loss = []
        epoch_acc = []
        for batch in minibatch_iter(len(X_train), batch_size):
            model.train()
            X_train_batch = X_train[batch]
            y_train_batch = y_train[batch]

            optimizer.zero_grad()
            output = model(X_train_batch)
            train_loss = criterion(output, y_train_batch)
            train_acc = accuracy(output, y_train_batch)
            epoch_loss.append(train_loss.item())
            epoch_acc.append(train_acc.item())
            train_loss.backward()
            optimizer.step()

        model.eval()
        train_loss, train_acc = np.mean(epoch_loss), np.mean(epoch_acc)
        valid_loss, valid_acc, _ = compute_dataset_stats(
            X_valid, y_valid, model, nn.BCEWithLogitsLoss(), 256)

        stats = [epoch + 1, train_loss, train_acc, valid_loss, valid_acc]
        epoch_string = ('* Epoch {}: t_loss={:.3f}, t_acc={:.3f}, '
                        'v_loss={:.3f}, v_acc={:.3f}')
        print(epoch_string.format(*stats))
        logger.write(stats)

        # Checkpoint the model whenever validation accuracy improves.
        if prev_acc < valid_acc:
            prev_acc = valid_acc
            model_path = os.path.join(dirname, 'checkpoints', name)
            torch.save(model.state_dict(), model_path)

    logger.close()
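# train() and cv_score() below both rely on a project-local minibatch_iter
# helper that is not shown in this section. The sketch below is inferred
# purely from its call sites (it must yield index arrays that can slice the
# data tensors); the real implementation may differ.
def minibatch_iter(n, batch_size, shuffle=True):
    """Yield arrays of indices covering a (shuffled) range of n samples."""
    idx = np.random.permutation(n) if shuffle else np.arange(n)
    for start in range(0, n, batch_size):
        yield idx[start:start + batch_size]


# Invocation sketch (hypothetical values): the checkpoint name matches the
# 'cnn_<dataset>_*' pattern that import_models() globs for; 'imdb' and
# 'glove' are assumed dataset and embedding-type tags.
def _train_example():
    train(name='cnn_imdb_glove', dataset='imdb', epochs=10, batch_size=32,
          learning_rate=1e-4, regularization=0, embedding_dims=300,
          embedding_type='glove')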
def cv_score(dataset, embedding_type, epochs, batch_size=32,
             learning_rate=1e-4, regularization=0):
    """10-fold cross-validation; assumes a CUDA device is available."""
    kf = KFold(10)
    X, y = load('{}_train'.format(dataset))
    vocab = load('{}_vocab'.format(dataset)).vocab

    cv_acc = []
    cv_std = []
    for ci, (train_index, test_index) in enumerate(kf.split(X)):
        X_train, y_train = X[train_index], y[train_index]
        X_test, y_test = X[test_index], y[test_index]
        X_train = torch.as_tensor(X_train, dtype=torch.long).cuda()
        y_train = torch.as_tensor(y_train, dtype=torch.float).cuda()
        X_test = torch.as_tensor(X_test, dtype=torch.long).cuda()
        y_test = torch.as_tensor(y_test, dtype=torch.float).cuda()

        model = TextCNN(dataset=dataset,
                        input_size=X_train.shape[1],
                        vocab_size=len(vocab) + 1,
                        embedding_dims=300,
                        embedding_type=embedding_type).cuda()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate,
                               weight_decay=regularization)
        criterion = nn.BCEWithLogitsLoss()

        model.train()
        for epoch in range(epochs):
            for batch in minibatch_iter(len(X_train), batch_size):
                X_train_batch = X_train[batch]
                y_train_batch = y_train[batch]
                optimizer.zero_grad()
                output = model(X_train_batch)
                train_loss = criterion(output, y_train_batch)
                train_loss.backward()
                optimizer.step()

        model.eval()
        _, test_acc, test_std = compute_dataset_stats(
            X_test, y_test, model, nn.BCEWithLogitsLoss(), 256)
        cv_acc.append(test_acc)
        cv_std.append(test_std)
        print(' [{}] acc={}, std={}'.format(ci + 1, test_acc, test_std))

    print('{} - {}'.format(dataset, embedding_type))
    print('Mean acc - {}'.format(np.mean(cv_acc)))
    print('Min acc - {}'.format(np.min(cv_acc)))
    print('Max acc - {}'.format(np.max(cv_acc)))
    print('Mean std - {}'.format(np.mean(cv_std)))
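# Invocation sketch (hypothetical): compare embedding types with 10-fold CV.
# The dataset and embedding-type names are assumptions, not from the source.
def _cv_example():
    for emb in ('random', 'glove'):
        cv_score('imdb', emb, epochs=10)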
class Classify:
    def __init__(self, features='word', device='gpu'):
        self.features = features
        self.sentence_length = TextCNNConfig.sequence_length
        self.device = device
        self.__device()
        self.load_vocab()
        self.__load_model()

    def __device(self):
        """Resolve the requested device, falling back to CPU."""
        if torch.cuda.is_available() and self.device == 'gpu':
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')

    def __load_model(self):
        self.model = TextCNN(TextCNNConfig)
        # map_location keeps GPU-trained checkpoints loadable on CPU-only hosts.
        self.model.load_state_dict(torch.load('./ckpts/cnn_model.pth',
                                              map_location=self.device))
        self.model.to(self.device)
        self.model.eval()

    def load_vocab(self):
        # token -> id
        with open('./ckpts/vocab.txt', 'r', encoding='utf-8') as f:
            vocab = f.read().strip().split('\n')
        self.vocab = {token: i for i, token in enumerate(vocab)}
        # id -> label name
        with open('./ckpts/target.txt', 'r', encoding='utf-8') as f:
            target = f.read().strip().split('\n')
        self.target = {i: label for i, label in enumerate(target)}

    def cut_words(self, sentence: str) -> list:
        if self.features == 'word':
            return jieba.lcut(sentence)
        return list(sentence)

    def sentence_cut(self, sentence):
        """Convert one sentence to token IDs, truncating or padding to the
        fixed sequence length."""
        words = self.cut_words(sentence)
        if len(words) >= self.sentence_length:
            sentence_cutted = words[:self.sentence_length]
        else:
            sentence_cutted = words + ['<PAD>'] * (self.sentence_length - len(words))
        return [self.vocab[w] if w in self.vocab else self.vocab['<UNK>']
                for w in sentence_cutted]

    def predict(self, content):
        """Predict the class label for a single sentence."""
        with torch.no_grad():
            content_id = [self.sentence_cut(content)]
            start_time = time.time()
            one_batch_input = torch.LongTensor(content_id).to(self.device)
            outputs = self.model(one_batch_input)
            _, max_index = torch.max(outputs, dim=1)
            predict = max_index.cpu().numpy()
            print('inference time: {:.4f}s'.format(time.time() - start_time))
        return self.target[predict[0]]
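# Usage sketch (hypothetical): classify one sentence with jieba-based word
# features, assuming the ./ckpts/ files above exist; the sample text is
# illustrative.
def _classify_example():
    clf = Classify(features='word', device='gpu')
    label = clf.predict('今天股市大幅上涨')  # "The stock market rose sharply today"
    print(label)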