# excerpt: epoch/step training loop; model, loss_fn, opt, tr_dl, val_dl, writer,
# and device are defined earlier in the surrounding script
for epoch in tqdm(range(model_config.epochs), desc='epochs'):
    tr_loss = 0
    tr_acc = 0

    model.train()
    for step, mb in tqdm(enumerate(tr_dl), desc='steps', total=len(tr_dl)):
        x_mb, y_mb = map(lambda elm: elm.to(device), mb)
        opt.zero_grad()
        y_hat_mb = model(x_mb)
        mb_loss = loss_fn(y_hat_mb, y_mb)
        mb_loss.backward()
        opt.step()

        with torch.no_grad():
            mb_acc = acc(y_hat_mb, y_mb)

        tr_loss += mb_loss.item()
        tr_acc += mb_acc.item()

        # periodically log running train loss and validation loss at the global step
        if (epoch * len(tr_dl) + step) % model_config.summary_step == 0:
            val_loss = evaluate(model, val_dl, {'loss': loss_fn}, device)['loss']
            writer.add_scalars('loss', {'train': tr_loss / (step + 1), 'val': val_loss},
                               epoch * len(tr_dl) + step)
            tqdm.write('global_step: {:3}, tr_loss: {:.3f}, val_loss: {:.3f}'.format(
                epoch * len(tr_dl) + step, tr_loss / (step + 1), val_loss))
            model.train()
    else:
        # for-else: runs once the loop completes; average the accumulated metrics
        tr_loss /= (step + 1)
        tr_acc /= (step + 1)
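# NOTE: every script in this section calls two helpers, `acc` and `evaluate`,
# whose definitions are not shown. The sketch below is an assumption about
# their behavior (mean accuracy from raw scores; metric averaging over a
# loader in eval mode), not the project's actual implementation; the pairwise
# scripts further below would unpack their minibatches accordingly.
import torch


def acc(y_hat, y):
    # mean classification accuracy computed from raw scores/logits
    with torch.no_grad():
        return (y_hat.argmax(dim=-1) == y).float().mean()


def evaluate(model, data_loader, metrics, device):
    # average each metric function over the whole loader in eval mode
    if model.training:
        model.eval()

    summary = {name: 0.0 for name in metrics}
    num_samples = 0

    with torch.no_grad():
        for mb in data_loader:
            x_mb, y_mb = map(lambda elm: elm.to(device), mb)
            y_hat_mb = model(x_mb)
            num_samples += y_mb.size(0)
            for name, metric_fn in metrics.items():
                summary[name] += metric_fn(y_hat_mb, y_mb).item() * y_mb.size(0)

    return {name: value / num_samples for name, value in summary.items()}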
import torch
import torch.nn as nn
import torch.optim as optim
from pathlib import Path
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the project
from tqdm import tqdm

# project-local names (Config, get_tokenizer, get_data_loaders, ConvRec, acc,
# evaluate, CheckpointManager, SummaryManager) are assumed to be imported from
# the project's own modules.


def main(args):
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)

    exp_dir = Path("experiments") / model_config.type
    exp_dir = exp_dir.joinpath(
        f"epochs_{args.epochs}_batch_size_{args.batch_size}_learning_rate_{args.learning_rate}"
    )
    if not exp_dir.exists():
        exp_dir.mkdir(parents=True)

    if args.fix_seed:
        torch.manual_seed(777)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    tokenizer = get_tokenizer(dataset_config)
    tr_dl, val_dl = get_data_loaders(dataset_config, model_config, tokenizer, args.batch_size)

    # model
    model = ConvRec(num_classes=model_config.num_classes,
                    embedding_dim=model_config.embedding_dim,
                    hidden_dim=model_config.hidden_dim,
                    vocab=tokenizer.vocab)
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(params=model.parameters(), lr=args.learning_rate)
    scheduler = ReduceLROnPlateau(opt, patience=5)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    writer = SummaryWriter('{}/runs'.format(exp_dir))
    checkpoint_manager = CheckpointManager(exp_dir)
    summary_manager = SummaryManager(exp_dir)
    best_val_loss = 1e+10

    for epoch in tqdm(range(args.epochs), desc='epochs'):
        tr_loss = 0
        tr_acc = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc='steps', total=len(tr_dl)):
            x_mb, y_mb = map(lambda elm: elm.to(device), mb)
            opt.zero_grad()
            y_hat_mb = model(x_mb)
            mb_loss = loss_fn(y_hat_mb, y_mb)
            mb_loss.backward()
            opt.step()

            with torch.no_grad():
                mb_acc = acc(y_hat_mb, y_mb)

            tr_loss += mb_loss.item()
            tr_acc += mb_acc.item()

            if (epoch * len(tr_dl) + step) % args.summary_step == 0:
                val_loss = evaluate(model, val_dl, {'loss': loss_fn}, device)['loss']
                writer.add_scalars('loss',
                                   {'train': tr_loss / (step + 1), 'val': val_loss},
                                   epoch * len(tr_dl) + step)
                model.train()
        else:
            tr_loss /= (step + 1)
            tr_acc /= (step + 1)

        # end-of-epoch evaluation, LR scheduling, and checkpointing on best val loss
        tr_summary = {'loss': tr_loss, 'acc': tr_acc}
        val_summary = evaluate(model, val_dl, {'loss': loss_fn, 'acc': acc}, device)
        scheduler.step(val_summary['loss'])
        tqdm.write('epoch : {}, tr_loss: {:.3f}, val_loss: {:.3f}, tr_acc: {:.2%}, val_acc: {:.2%}'
                   .format(epoch + 1, tr_summary['loss'], val_summary['loss'],
                           tr_summary['acc'], val_summary['acc']))

        val_loss = val_summary['loss']
        is_best = val_loss < best_val_loss

        if is_best:
            state = {'epoch': epoch + 1,
                     'model_state_dict': model.state_dict(),
                     'opt_state_dict': opt.state_dict()}
            summary = {'train': tr_summary, 'validation': val_summary}

            summary_manager.update(summary)
            summary_manager.save('summary.json')
            checkpoint_manager.save_checkpoint(state, 'best.tar')

            best_val_loss = val_loss
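# NOTE: `CheckpointManager` and `SummaryManager` are project utilities. A
# minimal sketch, assuming they only persist a checkpoint dict and a JSON
# summary under the experiment directory:
import json
from pathlib import Path

import torch


class CheckpointManager:
    def __init__(self, model_dir):
        self._model_dir = Path(model_dir)

    def save_checkpoint(self, state, filename):
        # state is a plain dict, e.g. model and optimizer state dicts plus metadata
        torch.save(state, self._model_dir / filename)

    def load_checkpoint(self, filename):
        return torch.load(self._model_dir / filename, map_location=torch.device('cpu'))


class SummaryManager:
    def __init__(self, model_dir):
        self._model_dir = Path(model_dir)
        self._summary = {}

    def update(self, summary):
        self._summary.update(summary)

    def save(self, filename):
        with open(self._model_dir / filename, mode='w') as io:
            json.dump(self._summary, io, indent=4)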
import torch
import torch.nn as nn
import torch.optim as optim
from pathlib import Path
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the project
from tqdm import tqdm

# project-local names (Config, get_tokenizer, get_data_loaders, SenCNN, acc,
# evaluate, CheckpointManager, SummaryManager) are assumed to be imported from
# the project's own modules.


def main(args):
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)

    exp_dir = Path("experiments") / model_config.type
    exp_dir = exp_dir.joinpath(
        f"epochs_{args.epochs}_batch_size_{args.batch_size}_learning_rate_{args.learning_rate}"
    )
    if not exp_dir.exists():
        exp_dir.mkdir(parents=True)

    if args.fix_seed:
        torch.manual_seed(777)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    tokenizer = get_tokenizer(dataset_config, model_config)
    tr_dl, val_dl = get_data_loaders(dataset_config, tokenizer, args.batch_size)

    # model
    model = SenCNN(num_classes=model_config.num_classes, vocab=tokenizer.vocab)
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(params=model.parameters(), lr=args.learning_rate)
    scheduler = ReduceLROnPlateau(opt, patience=5)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)

    writer = SummaryWriter(f"{exp_dir}/runs")
    checkpoint_manager = CheckpointManager(exp_dir)
    summary_manager = SummaryManager(exp_dir)
    best_val_loss = 1e10

    for epoch in tqdm(range(args.epochs), desc="epochs"):
        tr_loss = 0
        tr_acc = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc="steps", total=len(tr_dl)):
            x_mb, y_mb = map(lambda elm: elm.to(device), mb)
            opt.zero_grad()
            y_hat_mb = model(x_mb)
            mb_loss = loss_fn(y_hat_mb, y_mb)
            mb_loss.backward()
            clip_grad_norm_(model._fc.weight, 5)  # clip only the classifier weights
            opt.step()

            with torch.no_grad():
                mb_acc = acc(y_hat_mb, y_mb)

            tr_loss += mb_loss.item()
            tr_acc += mb_acc.item()

            if (epoch * len(tr_dl) + step) % args.summary_step == 0:
                val_loss = evaluate(model, val_dl, {"loss": loss_fn}, device)["loss"]
                writer.add_scalars("loss",
                                   {"train": tr_loss / (step + 1), "validation": val_loss},
                                   epoch * len(tr_dl) + step)
                model.train()
        else:
            tr_loss /= step + 1
            tr_acc /= step + 1

        tr_summary = {"loss": tr_loss, "acc": tr_acc}
        val_summary = evaluate(model, val_dl, {"loss": loss_fn, "acc": acc}, device)
        scheduler.step(val_summary["loss"])
        tqdm.write(f"epoch: {epoch+1}\n"
                   f"tr_loss: {tr_summary['loss']:.3f}, val_loss: {val_summary['loss']:.3f}\n"
                   f"tr_acc: {tr_summary['acc']:.2%}, val_acc: {val_summary['acc']:.2%}")

        val_loss = val_summary["loss"]
        is_best = val_loss < best_val_loss

        if is_best:
            state = {
                "epoch": epoch + 1,
                "model_state_dict": model.state_dict(),
                "opt_state_dict": opt.state_dict(),
            }
            summary = {
                "epoch": epoch + 1,
                "train": tr_summary,
                "validation": val_summary,
            }

            summary_manager.update(summary)
            summary_manager.save("summary.json")
            checkpoint_manager.save_checkpoint(state, "best.tar")

            best_val_loss = val_loss
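# NOTE: the `main(args)` scripts above expect an argparse namespace. A minimal
# entry point sketch; the flag names mirror the attributes read from `args`,
# and the defaults are illustrative assumptions, not the project's values.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_config", required=True, help="path to the dataset config json")
    parser.add_argument("--model_config", required=True, help="path to the model config json")
    parser.add_argument("--epochs", default=10, type=int)
    parser.add_argument("--batch_size", default=128, type=int)
    parser.add_argument("--learning_rate", default=1e-3, type=float)
    parser.add_argument("--summary_step", default=1000, type=int)
    parser.add_argument("--fix_seed", action="store_true")
    main(parser.parse_args())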
# excerpt: inner loop of the self-attentive model's training script
tr_acc = 0

model.train()
for step, mb in tqdm(enumerate(tr_dl), desc='steps', total=len(tr_dl)):
    x_mb, y_mb = map(lambda elm: elm.to(device), mb)
    opt.zero_grad()
    score, attn_mat = model(x_mb)
    reg = regularize(attn_mat, model_config.r, device)
    mb_loss = loss_fn(score, y_mb)
    mb_loss.add_(reg)  # add the attention penalty to the classification loss in place
    mb_loss.backward()
    opt.step()

    with torch.no_grad():
        mb_acc = acc(score, y_mb)

    tr_loss += mb_loss.item()
    tr_acc += mb_acc.item()

    if (epoch * len(tr_dl) + step) % model_config.summary_step == 0:
        val_loss = evaluate(model, val_dl, {'loss': loss_fn}, device)['loss']
        writer.add_scalars('loss',
                           {'train': tr_loss / (step + 1), 'val': val_loss},
                           epoch * len(tr_dl) + step)
        model.train()
else:
    tr_loss /= (step + 1)
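# NOTE: `regularize` is not defined in this excerpt. Assuming the model is a
# structured self-attentive encoder in the style of Lin et al. (2017), the
# penalty is the squared Frobenius norm ||A·A^T - I||_F^2 of the r-hop
# attention matrix; the scaling coefficient below is an assumption.
import torch


def regularize(attn_mat, r, device, coeff=1.0):
    # attn_mat: (batch, r, seq_len) attention matrix A
    sim = torch.bmm(attn_mat, attn_mat.transpose(1, 2))  # A·A^T, shape (batch, r, r)
    identity = torch.eye(r, device=device)
    penalty = ((sim - identity) ** 2).sum(dim=(1, 2))    # squared Frobenius norm per sample
    return coeff * penalty.mean()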
import argparse
import pickle

import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm

# project-local names (PadSequence, Tokenizer, split_to_jamo, Corpus,
# EfficientCharCRNN, acc, evaluate) are assumed to be imported from the
# project's own modules.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', default=10, type=int)
    parser.add_argument('--batch_size', default=128, type=int)
    # parser.add_argument('--data_type', default='senCNN')
    parser.add_argument('--classes', default=2, type=int)
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--learning_rate', default=1e-3, type=float)
    # parser.add_argument('--print_freq', default=3000, type=int)
    # parser.add_argument('--weight_decay', default=5e-5, type=float)
    parser.add_argument('--word_dim', default=16, type=int)
    parser.add_argument('--word_max_len', default=300, type=int)
    parser.add_argument('--global_step', default=1000, type=int)
    parser.add_argument('--data_path', default='../data_in')
    parser.add_argument('--file_path', default='../nsmc-master')
    # parser.add_argument('--build_preprocessing', default=False)
    # parser.add_argument('--build_vocab', default=False)
    args = parser.parse_args()

    # p = Preprocessing(args)
    # p.makeProcessing()
    # v = Build_Vocab(args)
    # v.make_vocab()

    with open(args.data_path + '/' + 'vocab_char.pkl', mode='rb') as io:
        vocab = pickle.load(io)

    padder = PadSequence(length=args.word_max_len,
                         pad_val=vocab.to_indices(vocab.padding_token))
    tokenizer = Tokenizer(vocab=vocab, split_fn=split_to_jamo, pad_fn=padder)
    model = EfficientCharCRNN(args, vocab)

    epochs = args.epoch
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    global_step = args.global_step

    tr_ds = Corpus(args.data_path + '/train.txt', tokenizer.split_and_transform)
    tr_dl = DataLoader(tr_ds, batch_size=batch_size, shuffle=True, num_workers=4,
                       drop_last=True)
    val_ds = Corpus(args.data_path + '/val.txt', tokenizer.split_and_transform)
    val_dl = DataLoader(val_ds, batch_size=batch_size, num_workers=4)

    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(params=model.parameters(), lr=learning_rate)
    scheduler = ReduceLROnPlateau(opt, patience=5)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    best_val_loss = 1e+10
    for epoch in tqdm(range(args.epoch), desc='epochs'):
        tr_loss = 0
        tr_acc = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc='steps', total=len(tr_dl)):
            x, y = map(lambda elm: elm.to(device), mb)
            opt.zero_grad()
            y_h = model(x)
            m_loss = loss_fn(y_h, y)
            m_loss.backward()
            clip_grad_norm_(model._fc.weight, 5)
            opt.step()

            with torch.no_grad():
                m_acc = acc(y_h, y)

            tr_loss += m_loss.item()
            tr_acc += m_acc.item()
        else:
            tr_loss /= (step + 1)
            tr_acc /= (step + 1)

        tr_summ = {'loss': tr_loss, 'acc': tr_acc}
        val_summ = evaluate(model, val_dl, {'loss': loss_fn, 'acc': acc}, device)
        scheduler.step(val_summ['loss'])
        tqdm.write('epoch : {}, tr_loss: {:.3f}, val_loss: {:.3f}, tr_acc: {:.2%}, val_acc: {:.2%}'
                   .format(epoch + 1, tr_summ['loss'], val_summ['loss'],
                           tr_summ['acc'], val_summ['acc']))

        val_loss = val_summ['loss']
        is_best = val_loss < best_val_loss

        if is_best:
            state = {'epoch': epoch + 1,
                     'model_state_dict': model.state_dict(),
                     'opt_state_dict': opt.state_dict()}
            summary = {'tr': tr_summ, 'val': val_summ}

            # manager.update_summary(summary)
            # manager.save_summary('summary.json')
            # manager.save_checkpoint(state, 'best.tar')

            best_val_loss = val_loss
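# NOTE: `PadSequence` is used above as the tokenizer's `pad_fn`. A minimal
# sketch, assuming it truncates or right-pads a list of token indices to a
# fixed length:


class PadSequence:
    def __init__(self, length, pad_val=0):
        self._length = length
        self._pad_val = pad_val

    def __call__(self, sample):
        sample = sample[:self._length]                         # truncate long sequences
        pad = [self._pad_val] * (self._length - len(sample))   # right-pad short ones
        return sample + pad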
import torch
import torch.optim as optim
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the project
from tqdm import tqdm

# project-local names (Config, get_preprocessor, get_data_loaders, split_morphs,
# split_jamos, batchify, SAN, log_loss, acc, evaluate, CheckpointManager,
# SummaryManager) are assumed to be imported from the project's own modules.


def main(args):
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)

    exp_dir = Path("experiments") / model_config.type
    exp_dir = exp_dir.joinpath(
        f"epochs_{args.epochs}_batch_size_{args.batch_size}_learning_rate_{args.learning_rate}"
    )
    if not exp_dir.exists():
        exp_dir.mkdir(parents=True)

    if args.fix_seed:
        torch.manual_seed(777)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    preprocessor = get_preprocessor(dataset_config,
                                    coarse_split_fn=split_morphs,
                                    fine_split_fn=split_jamos)
    tr_dl, val_dl = get_data_loaders(dataset_config, preprocessor, args.batch_size,
                                     collate_fn=batchify)

    # model
    model = SAN(model_config.num_classes, preprocessor.coarse_vocab,
                preprocessor.fine_vocab, model_config.fine_embedding_dim,
                model_config.hidden_dim, model_config.multi_step,
                model_config.prediction_drop_ratio)
    opt = optim.Adam(model.parameters(), lr=args.learning_rate)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)

    writer = SummaryWriter(f"{exp_dir}/runs")
    checkpoint_manager = CheckpointManager(exp_dir)
    summary_manager = SummaryManager(exp_dir)
    best_val_loss = 1e10

    for epoch in tqdm(range(args.epochs), desc="epochs"):
        tr_loss = 0
        tr_acc = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc="steps", total=len(tr_dl)):
            # each question is a tuple of (coarse, fine) tensors; the label is a plain tensor
            qa_mb, qb_mb, y_mb = map(
                lambda elm: (el.to(device) for el in elm) if isinstance(elm, tuple) else elm.to(device),
                mb)
            opt.zero_grad()
            y_hat_mb = model((qa_mb, qb_mb))
            mb_loss = log_loss(y_hat_mb, y_mb)
            mb_loss.backward()
            opt.step()

            with torch.no_grad():
                mb_acc = acc(y_hat_mb, y_mb)

            tr_loss += mb_loss.item()
            tr_acc += mb_acc.item()

            if (epoch * len(tr_dl) + step) % args.summary_step == 0:
                val_loss = evaluate(model, val_dl, {"loss": log_loss}, device)["loss"]
                writer.add_scalars("loss",
                                   {"train": tr_loss / (step + 1), "val": val_loss},
                                   epoch * len(tr_dl) + step)
                model.train()
        else:
            tr_loss /= step + 1
            tr_acc /= step + 1

        tr_summary = {"loss": tr_loss, "acc": tr_acc}
        val_summary = evaluate(model, val_dl, {"loss": log_loss, "acc": acc}, device)
        tqdm.write(f"epoch: {epoch+1}\n"
                   f"tr_loss: {tr_summary['loss']:.3f}, val_loss: {val_summary['loss']:.3f}\n"
                   f"tr_acc: {tr_summary['acc']:.2%}, val_acc: {val_summary['acc']:.2%}")

        val_loss = val_summary["loss"]
        is_best = val_loss < best_val_loss

        if is_best:
            state = {
                "epoch": epoch + 1,
                "model_state_dict": model.state_dict(),
                "opt_state_dict": opt.state_dict(),
            }
            summary = {"train": tr_summary, "validation": val_summary}

            summary_manager.update(summary)
            summary_manager.save("summary.json")
            checkpoint_manager.save_checkpoint(state, "best.tar")

            best_val_loss = val_loss
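# NOTE: `log_loss` is project-local. A minimal sketch, assuming the SAN head
# emits class probabilities rather than logits (if it emitted logits, plain
# nn.CrossEntropyLoss would apply instead):
import torch
import torch.nn.functional as F


def log_loss(y_hat, y, eps=1e-7):
    # negative log-likelihood over predicted class probabilities
    return F.nll_loss(torch.log(y_hat + eps), y)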
import json

import torch
import torch.nn as nn
import torch.optim as optim
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the project
from tqdm import tqdm

# project-local names (Config, BertConfig, PairwiseClassifier, get_preprocessor,
# get_data_loaders, acc, evaluate, CheckpointManager, SummaryManager) are
# assumed to be imported from the project's own modules.


def main(args):
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    ptr_config_info = Config(f"conf/pretrained/{model_config.type}.json")

    exp_dir = Path("experiments") / model_config.type
    exp_dir = exp_dir.joinpath(
        f"epochs_{args.epochs}_batch_size_{args.batch_size}_learning_rate_{args.learning_rate}"
        f"_weight_decay_{args.weight_decay}")
    if not exp_dir.exists():
        exp_dir.mkdir(parents=True)

    if args.fix_seed:
        torch.manual_seed(777)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    preprocessor = get_preprocessor(ptr_config_info, model_config)

    with open(ptr_config_info.config, mode="r") as io:
        ptr_config = json.load(io)

    # model
    config = BertConfig()
    config.update(ptr_config)
    model = PairwiseClassifier(config, num_classes=model_config.num_classes,
                               vocab=preprocessor.vocab)
    bert_pretrained = torch.load(ptr_config_info.bert)
    # strict=False: only the BERT encoder weights are restored; the classifier
    # head keeps its random initialization
    model.load_state_dict(bert_pretrained, strict=False)

    tr_dl, val_dl = get_data_loaders(dataset_config, preprocessor, args.batch_size)

    loss_fn = nn.CrossEntropyLoss()
    # discriminative learning rates: the pretrained encoder is trained at 1/100
    # of the classifier head's learning rate
    opt = optim.Adam([
        {"params": model.bert.parameters(), "lr": args.learning_rate / 100},
        {"params": model.classifier.parameters(), "lr": args.learning_rate},
    ], weight_decay=args.weight_decay)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    writer = SummaryWriter(f'{exp_dir}/runs')
    checkpoint_manager = CheckpointManager(exp_dir)
    summary_manager = SummaryManager(exp_dir)
    best_val_loss = 1e+10

    for epoch in tqdm(range(args.epochs), desc='epochs'):
        tr_loss = 0
        tr_acc = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc='steps', total=len(tr_dl)):
            x_mb, x_types_mb, y_mb = map(lambda elm: elm.to(device), mb)
            opt.zero_grad()
            y_hat_mb = model(x_mb, x_types_mb)
            mb_loss = loss_fn(y_hat_mb, y_mb)
            mb_loss.backward()
            opt.step()

            with torch.no_grad():
                mb_acc = acc(y_hat_mb, y_mb)

            tr_loss += mb_loss.item()
            tr_acc += mb_acc.item()

            if (epoch * len(tr_dl) + step) % args.summary_step == 0:
                val_loss = evaluate(model, val_dl, {'loss': loss_fn}, device)['loss']
                writer.add_scalars('loss',
                                   {'train': tr_loss / (step + 1), 'val': val_loss},
                                   epoch * len(tr_dl) + step)
                model.train()
        else:
            tr_loss /= (step + 1)
            tr_acc /= (step + 1)

        tr_summary = {'loss': tr_loss, 'acc': tr_acc}
        val_summary = evaluate(model, val_dl, {'loss': loss_fn, 'acc': acc}, device)
        tqdm.write(f"epoch: {epoch+1}\n"
                   f"tr_loss: {tr_summary['loss']:.3f}, val_loss: {val_summary['loss']:.3f}\n"
                   f"tr_acc: {tr_summary['acc']:.2%}, val_acc: {val_summary['acc']:.2%}")

        val_loss = val_summary['loss']
        is_best = val_loss < best_val_loss

        if is_best:
            state = {'epoch': epoch + 1,
                     'model_state_dict': model.state_dict(),
                     'opt_state_dict': opt.state_dict()}
            summary = {'train': tr_summary, 'validation': val_summary}

            summary_manager.update(summary)
            summary_manager.save('summary.json')
            checkpoint_manager.save_checkpoint(state, 'best.tar')

            best_val_loss = val_loss
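# NOTE: usage sketch for restoring the best checkpoint written above; `exp_dir`,
# `model`, and `opt` are the objects from the training script.
checkpoint = torch.load(exp_dir / "best.tar", map_location=torch.device("cpu"))
model.load_state_dict(checkpoint["model_state_dict"])
opt.load_state_dict(checkpoint["opt_state_dict"])
model.eval()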