# Report how many vocabulary entries received a pretrained embedding vector.
print("Embedding match number {} out of {}".format(match_embedding, len(TEXT.vocab)))

# Batch iterators: only the training split is shuffled; no sorting, so each
# split keeps its original example order.
train_iter = data.Iterator(train, batch_size=args.batch_size, device=args.gpu,
                           train=True, repeat=False, sort=False, shuffle=True)
dev_iter = data.Iterator(dev, batch_size=args.batch_size, device=args.gpu,
                         train=False, repeat=False, sort=False, shuffle=False)
test_iter = data.Iterator(test, batch_size=args.batch_size, device=args.gpu,
                          train=False, repeat=False, sort=False, shuffle=False)

# The parsed CLI namespace doubles as the model configuration.
config = args
config.words_num = len(TEXT.vocab)

if args.dataset == 'EntityDetection':
    config.label = len(ED.vocab)
    model = EntityDetection(config)
else:
    print("Error Dataset")
    exit()

# Seed the embedding layer with the pretrained vectors loaded into the vocab.
model.embed.weight.data.copy_(TEXT.vocab.vectors)

if args.cuda:
    model.cuda()
    print("Shift model to GPU")

# Summary of the run configuration and dataset sizes.
print(config)
print("VOCAB num", len(TEXT.vocab))
print("Train instance", len(train))
print("Dev instance", len(dev))
print("Test instance", len(test))
print("Entity Type", len(ED.vocab))
# NOTE(review): this chunk begins mid-statement in the source view — the
# fragment "sort_within_batch=False)" is the tail of a truncated iterator
# construction (apparently dev_iter). The head below is reconstructed from the
# parallel variants of this script; confirm against the original file.
dev_iter = data.Iterator(dev, batch_size=args.batch_size, device="cuda",
                         train=False, repeat=False, sort=False, shuffle=False,
                         sort_within_batch=False)
test_iter = data.Iterator(test, batch_size=args.batch_size, device="cuda",
                          train=False, repeat=False, sort=False, shuffle=False,
                          sort_within_batch=False)

# The parsed CLI namespace doubles as the model configuration.
config = args
config.words_num = len(TEXT.vocab)

if args.dataset == 'EntityDetection':
    config.label = len(ED.vocab)
    model = EntityDetection(config)
else:
    print("Error Dataset")
    exit()

# Seed the embedding layer with the pretrained vectors loaded into the vocab.
model.embed.weight.data.copy_(TEXT.vocab.vectors)

if args.cuda:
    model = model.to(torch.device("cuda"))
    print("Shift model to GPU")

# Summary of the run configuration and dataset sizes.
print(config)
print("VOCAB num", len(TEXT.vocab))
print("Train instance", len(train))
print("Dev instance", len(dev))
print("Test instance", len(test))
print("Entity Type", len(ED.vocab))
# NOTE(review): this chunk begins mid-statement in the source view — the
# keyword arguments "device=device, train=False, ..." are the tail of a
# truncated test_iter construction. The head below is reconstructed from the
# parallel variants of this script ("device" is presumably defined earlier in
# the file); confirm against the original file.
test_iter = data.Iterator(test, batch_size=args.batch_size, device=device,
                          train=False, repeat=False, sort=False, shuffle=False,
                          sort_within_batch=False)

# The parsed CLI namespace doubles as the model configuration.
config = args
config.words_num = len(TEXT.vocab)

if args.dataset == 'EntityDetection':
    config.label = len(ED.vocab)
    # Dispatch on the requested architecture: transformer encoder vs. the
    # default EntityDetection model.
    if config.entity_detection_mode.lower() == 'transformer':
        model = TransformerModel(config)
    else:
        model = EntityDetection(config)
else:
    print("Error Dataset")
    exit()

# Seed the embedding layer with the pretrained vectors loaded into the vocab.
model.embed.weight.data.copy_(TEXT.vocab.vectors)

if args.cuda:
    model = model.to(torch.device("cuda:{}".format(args.gpu)))
    print("Shift model to GPU")

# Summary of the run configuration and dataset sizes.
print(config)
print("VOCAB num", len(TEXT.vocab))
print("Train instance", len(train))
print("Dev instance", len(dev))
print("Test instance", len(test))
print("Entity Type", len(ED.vocab))
# Report how many vocabulary entries received a pretrained embedding vector.
print("Embedding match number {} out of {}".format(match_embedding, len(TEXT.vocab)))

# Batch iterators: only the training split is shuffled; no sorting, so each
# split keeps its original example order.
train_iter = data.Iterator(train, batch_size=args.batch_size, device=args.gpu,
                           train=True, repeat=False, sort=False, shuffle=True,
                           sort_within_batch=False)
dev_iter = data.Iterator(dev, batch_size=args.batch_size, device=args.gpu,
                         train=False, repeat=False, sort=False, shuffle=False,
                         sort_within_batch=False)
test_iter = data.Iterator(test, batch_size=args.batch_size, device=args.gpu,
                          train=False, repeat=False, sort=False, shuffle=False,
                          sort_within_batch=False)

# The parsed CLI namespace doubles as the model configuration.
config = args
config.words_num = len(TEXT.vocab)

if args.dataset == 'EntityDetection':
    config.label = len(ED.vocab)
    model = EntityDetection(config)
else:
    print("Error Dataset")
    exit()

# Seed the embedding layer with the pretrained vectors loaded into the vocab.
model.embed.weight.data.copy_(TEXT.vocab.vectors)

if args.cuda:
    print(args.gpu)
    # BUG FIX: the original assigned the GPU-moved model to a misspelled name
    # ("modle = model.to(...)"), so "model" silently stayed on the CPU even
    # though the script reported it had been moved. Rebind "model" itself.
    model = model.to(torch.device("cuda:{}".format(args.gpu)))
    print("Shift model to GPU")

# Summary of the run configuration and dataset sizes.
print(config)
print("VOCAB num", len(TEXT.vocab))
print("Train instance", len(train))
print("Dev instance", len(dev))
print("Test instance", len(test))