Code example #1
# Imports assumed by this fragment (matplotlib plus a
# pytorch_pretrained_bert-style BERT API).
import matplotlib.pyplot as plt
import torch
from pytorch_pretrained_bert import (BertConfig, BertTokenizer,
                                      BertForTokenClassification)

    # Label the plot.
    plt.title("Learning curve")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()

    plt.show()


MAX_LEN = 40  # maximum token sequence length
bs = 8        # batch size

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()

if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
# Build the model from a local config file (weights are randomly initialized,
# not loaded from a pretrained checkpoint).
config = BertConfig.from_json_file(
    './config/uncased_L-24_H-128_B-512_A-4_F-4_OPT.json')
model = BertForTokenClassification(config)
model.to(device)

# Tokenize the raw sentences (sentences is assumed to be defined earlier
# as a list of strings).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                          do_lower_case=True)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print(tokenized_texts[0])
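
Code example #1 defines MAX_LEN and bs but stops right after tokenization. As a minimal sketch, not part of the original example, the tokenized texts could be converted to fixed-length ID tensors and batched as follows; the names ids, input_ids, attention_masks, and dataloader are illustrative.

# Sketch only: pad/truncate token IDs to MAX_LEN and batch them with bs,
# reusing tokenizer and tokenized_texts from the example above.
import torch
from torch.utils.data import DataLoader, TensorDataset

ids = [tokenizer.convert_tokens_to_ids(toks)[:MAX_LEN]
       for toks in tokenized_texts]
# 0 is the [PAD] token ID for bert-base-uncased.
input_ids = torch.tensor([seq + [0] * (MAX_LEN - len(seq)) for seq in ids])
attention_masks = torch.tensor([[1] * len(seq) + [0] * (MAX_LEN - len(seq))
                                for seq in ids])

dataloader = DataLoader(TensorDataset(input_ids, attention_masks),
                        batch_size=bs, shuffle=False)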
Code example #2
File: evaluate.py  Project: jaiswaldj/Rapidken
    # Load data
    test_data = data_loader.load_data('test')

    # Specify the test set size
    params.test_size = test_data['size']
    params.eval_steps = params.test_size // params.batch_size
    test_data_iterator = data_loader.data_iterator(test_data, shuffle=False)

    logging.info("- done.")

    # Define the model
    #config_path = os.path.join(args.bert_model_dir, 'bert_config.json')
    #config = BertConfig.from_json_file(config_path)
    model = BertForTokenClassification.from_pretrained(
        "bert-base-uncased", num_labels=len(params.tag2idx))
    model.to(params.device)
    # Reload weights from the saved file
    utils.load_checkpoint(
        os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)
    if args.fp16:
        model.half()
    if params.n_gpu > 1 and args.multi_gpu:
        model = torch.nn.DataParallel(model)

    logging.info("Starting evaluation...")
    test_metrics = evaluate(model,
                            test_data_iterator,
                            params,
                            mark='Test',
                            verbose=True)
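
The evaluate() function itself lives elsewhere in the project and is not shown here. For orientation only, the following hypothetical sketch shows the kind of loop such a token-classification evaluation typically runs, assuming the data iterator yields (input_ids, label_ids) batches and that an idx2tag mapping is available; none of these names are taken from the Rapidken code.

# Hypothetical evaluation loop (not the project's actual implementation).
import torch

def evaluate_sketch(model, data_iterator, params, idx2tag):
    model.eval()
    predictions, references = [], []
    with torch.no_grad():
        for _ in range(params.eval_steps):
            # Assumes each batch is a pair of (input_ids, label_ids) tensors.
            input_ids, label_ids = next(data_iterator)
            # Assumes the model returns raw logits when no labels are passed.
            logits = model(input_ids, attention_mask=(input_ids > 0).long())
            preds = logits.argmax(dim=-1)
            for pred_row, label_row in zip(preds.tolist(), label_ids.tolist()):
                # Padding positions are not filtered here; a real loop would
                # mask them out before scoring.
                predictions.append([idx2tag[p] for p in pred_row])
                references.append([idx2tag[t] for t in label_row])
    return predictions, references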