Code example
File: train.py  Project: pj0616/CODER
# Excerpt from mid-file: the imports this relies on (os, torch, AutoConfig,
# AutoModel, AdamW, get_linear_schedule_with_warmup) and the matching
# if-branch appear earlier in train.py.
else:
    try:
        # Load the encoder from a Hugging Face checkpoint directory.
        config = AutoConfig.from_pretrained(embedding)
        bert_model = AutoModel.from_pretrained(embedding, config=config).to(device)
    except Exception:
        # Fall back to a whole-model object saved with torch.save().
        bert_model = torch.load(os.path.join(embedding, 'pytorch_model.bin')).to(device)
    model = LinearModel(len(rel2id), embedding_type, bert_model, freeze_embedding).to(device)
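
# ---------------------------------------------------------------------------
# LinearModel is defined elsewhere in the project and not shown in this
# snippet. The class below is a hypothetical sketch (name and body are
# illustrative, not CODER's actual implementation) of what such a wrapper
# typically looks like: a pretrained encoder, optionally frozen, topped with
# a linear relation classifier.
import torch.nn as nn

class LinearModelSketch(nn.Module):
    def __init__(self, num_labels, embedding_type, bert_model, freeze_embedding):
        super().__init__()
        self.embedding_type = embedding_type
        self.bert = bert_model
        if freeze_embedding:
            # Keep the pretrained encoder fixed; train only the head.
            for p in self.bert.parameters():
                p.requires_grad = False
        self.classifier = nn.Linear(self.bert.config.hidden_size, num_labels)

    def forward(self, input_ids, attention_mask=None):
        out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # Use the [CLS] token representation as the sequence embedding.
        return self.classifier(out.last_hidden_state[:, 0])
# ---------------------------------------------------------------------------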

# optimizer
if embedding_type != 'bert':
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
else:
    # Standard BERT fine-tuning setup: exclude biases and LayerNorm weights
    # from weight decay, decay everything else.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,  # conventional default for the decayed group
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=1e-8)

    # Linear decay with warmup over the first 10% of training steps.
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=int(epoch_num * len(train_dataloader) * 0.1),
                                                num_training_steps=epoch_num * len(train_dataloader))
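
# ---------------------------------------------------------------------------
# The snippet ends before the training loop. The function below is an
# illustrative sketch (never called here, and not CODER's actual loop) of the
# standard per-batch update with this optimizer/scheduler pair; it assumes
# batches of (input_ids, attention_mask, labels) and a cross-entropy
# objective, neither of which is shown in the source. scheduler.step() only
# applies in the BERT branch, where the schedule is created.
def _train_loop_sketch():
    import torch.nn.functional as F
    for _ in range(epoch_num):
        model.train()
        for input_ids, attention_mask, labels in train_dataloader:
            logits = model(input_ids.to(device), attention_mask.to(device))
            loss = F.cross_entropy(logits, labels.to(device))
            optimizer.zero_grad()
            loss.backward()
            # Gradient clipping is customary when fine-tuning BERT with AdamW.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()  # advance the warmup/decay schedule once per batch
# ---------------------------------------------------------------------------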

# Prepare the evaluation function
from sklearn.metrics import accuracy_score, classification_report, f1_score

def eval(m, dataloader):
    y_pred = []
    y_true = []