# In[ ]:

import sys
from pathlib import Path
from typing import Dict, TypeVar, Union

import numpy as np
import torch

# In[ ]:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# In[ ]:

# Generic type variable and an alias for nested dictionaries of tensors
T = TypeVar("T")
TensorDict = Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]
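
# A purely illustrative sketch of the kind of object the `TensorDict` alias
# describes; the key names below are hypothetical and not taken from this
# notebook's data pipeline.

# In[ ]:

example_batch: TensorDict = {
    "input_ids": torch.zeros(1, 128, dtype=torch.long),
    "segments": {"token_type_ids": torch.zeros(1, 128, dtype=torch.long)},
}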


# In[ ]:

# Data and saved-weights directories, relative to this notebook
DATA_ROOT = Path("../data")
MODEL_SAVE_DIR = Path("../weights")
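
# A small convenience, assuming nothing else creates the weights directory:
# make sure it exists before any checkpoint is written there.

# In[ ]:

MODEL_SAVE_DIR.mkdir(parents=True, exist_ok=True)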


# In[ ]:
sys.path.append("../lib")

# In[4]:

from bert_utils import Config, BertPreprocessor

# In[5]:

config = Config(
    model_type="bert-base-uncased",
    max_seq_len=128,
)

# In[6]:

processor = BertPreprocessor(config.model_type, config.max_seq_len)
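
# Quick sanity check on the preprocessor. The exact return type and shape
# depend on bert_utils, but the way its output is fed to the model below
# suggests a (1, seq_len) tensor of token ids.

# In[ ]:

print(processor.to_bert_model_input("hello world").shape)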

# ### Prepare model

# In[7]:

from pytorch_pretrained_bert import BertConfig, BertForMaskedLM
model = BertForMaskedLM.from_pretrained(config.model_type)
model.eval()  # Important! Disable dropout
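
# A quick check that eval() took effect: every submodule (including the
# dropout layers inside the encoder) should now be in inference mode.

# In[ ]:

assert not model.training
assert all(not m.training for m in model.modules())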

# In[8]:


def get_logits(sentence: str) -> np.ndarray:
    """Return the masked-LM logits for every token position in `sentence`."""
    input_ids = processor.to_bert_model_input(sentence)
    # BertForMaskedLM returns (batch, seq_len, vocab_size); take the only batch element
    return model(input_ids)[0, :, :].cpu().detach().numpy()
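
# Example use of get_logits: predict the word behind a [MASK] token. This
# assumes to_bert_model_input passes the literal "[MASK]" through as BERT's
# mask token; the argmax is taken at every position, so non-masked positions
# mostly just echo their own tokens.

# In[ ]:

from pytorch_pretrained_bert import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(config.model_type)
logits = get_logits("The capital of France is [MASK] .")
predicted_ids = logits.argmax(axis=-1)
print(tokenizer.convert_ids_to_tokens(predicted_ids.tolist()))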

# In[9]:

# Raw encoder outputs (bypassing the masked-LM head) for a sample sentence
sequence_output, pooled_output = model.bert(
    processor.to_bert_model_input("hello world"),
    output_all_encoded_layers=False)
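
# With output_all_encoded_layers=False, sequence_output is the final layer's
# hidden states of shape (batch, seq_len, hidden_size) and pooled_output is
# the transformed [CLS] vector of shape (batch, hidden_size); for
# bert-base-uncased the hidden size is 768.

# In[ ]:

print(sequence_output.shape, pooled_output.shape)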