Example #1
    def __init__(self, vocab_size, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
        super().__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr

        ## define the Embedding
        emb = nn.Embedding(vocab_size, 200)
        self.emb_layer = sol.optimize(emb, sol.input([0, 5], dtype=torch.long), batch_size=32)
        # CHANGE: Don't forget to load the state dict! In a future version this will be done automatically.
        self.emb_layer.load_state_dict(emb.state_dict())
        
        ## define the LSTM
        self.lstm = nn.LSTM(200, n_hidden, n_layers, dropout=drop_prob, batch_first=True)
        
        ## define the Classifier
        classifier = WordClassifier(n_hidden, vocab_size, drop_prob)
        #sol.config["compiler::name"] = "Classifier"
        #sol.config["compiler::debug"] = True
        #sol.config["compiler::debug_params"] = True
        self.classifier = sol.optimize(classifier, sol.input([0, 5, n_hidden], dtype=torch.float32, requires_grad=True), batch_size=32)
        # CHANGE: Don't forget to load the state dict! In a future version this will be done automatically.
        self.classifier.load_state_dict(classifier.state_dict())
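
    # --- Assumed completion (not part of the original excerpt): a forward pass
    # --- chaining the three stages above in the usual embed -> LSTM -> classify order.
    def forward(self, x, hidden):
        embeds = self.emb_layer(x)                    # [batch, seq] -> [batch, seq, 200]
        lstm_out, hidden = self.lstm(embeds, hidden)  # [batch, seq, n_hidden]
        out = self.classifier(lstm_out)               # per-step scores over the vocabulary
        return out, hidden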
Example #2
    war_and_peace = (line.rstrip() for line in book)
    war_and_peace = (line for line in war_and_peace if line)
    war_and_peace = list(war_and_peace)[:n_lines]

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
inputs = tokenizer(war_and_peace,
                   return_tensors="pt",
                   padding=True,
                   truncation=True)
model = BertModel.from_pretrained('bert-base-uncased')

input_ids = inputs["input_ids"]
vec_len = input_ids.size(1)
#sol.cache.clear()
opt = sol.optimize(
    model, sol.input([n_lines, vec_len], dtype=torch.long)
)  #, sol.input([0, 8], dtype=torch.long), sol.input([0, 8], dtype=torch.long), batch_size=1)
opt.load_state_dict(model.state_dict(), strict=False)
opt.to(device)

#help(model.forward)

#attention_mask = inputs["attention_mask"]
#token_type_ids = inputs["token_type_ids"]
print(input_ids.size())
#print(attention_mask.size())
#print(token_type_ids.size())
#print(inputs)
#inputs = inputs.to(device)
start_time = time.time()
opt.eval()
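
# Assumed continuation of the excerpt: a single timed forward pass through the
# optimized model (start_time and opt.eval() above point to a timed inference run).
with torch.no_grad():
    outputs = opt(input_ids.to(device))
print("inference time: %.3fs" % (time.time() - start_time))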
Example #3
#Class labels
classes = ('Airplane', 'Car', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse',
           'Ship', 'Truck')

sol.cache.clear()

device = torch.device("hip:0")
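# ("hip:0" is presumably how this SOL build addresses the NEC Vector Engine
#  through PyTorch's device API; cf. the "copy to VE" comment in Example #6.)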

py_model = models.__dict__["vgg11"]()
py_model.classifier[3] = nn.Linear(4096, 1024)
py_model.classifier[6] = nn.Linear(1024, 10)
#print(py_model)
criterion = nn.CrossEntropyLoss()

#model = TrainingModel(py_model)
opt = sol.optimize(py_model, sol.input([0, 3, 224, 224]), batch_size=40)
opt.load_state_dict(py_model.state_dict(), strict=False)
opt.to(device)

optimizer = optim.SGD(opt.parameters(), lr=0.01, momentum=0.9)

start_time = time.time()
print("Training starts")

opt.train()
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data  #data[0].to(device), data[1].to(device)
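
        # Assumed completion of the truncated loop body, following the standard
        # PyTorch CIFAR-10 recipe (zero grads, forward, loss, backward, step):
        optimizer.zero_grad()
        outputs = opt(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 200 == 199:  # report the running loss every 200 mini-batches
            print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 200))
            running_loss = 0.0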
Example #4
import torch
import sol.pytorch as sol
tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer',
                           'bert-base-cased')

text_1 = "Who was Jim Henson?"
text_2 = "Jim Henson was a puppeteer"

# Tokenized input with special tokens around it (for BERT: [CLS] at the beginning and [SEP] at the end)
indexed_tokens = tokenizer.encode(text_1, text_2, add_special_tokens=True)

# Define sentence A and B indices associated with the 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]

# Convert inputs to PyTorch tensors
segments_tensors = torch.tensor([segments_ids])
tokens_tensor = torch.tensor([indexed_tokens])

model = torch.hub.load('huggingface/pytorch-transformers', 'model',
                       'bert-base-cased')

#print(model)

opt = sol.optimize(model, sol.input([28996, 768], dtype=torch.float32))
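# (28996 and 768 above correspond to bert-base-cased's vocabulary size and hidden size.)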

with torch.no_grad():
    encoded_layers, _ = model(tokens_tensor, token_type_ids=segments_tensors)
Example #5
train_dataset = TensorDataset(train_tokens_tensor, train_y_tensor)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              batch_size=BATCH_SIZE)

test_dataset = TensorDataset(test_tokens_tensor, test_y_tensor)
test_sampler = SequentialSampler(test_dataset)
test_dataloader = DataLoader(test_dataset,
                             sampler=test_sampler,
                             batch_size=BATCH_SIZE)

bert_clf = BertBinaryClassifier()
print(test_dataset.tensors[0].size())
opt = sol.optimize(bert_clf,
                   sol.input([0, 512], dtype=torch.long),
                   batch_size=BATCH_SIZE)
opt.load_state_dict(bert_clf.state_dict(), strict=False)
opt.to(device)
#opt.convert(sol.device.ve)
optimizer = torch.optim.Adam(opt.parameters(), lr=3e-6)
#exit(0)

#opt.train()
#start_time = time.time()
#for epoch_num in range(EPOCHS):
#    for step_num, batch_data in enumerate(train_dataloader):
#        token_ids, labels = tuple(t for t in batch_data)
#        token_ids = token_ids.to(device)
#        probas = opt(token_ids)
#        probas = probas.cpu()
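#        # Assumed continuation of this commented-out fine-tuning loop:
#        # binary cross-entropy on the classifier's probabilities, then the update.
#        batch_loss = nn.BCELoss()(probas, labels.float())
#        optimizer.zero_grad()
#        batch_loss.backward()
#        optimizer.step()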
Example #6
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)

        return x

net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)


# input_dummy = torch.rand(50000, 3, 32, 32)  # no need to explicitly allocate data for this
# 0 == wildcard, for when the batch size is unknown at the time sol.optimize is called, or varies during training
opt = sol.optimize(net, sol.input([0, 3, 32, 32]), batch_size=32)
opt.load_state_dict(net.state_dict(), strict=False)
opt.to(device) # copy to VE

start_time = time.time()

opt.train()
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data #data[0].to(device), data[1].to(device)

        # zero the parameter gradients
        optimizer.zero_grad()
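
        # Assumed remainder of the loop body, mirroring the classic PyTorch
        # CIFAR-10 tutorial that this excerpt follows:
        outputs = opt(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 2000 == 1999:  # report the running loss every 2000 mini-batches
            print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0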