Example #1
# Assumes module-level imports: import torch; from torch import nn, optim
def train(self, epochs=10):
    model = NN(batch_size=30)
    opt = optim.Adam(model.parameters(), lr=0.005)
    criterion = nn.BCELoss()
    print(self.labels.shape)
    for epoch in range(epochs):
        epoch_loss = 0
        for i, (feat, lab) in enumerate(self.dataloader):
            feat = feat[0][:, :1, :, :]  # keep only the first channel
            output = model(feat)
            # BCELoss expects float targets with the same shape as the output
            loss = criterion(output, lab.float().view(-1))
            opt.zero_grad()  # clear gradients from the previous step
            loss.backward()
            opt.step()
            epoch_loss += loss.item()
        print("LOSS ---->", epoch_loss / len(self.dataloader))
    torch.save(model.state_dict(), "1")  # save the trained weights
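The NN class is not shown in this snippet. Since criterion is nn.BCELoss() and the input is sliced to a single channel, a compatible model must map a 4-D batch to per-example probabilities; a minimal sketch under those assumptions (the layer sizes are illustrative, not the original architecture):

import torch
from torch import nn

class NN(nn.Module):
    # Hypothetical stand-in for the NN used above.
    def __init__(self, batch_size=30):
        super().__init__()
        self.batch_size = batch_size
        self.conv = nn.Conv2d(1, 8, kernel_size=3, padding=1)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(8, 1)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv(x)))
        x = self.fc(x.flatten(1))
        # BCELoss needs probabilities in [0, 1], hence the sigmoid
        return torch.sigmoid(x).view(-1)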
Example #2
import os
import json
from datetime import datetime

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# IMDBDataset, NN, and Model are assumed to be defined in the project.


def train(config):
    # Initialize the device on which to run the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Initialize the training dataset and data loader
    dataset = IMDBDataset(train_or_test='train', seq_length=config.seq_length)
    data_loader = DataLoader(dataset,
                             config.batch_size,
                             shuffle=True,
                             num_workers=4)

    # Initialize the test dataset and data loader (no shuffling needed here)
    test_dataset = IMDBDataset(train_or_test='test',
                               seq_length=config.seq_length)
    test_data_loader = DataLoader(test_dataset,
                                  config.batch_size,
                                  shuffle=False,
                                  num_workers=4)

    # Initialize the model that we are going to use
    if not config.recurrent_dropout_model:
        model = NN(dataset.vocab_size, config.embed_dim, config.hidden_dim,
                   config.output_dim, config.n_layers, config.bidirectional,
                   config.dropout, 0).to(device)
    else:
        model = Model(dataset.vocab_size,
                      output_dim=config.output_dim).to(device)

    os.makedirs(f'runs/{config.name}', exist_ok=True)

    print(config.__dict__)

    with open(f'runs/{config.name}/args.txt', 'w') as f:
        json.dump(config.__dict__, f, indent=2)

    # Set up the loss and optimizer
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    lowest = float('inf')  # lowest test loss seen so far
    save = []
    epochs = 0

    while epochs < config.train_epochs:
        accuracies = []
        losses = []
        print('Training')
        model.train()  # make sure dropout is active during training
        for step, (batch_inputs, batch_targets) in enumerate(data_loader):

            x = batch_inputs.long().to(device)
            y_target = batch_targets.long().to(device)

            predictions = model(x)

            loss = criterion(predictions, y_target)
            optimizer.zero_grad()
            loss.backward()

            # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.max_norm)
            optimizer.step()
            accuracy = (torch.argmax(predictions,
                                     dim=1) == y_target).cpu().numpy().mean()
            loss = loss.item()

            accuracies.append(accuracy)
            losses.append(loss)

        accuracy = np.array(accuracies).mean()
        loss = np.array(losses).mean()

        # Evaluate on the test set
        print('Testing')
        model.eval()  # disable dropout for evaluation
        with torch.no_grad():
            test_accuracies = []
            test_losses = []
            for step, (batch_inputs,
                       batch_targets) in enumerate(test_data_loader):

                x = batch_inputs.long().to(device)
                y_target = batch_targets.long().to(device)

                predictions = model(x)

                test_loss = criterion(predictions, y_target)

                test_accuracy = (torch.argmax(
                    predictions, dim=1) == y_target).cpu().numpy().mean()
                test_loss = test_loss.item()

                test_accuracies.append(test_accuracy)
                test_losses.append(test_loss)

        test_accuracy = np.array(test_accuracies).mean()
        test_loss = np.array(test_losses).mean()

        # Checkpoint the model whenever the test loss improves
        if test_loss < lowest:
            lowest = test_loss
            torch.save(model.state_dict(), f'runs/{config.name}/model.pt')

        epochs += 1
        print(
            "[{}] Train epochs {:04d}/{:04d}, Train Accuracy = {:.2f}, Train Loss = {:.3f}, Test Accuracy = {:.2f}, Test Loss = {:.3f}"
            .format(datetime.now().strftime("%Y-%m-%d %H:%M"), epochs,
                    config.train_epochs, accuracy, loss, test_accuracy,
                    test_loss))

    print('Done training.')
    return accuracy, lowest, save
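Neither NN nor Model is shown here. Judging from the call site, NN is an embedding-plus-recurrent text classifier; a minimal sketch of a module with a matching constructor, assuming an LSTM and treating the trailing 0 as a padding index:

import torch
from torch import nn

class NN(nn.Module):
    # Hypothetical stand-in matching the call site above; the real model
    # is defined elsewhere in the project.
    def __init__(self, vocab_size, embed_dim, hidden_dim, output_dim,
                 n_layers, bidirectional, dropout, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim,
                                      padding_idx=pad_idx)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=n_layers,
                            bidirectional=bidirectional, dropout=dropout,
                            batch_first=True)
        num_directions = 2 if bidirectional else 1
        self.fc = nn.Linear(hidden_dim * num_directions, output_dim)

    def forward(self, x):
        embedded = self.embedding(x)          # (batch, seq, embed_dim)
        _, (hidden, _) = self.lstm(embedded)  # (layers*dirs, batch, hidden)
        if self.lstm.bidirectional:
            hidden = torch.cat((hidden[-2], hidden[-1]), dim=1)
        else:
            hidden = hidden[-1]
        return self.fc(hidden)                # logits for CrossEntropyLoss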
Example #3
import argparse

import numpy as np
import torch
from torch.optim import Adam

from game import Game
from trainer import TradeComm
# NN and Agent are assumed to be importable from the surrounding project.

parser = argparse.ArgumentParser()
parser.add_argument("--jobnum", type=int, default=0)
args = parser.parse_args()

num_items = 15
num_utterances = 15
input_size = 2 * num_items + 2 * num_utterances
hidden_size = 256
num_samples = 10000
epsilon = 1 / 10
policy_weight = 1 / 5
horizon = 2000
write_every = 10
lr = 1e-4
directory = 'results'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Named `net` rather than `nn` to avoid shadowing the torch.nn convention
net = NN(input_size, hidden_size, num_items, num_utterances)
opt = Adam(net.parameters(), lr=lr)
g = Game(num_items, num_utterances)
agent = Agent(num_items, num_utterances, net, opt, num_samples, epsilon,
              policy_weight, device)
trainer = TradeComm(g, agent, directory, args.jobnum)
trainer.run(horizon, write_every)
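NN is instantiated with (input_size, hidden_size, num_items, num_utterances) but defined elsewhere. One plausible shape, sketched under the assumption that the agent needs separate distributions over items and utterances (the two-head design is a guess, not the project's actual network):

import torch
from torch import nn

class NN(nn.Module):
    # Hypothetical sketch of the policy network the script instantiates.
    def __init__(self, input_size, hidden_size, num_items, num_utterances):
        super().__init__()
        self.trunk = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
        )
        self.item_head = nn.Linear(hidden_size, num_items)
        self.utterance_head = nn.Linear(hidden_size, num_utterances)

    def forward(self, x):
        h = self.trunk(x)
        # Return logits for the item action and the utterance action
        return self.item_head(h), self.utterance_head(h)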
Example #4
from time import time

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# ChatDataset, NN, tags, and X_train are assumed to be defined earlier.

# Hyperparameters
batch_size = 8
learning_rate = 0.001
num_epochs = 1000
hidden_size = 8
output_size = len(tags)
input_size = len(X_train[0])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset, batch_size=batch_size,
                          shuffle=True, num_workers=2)

model = NN(input_size, hidden_size, output_size).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

start_t = time()
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(words)
        loss = criterion(outputs, labels)

        # Backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f'Epoch {epoch + 1}/{num_epochs} | Loss: {loss.item():.4f}')
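NN takes (input_size, hidden_size, output_size) here. A minimal sketch of a compatible feed-forward classifier, assuming two hidden layers; it returns raw logits because nn.CrossEntropyLoss applies log-softmax internally:

import torch.nn as nn

class NN(nn.Module):
    # Hypothetical stand-in; the real model is defined elsewhere.
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),  # logits, no softmax
        )

    def forward(self, x):
        return self.net(x)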
Example #5
        # Assumes module-level imports: torch, torch.nn as nn,
        # numpy as np, tqdm
        if load_weights:
            print("Found state dict. Loading...")
            with open(r'state/state_dict.pickle', 'rb') as file:
                state_dict = torch.load(file)
            nn_model.load_state_dict(state_dict)
        nn_model.to(device)

        learning_rate = 0.0001
        f1_scores = []
        epochs = 1

        # Create the optimizer and loss function once, outside the epoch
        # loop, so the Adam state is not reset every epoch.
        optimizer = torch.optim.Adam(nn_model.parameters(), lr=learning_rate)
        loss_function = nn.CrossEntropyLoss()

        for epoch in range(epochs):
            predicted_cumulated, labels_cumulated = np.array([]), np.array([])
            running_loss = 0
            counter = 0

            for i, data in tqdm.tqdm(enumerate(train_loader, 0)):
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                # The model is already on `device`, so its output is too.
                output = nn_model(inputs)
                loss = loss_function(output, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()
                counter += 1
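The fragment declares f1_scores, predicted_cumulated, and labels_cumulated but cuts off before they are used. A minimal sketch of how they might be filled in and scored per epoch, assuming scikit-learn's f1_score and integer class labels:

from sklearn.metrics import f1_score

# Inside the batch loop: accumulate hard predictions and targets.
predicted = output.argmax(dim=1).cpu().numpy()
predicted_cumulated = np.concatenate([predicted_cumulated, predicted])
labels_cumulated = np.concatenate([labels_cumulated, labels.cpu().numpy()])

# After the batch loop: one macro-averaged F1 value per epoch.
f1_scores.append(f1_score(labels_cumulated, predicted_cumulated,
                          average='macro'))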
Example #6
    def __getitem__(self, index):
        return self.x_train[index], self.y_train[index]

    def __len__(self):
        return len(self.x_train)


# Assumes module-level imports: torch.nn as nn, torch.optim as optim,
# and DataLoader from torch.utils.data.
dataset = Data()
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=0)

# Declaring network and optimizer
print("Initializing network")
network = NN()
optimizer = optim.Adam(network.parameters(), lr=0.0005)
criterion = nn.BCELoss(reduction='mean')
correct = 0

for epoch in range(5):
    for i, data in enumerate(train_loader, 0):
        # Get the inputs
        inputs, labels = data

        # Forward pass: compute predicted y by passing x to the model
        y_pred = network(inputs)

        # Compute and print the loss; BCELoss expects float targets
        # shaped like the output
        loss = criterion(y_pred, labels.float().view((-1, 1)))
        print(f'Epoch {epoch + 1} | Batch: {i+1} | Loss: {loss.item():.4f}')

        # Backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
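correct is initialized above but never updated in the fragment. A hedged sketch of how binary accuracy could be tallied inside the batch loop, assuming the network ends in a sigmoid (which nn.BCELoss requires):

# Inside the batch loop, after the forward pass:
predicted = (y_pred > 0.5).float()
correct += (predicted == labels.float().view((-1, 1))).sum().item()

# After training: accuracy over all examples seen across the 5 epochs.
accuracy = correct / (len(dataset) * 5)
print(f'Accuracy: {accuracy:.3f}')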