예제 #1
0
#############
### MODEL ###
#############

from torch import nn, optim
from cnn import ConvNet  # project-local network definition

# Training hyperparameters.
num_epochs = 10
learning_rate = 0.001

# NOTE(review): `device` is not defined in this excerpt — presumably set
# earlier (e.g. torch.device("cuda" if available else "cpu")); confirm.
net = ConvNet()
net.to(device)

# MSE loss — the training loop below feeds a dense target (`sample['map']`),
# so this appears to be a regression task rather than classification.
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate, betas=(0.5, 0.999))

################
### TRAINING ###
################

import matplotlib.pyplot as plt  # not used in the visible lines; presumably for plotting further down

# NOTE(review): `loader` (a dict of DataLoaders keyed by split name) is not
# defined in this excerpt — confirm it is built earlier in the file.
n_batches = len(loader['train'])
for epoch in range(num_epochs):
    for i, sample in enumerate(loader['train']):

        # Move the batch tensors to the compute device before the forward pass.
        sample['image'] = sample['image'].to(device)
        sample['map'] = sample['map'].to(device)

        # Clear gradients accumulated from the previous iteration.
        # (Loop body continues beyond this excerpt.)
        net.zero_grad()
예제 #2
0
test_data = FontDataset(test_dir)  # FontDataset / test_dir defined elsewhere in the file

### Define Dataloader
# NOTE(review): no shuffle=True on the training loader — batches are drawn
# in dataset order every epoch; confirm this is intentional.
train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                           batch_size=batch_size)

test_loader = torch.utils.data.DataLoader(dataset=test_data,
                                           batch_size=batch_size)

### Define Model and Load Params
model = ConvNet().to(device)
print("========================== Original Model =============================", "\n", model)
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoints (or pass weights_only=True on PyTorch >= 1.13).
model.load_state_dict(torch.load('./pths/cifar10_pre_model.pth', map_location=device))

### Use pre-trained model and only change the last layer
# Freeze every pre-trained parameter; only the replacement head will train.
for param in model.parameters():
    param.requires_grad = False

# Replace the final classifier (120 -> 50 outputs). A fresh nn.Linear has
# requires_grad=True by default, so it is the only trainable part.
model.fc2 = nn.Linear(120, 50)
# Fix: was `modle = model.to(device)` — the typo bound the result to a stray
# name. Module.to() moves parameters in place and returns self, so rebinding
# `model` keeps behavior identical while removing the dead `modle` binding.
model = model.to(device)

print("========================== Modified Model =============================", "\n", model)

### Define Loss and Optim
criterion = nn.CrossEntropyLoss()
# NOTE(review): all parameters are passed, but frozen ones get no gradients,
# so Adam skips them; passing model.fc2.parameters() would be clearer —
# confirm intent before narrowing.
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

### Train
if __name__ == '__main__':
    total_step = len(train_loader)
    # Epoch loop — its body continues beyond this excerpt.
    for epoch in range(num_epochs):
예제 #3
0
# Create your dataset and dataloader
test_dataset = utils.TensorDataset(test_data, test_labels)
test_loader = utils.DataLoader(test_dataset, shuffle=True)  # batch_size left at the default (1)

model_file = Path(os.path.join(SAVED_NETWORK_PATH, network_name))
if model_file.is_file():
    # A saved network exists: load it and skip training entirely.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted checkpoints (or pass weights_only=True on PyTorch >= 1.13).
    model = torch.load(model_file)
    model.eval()
else:
    # Create your network
    model = ConvNet(num_classes).to(device)

    # Loss and optimizer
    # NOTE(review): `criterion` is defined but never used — the loop below
    # calls F.nll_loss instead. F.nll_loss expects log-probabilities while
    # CrossEntropyLoss expects raw logits; confirm which one ConvNet emits
    # and drop the unused definition or switch the loss call accordingly.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Train the model
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        for i, (t_i, t_l) in enumerate(train_loader):
            # Move the batch (images, labels) to the compute device.
            t_i = t_i.to(device=device)
            t_l = t_l.to(device=device)

            # Reset gradients before the new step.
            optimizer.zero_grad()

            # Forward pass
            outputs = model(t_i)
            loss = F.nll_loss(outputs, t_l)

            # Backward and optimize