# Ejemplo n.º 1
# 0
                                           batch_size=batch_size)

# Evaluation pipeline: batches the held-out test set (no shuffle needed).
test_loader = torch.utils.data.DataLoader(
    dataset=test_data, batch_size=batch_size)

### Define Model and Load Params
model = ConvNet().to(device)
print("========================== Original Model =============================", "\n", model)
model.load_state_dict(torch.load('./pths/cifar10_pre_model.pth', map_location=device))

### Use pre-trained model and only change the last layer
# Freeze every backbone parameter so only the replacement head trains.
for param in model.parameters():
    param.requires_grad = False

# Swap in a fresh classifier head; newly constructed layers default to
# requires_grad=True, so fc2 is the only trainable part of the model.
model.fc2 = nn.Linear(120, 50)
# Fix: original assigned to the misspelled dead name `modle`; the rest
# of the script reads `model`, so bind the device-moved module there.
model = model.to(device)

print("========================== Modified Model =============================", "\n", model)

### Define Loss and Optim
criterion = nn.CrossEntropyLoss()
# Hand the optimizer only the still-trainable parameters (the new fc2
# head); the backbone was frozen above, so Adam need not track it.
# Frozen params never receive gradients, so results are unchanged.
optimizer = torch.optim.Adam(
    filter(lambda p: p.requires_grad, model.parameters()), lr=lr)

### Train
if __name__ == '__main__':
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)
# Ejemplo n.º 2
# 0
    'val':
    DataLoader(data['val'], batch_size=batch_size, shuffle=True, num_workers=4)
}

#############
### MODEL ###
#############

from torch import nn, optim
from cnn import ConvNet

# Training hyper-parameters.
num_epochs = 10
learning_rate = 0.001

# Instantiate the network and place it on the target device.
# (`nn.Module.to` returns the module itself, so this is one step.)
net = ConvNet().to(device)

# Mean-squared-error objective; Adam with beta1 lowered to 0.5.
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=learning_rate)

################
### TRAINING ###
################

import matplotlib.pyplot as plt

n_batches = len(loader['train'])
for epoch in range(num_epochs):
    for i, sample in enumerate(loader['train']):

        sample['image'] = sample['image'].to(device)