Example #1
# NOTE: this excerpt starts mid-expression; the ImageFolder datasets and the
# DATA_DIR root below are an assumed reconstruction based on how image_paths
# is used later in the snippet.
image_paths = {
    dtype: datasets.ImageFolder(os.path.join(DATA_DIR, dtype),
                                transform=data_transforms[dtype])
    for dtype in data_transforms.keys()
}
dataloaders = {
    dtype:
    torch.utils.data.DataLoader(image_paths[dtype],
                                batch_size=BATCH_SIZE,
                                shuffle=(dtype == 'train'),
                                num_workers=4)
    for dtype in data_transforms.keys()
}
dataset_sizes = {
    dtype: len(image_paths[dtype])
    for dtype in data_transforms.keys()
}
class_names = image_paths['train'].classes

print('[INFO] Finetuning the model...')
model, _ = helpers.initialize_model_finetuning(MODEL_NAME, NUM_CLASSES)
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

model = helpers.train_model(model,
                            dataloaders,
                            dataset_sizes,
                            criterion,
                            optimizer,
                            num_epochs=EPOCHS,
                            is_inception=(MODEL_NAME == 'inception'))
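This example assumes that a data_transforms dict (plus constants such as DATA_DIR, BATCH_SIZE, EPOCHS, MODEL_NAME, NUM_CLASSES and the device) is defined earlier in the script. A minimal sketch of what data_transforms could look like, assuming standard torchvision ImageNet-style preprocessing (the transforms actually used are not shown above), is:

from torchvision import transforms

# Hypothetical preprocessing dict; the transforms actually used by the example
# are defined elsewhere in the original script.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
}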
Example #2
import torch
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from torch import optim, nn
from custompytorch.nn import Mnist
from custompytorch.utils import helpers

device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')

data, targets = load_digits(return_X_y=True)
train_x, val_x, train_y, val_y = train_test_split(data, targets, test_size=0.15, random_state=42)

train_x, val_x = map(lambda x: torch.tensor(x, dtype=torch.float32), (train_x, val_x))
train_y, val_y = map(lambda x: torch.tensor(x, dtype=torch.int64), (train_y, val_y))
trainset, valset = TensorDataset(train_x, train_y), TensorDataset(val_x, val_y)
trainloader = DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)
valloader = DataLoader(valset, batch_size=64, num_workers=2)
dataloaders = {'train': trainloader, 'val': valloader}
dataset_sizes = {'train': len(train_x), 'val': len(val_x)}

model = Mnist()
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
epochs = 25

model = helpers.train_model(model, dataloaders, dataset_sizes, criterion, optimizer, num_epochs=epochs)
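The Mnist module imported from custompytorch.nn is not shown here. A minimal sketch of a compatible classifier, assuming the flattened 8x8 scikit-learn digits (64 input features, 10 classes), might look like:

import torch.nn as nn

# Hypothetical stand-in for custompytorch.nn.Mnist: a small fully connected
# classifier for the flattened 8x8 digit images (64 features, 10 classes).
class Mnist(nn.Module):
    def __init__(self, in_features=64, num_classes=10):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_features, 128),
            nn.ReLU(),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        # Accept either (N, 64) or (N, 8, 8) inputs.
        return self.net(x.view(x.size(0), -1))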
Example #3
# (Dataset/dataloader setup is elided in this excerpt; image_paths, dataloaders,
#  dataset_sizes and device are assumed to be defined as in Example #1.)
class_names = image_paths['train'].classes

# Transfer learning: load a pretrained ResNet-18, freeze the backbone and
# replace the final fully connected layer with one sized for our classes.
model_ft = models.resnet18(pretrained=True)
for param in model_ft.parameters():
    param.requires_grad = False
model_ft.fc = nn.Linear(model_ft.fc.in_features, len(class_names))

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()
# Only the new (unfrozen) fc parameters receive gradients and get updated.
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
epochs = 25

# Decay the learning rate by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

model_ft = train_model(model_ft,
                       dataloaders,
                       dataset_sizes,
                       criterion,
                       optimizer_ft,
                       scheduler=exp_lr_scheduler,
                       num_epochs=epochs)

visualize_model(model_ft,
                dataloaders,
                class_names,
                file_names='finetuning_prediction_sample.jpg',
                num_images=6)
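All three examples delegate training to a train_model helper whose implementation is not shown. A minimal sketch of a compatible training loop, assuming the call signature used above (dataloaders and dataset_sizes dicts keyed by 'train'/'val', an optional scheduler and an is_inception flag), could look like this; the real helpers in these repositories may differ:

import copy
import torch

# Hypothetical training loop compatible with the calls in the examples above.
def train_model(model, dataloaders, dataset_sizes, criterion, optimizer,
                scheduler=None, num_epochs=25, is_inception=False):
    device = next(model.parameters()).device
    best_acc, best_wts = 0.0, copy.deepcopy(model.state_dict())

    for epoch in range(num_epochs):
        for phase in ('train', 'val'):
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss, running_corrects = 0.0, 0

            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':
                        # Inception v3 also returns auxiliary logits in training mode.
                        outputs, aux_outputs = model(inputs)
                        loss = criterion(outputs, labels) + 0.4 * criterion(aux_outputs, labels)
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                preds = outputs.argmax(dim=1)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += (preds == labels).sum().item()

            if phase == 'train' and scheduler is not None:
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]
            print(f'[{phase}] epoch {epoch + 1}/{num_epochs} '
                  f'loss: {epoch_loss:.4f} acc: {epoch_acc:.4f}')

            # Keep the weights of the best-performing validation epoch.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc, best_wts = epoch_acc, copy.deepcopy(model.state_dict())

    model.load_state_dict(best_wts)
    return model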