Example #1
# Set device
use_cuda = config.use_cuda and torch.cuda.is_available()
torch.manual_seed(config.manualSeed)
torch.cuda.manual_seed(config.manualSeed)
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True

# Set dataloader
kwargs = {
    'num_workers': config.nThreads,
    'pin_memory': True
} if use_cuda else {}
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(FaceDataset(config.annoPath,
                                                       transform=transform,
                                                       is_train=True),
                                           batch_size=config.batchSize,
                                           shuffle=True,
                                           **kwargs)

# Set model
model = RNet()
model = model.to(device)

# Set checkpoint
checkpoint = CheckPoint(config.save_path)

# Set optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=config.step,
                                                 gamma=0.1)
Example #2
# Set device
os.environ['CUDA_VISIBLE_DEVICES'] = config.GPU  # only needed when a GPU and CUDA are available
use_cuda = config.use_cuda and torch.cuda.is_available()
torch.manual_seed(config.manualSeed)
torch.cuda.manual_seed(config.manualSeed)
device = torch.device("cuda" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True

# Set dataloader
kwargs = {'num_workers': config.nThreads, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(
    FaceDataset(config.annoPath, transform=transform, is_train=True),
    batch_size=config.batchSize,
    shuffle=True,
    **kwargs)

# Set model
model = PNet()
model = model.to(device)

# parallel train
if use_cuda and len(config.GPU.split(',')) > 1:
    model = torch.nn.DataParallel(model)

# Set checkpoint
checkpoint = CheckPoint(config.save_path)

# Set optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=config.step, gamma=0.1)
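Both examples stop once the optimizer and scheduler are configured. As a rough illustration of how these objects are typically driven, here is a minimal training-loop sketch; the batch layout (image, label), the placeholder cross-entropy loss, config.nEpochs, and the checkpoint.save_model call are assumptions for illustration only and are not part of the original code.

# Minimal training-loop sketch (assumptions marked below), not the original training code
for epoch in range(config.nEpochs):  # config.nEpochs is an assumed field
    model.train()
    for images, labels in train_loader:  # assumed batch layout; FaceDataset may yield more fields
        images, labels = images.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(images)  # forward pass through the network configured above
        # Placeholder loss; the real training uses task-specific losses not shown in these examples
        loss = torch.nn.functional.cross_entropy(outputs, labels)
        loss.backward()
        optimizer.step()

    scheduler.step()  # decay the learning rate at the configured milestones
    checkpoint.save_model(model, index=epoch)  # hypothetical CheckPoint method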