Example #1
# (snippet truncated above: cfg, device, and the train/val dataloaders are set up earlier in the original script)

# build model
model = RNet(is_train=True)
model = model.to(device)
# model.load_state_dict(torch.load('pretrained_weights/slim_mtcnn/best_pnet.pth'), strict=True)

# build checkpoint
# checkpoint = CheckPoint(train_config.save_path)

# build optimizer
if cfg.TRAIN.optimizer == 'Adam':
    # note: torch.optim.Adam takes no `momentum` argument (it uses `betas`),
    # so only lr and weight_decay are passed here
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg.TRAIN.LR.initial_lr,
                                 weight_decay=cfg.TRAIN.LR.weight_decay)
else:
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cfg.TRAIN.LR.initial_lr,
                                momentum=cfg.TRAIN.LR.momentum,
                                weight_decay=cfg.TRAIN.LR.weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=cfg.TRAIN.LR.decay_epochs, gamma=0.1)

# build trainer
trainer = RNetTrainer(cfg.TRAIN.epochs, dataloaders, model, optimizer,
                      scheduler, device)

# train
trainer.train()
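
Both optimizer branches above hand their optimizer to MultiStepLR, which multiplies the learning rate by gamma each time the epoch counter crosses a milestone. A minimal, self-contained sketch of that behavior (the lr and milestones here are illustrative, not values from cfg):

import torch

# throwaway parameter/optimizer just to drive the scheduler
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=0.01)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2, 4], gamma=0.1)

for epoch in range(6):
    print(epoch, optimizer.param_groups[0]['lr'])  # lr in effect for this epoch
    # ...one epoch of training would run here...
    optimizer.step()
    scheduler.step()
# prints 0.01 for epochs 0-1, 0.001 for epochs 2-3, 0.0001 for epochs 4-5
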
Example #2
# Set dataloader (top of snippet reconstructed from the fuller Example #4)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': config.nThreads, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(FaceDataset(config.annoPath,
                                                       transform=transform,
                                                       is_train=True),
                                           batch_size=config.batchSize,
                                           shuffle=True,
                                           **kwargs)

# Set model
model = RNet()
model = model.to(device)

# Set checkpoint
checkpoint = CheckPoint(config.save_path)

# Set optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=config.step,
                                                 gamma=0.1)

# Set trainer
logger = Logger(config.save_path)
trainer = RNetTrainer(config.lr, train_loader, model, optimizer, scheduler,
                      logger, device)

for epoch in range(1, config.nEpochs + 1):
    cls_loss_, box_offset_loss, total_loss, accuracy = trainer.train(epoch)
    checkpoint.save_model(model, index=epoch)
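
The CheckPoint helper used above is not defined anywhere in these snippets. A minimal sketch of what save_model presumably wraps; the class body and filename scheme are assumptions, only the torch.save of a state_dict is standard practice:

import os
import torch

class CheckPoint:
    """Hypothetical stand-in for the CheckPoint helper used above."""
    def __init__(self, save_path):
        self.save_path = save_path
        os.makedirs(save_path, exist_ok=True)

    def save_model(self, model, index, tag='rnet'):
        # save only the state_dict so it can be restored with load_state_dict
        filename = os.path.join(self.save_path, '%s_epoch_%d.pth' % (tag, index))
        torch.save(model.state_dict(), filename)
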
Example #3
use_cuda = torch.cuda.is_available()  # reconstructed: use_cuda and device are referenced below
device = torch.device("cuda:0" if use_cuda else "cpu")
# Set dataloader
kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
train_data = FaceDataset(os.path.join(config.ANNO_PATH, config.RNET_TRAIN_IMGLIST_FILENAME))
val_data = FaceDataset(os.path.join(config.ANNO_PATH, config.RNET_VAL_IMGLIST_FILENAME))
dataloaders = {
    'train': torch.utils.data.DataLoader(train_data, batch_size=config.BATCH_SIZE,
                                         shuffle=True, **kwargs),
    'val': torch.utils.data.DataLoader(val_data, batch_size=config.BATCH_SIZE,
                                       shuffle=False, **kwargs)  # no need to shuffle validation data
}

# Set model
model = RNet(is_train=True)
model = model.to(device)
model.load_state_dict(torch.load('./pretrained_weights/best_rnet.pth'), strict=True)

# Set checkpoint
#checkpoint = CheckPoint(train_config.save_path)

# Set optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.LR)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=config.STEPS, gamma=0.1)

# Set trainer
trainer = RNetTrainer(config.EPOCHS, dataloaders, model, optimizer, scheduler, device)

trainer.train()

# checkpoint.save_model(model, index=epoch, tag=config.SAVE_PREFIX)
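
This example resumes from pretrained weights with strict=True, which raises an error if the checkpoint keys and the model's keys differ at all; strict=False instead skips mismatches and reports them. A short sketch of both modes, assuming the .pth file holds a bare state_dict as the call above implies:

import torch

model = RNet(is_train=True)
state = torch.load('./pretrained_weights/best_rnet.pth', map_location='cpu')

# strict=True (the default) fails fast if checkpoint and model disagree
model.load_state_dict(state, strict=True)

# strict=False tolerates partial checkpoints and returns what was skipped
missing, unexpected = model.load_state_dict(state, strict=False)
print('missing:', missing, 'unexpected:', unexpected)
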
            
Example #4
use_cuda = torch.cuda.is_available()  # reconstructed: use_cuda is referenced below
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True

# Set dataloader
kwargs = {'num_workers': config.nThreads, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(
    FaceDataset(config.annoPath, transform=transform, is_train=True),
    batch_size=config.batchSize, shuffle=True, **kwargs)

# Set model
model = RNet()
model = model.to(device)

# Set checkpoint
checkpoint = CheckPoint(config.save_path)

# Set optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=config.step, gamma=0.1)

# Set trainer
logger = Logger(config.save_path)
trainer = RNetTrainer(config.lr, train_loader, model, optimizer, scheduler, logger, device)

for epoch in range(1, config.nEpochs + 1):
    cls_loss_, box_offset_loss, total_loss, accuracy = trainer.train(epoch)
    checkpoint.save_model(model, index=epoch)
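
As in Example #2, Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) applied after ToTensor rescales each channel from [0, 1] to [-1, 1] via (x - mean) / std. A quick self-contained check; the random array is just a stand-in for a FaceDataset image:

import torch
from torchvision import transforms

transform = transforms.Compose([
    transforms.ToTensor(),                                   # HWC uint8 [0, 255] -> CHW float [0, 1]
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),  # (x - 0.5) / 0.5 -> [-1, 1]
])
fake_img = torch.randint(0, 256, (32, 32, 3), dtype=torch.uint8).numpy()  # stand-in RGB image
out = transform(fake_img)
print(out.min().item(), out.max().item())  # both values fall within [-1.0, 1.0]
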