Example #1
    # pinned host memory only pays off when batches are copied to a GPU,
    # so it is enabled together with the CUDA device
    args.pin_memory = False
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        args.pin_memory = True

    return args
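# The excerpt above is only the tail of cli(); the head (the argparse setup) is
# not shown in this source. A minimal reconstruction so the listing is
# self-contained: the flag names --disable-cuda / --square-edge are inferred
# from how args is used below, and the 368 default is an assumption.
import argparse

def cli():
    parser = argparse.ArgumentParser(description='rtpose lightning training')
    parser.add_argument('--disable-cuda', action='store_true',
                        help='run on CPU even if CUDA is available')
    parser.add_argument('--square-edge', default=368, type=int,
                        help='square edge length of the input crop')
    args = parser.parse_args()

    # same device / pinned-memory logic as the original tail above
    args.device = torch.device('cpu')
    args.pin_memory = False
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        args.pin_memory = True

    return args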


args = cli()

print("Loading dataset...")
# load train data
preprocess = transforms.Compose([
    transforms.Normalize(),
    transforms.RandomApply(transforms.HFlip(), 0.5),
    transforms.RescaleRelative(),
    transforms.Crop(args.square_edge),
    transforms.CenterPad(args.square_edge),
])
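
# A minimal sketch of how the pipeline above is typically consumed
# (illustrative, not from this source): `train_data` stands for whatever
# Dataset applies `preprocess` and the target transforms per sample; the
# batch size and worker count below are assumptions.
train_loader = torch.utils.data.DataLoader(
    train_data,
    batch_size=16,
    shuffle=True,
    pin_memory=args.pin_memory,
    num_workers=4,
    drop_last=True,
)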

class rtpose_lightning(pl.LightningModule):

    def __init__(self, args, preprocess, target_transforms, model):
        super(rtpose_lightning, self).__init__()
        
        self.args = args
        self.preprocess = preprocess
        self.model = model
        self.target_transforms = target_transforms

    def forward(self, x):
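        # The body is cut off in this excerpt; a minimal sketch, assuming the
        # wrapped pose network is applied directly to the input batch:
        return self.model(x)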
Example #2
    # reload the checkpoint saved by the Trainer, delete the file once read,
    # and strip the Lightning "model." prefix so the keys match the bare
    # rtpose network before loading them
    last_ckpt_path = os.path.join(trainer.checkpoint_callback.filepath, last_ckpt_name)
    state_dict = torch.load(last_ckpt_path, map_location=lambda storage, loc: storage)['state_dict']
    os.remove(last_ckpt_path)
    state_dict = {k.replace('model.', ''): v for k, v in state_dict.items()}
    rtpose_model.load_state_dict(state_dict)

    return rtpose_model
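
# A minimal sketch of the checkpoint callback the reload code above relies on;
# `filepath` matches older pytorch-lightning releases (newer ones use
# `dirpath`), and the directory and monitored metric are assumptions, not
# taken from this source.
checkpoint_callback = pl.callbacks.ModelCheckpoint(
    filepath='checkpoints/',
    monitor='val_loss',
    save_top_k=1,
)
# e.g. trainer = pl.Trainer(checkpoint_callback=checkpoint_callback, gpus=1)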
    
args = parse_args()
update_config(cfg, args)
print("Loading dataset...")
# load train data
preprocess = transforms.Compose([
    transforms.Normalize(),
    transforms.RandomApply(transforms.HFlip(), 0.5),
    transforms.RescaleRelative(scale_range=(cfg.DATASET.SCALE_MIN, cfg.DATASET.SCALE_MAX)),
    transforms.Crop(cfg.DATASET.IMAGE_SIZE),
    transforms.CenterPad(cfg.DATASET.IMAGE_SIZE),
])

# model
rtpose_vgg = get_model(trunk='vgg19')
# load pretrained
use_vgg(rtpose_vgg)
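
# A minimal sketch of the optimizer later handed to the LightningModule below;
# the cfg.TRAIN.* hyper-parameter names and values are assumptions, not taken
# from this source.
optimizer = torch.optim.SGD(
    filter(lambda p: p.requires_grad, rtpose_vgg.parameters()),
    lr=cfg.TRAIN.LR,
    momentum=cfg.TRAIN.MOMENTUM,
    weight_decay=cfg.TRAIN.WD,
)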
               
class rtpose_lightning(pl.LightningModule):

    def __init__(self, preprocess, target_transforms, model, optimizer):
        super(rtpose_lightning, self).__init__()

        self.preprocess = preprocess