import random

import torch
import torch.nn as nn
import torch.optim as optim


def main():
    args = get_args()
    printer(args)

    # Initialize logger
    global log
    log = Logger(args)

    # Set random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Check cuda device
    device = torch.device('cuda' if args.cuda else 'cpu')

    # Load training and validation data
    train_music_data, train_dance_data, _ = load_data(
        args.train_dir, interval=args.seq_len, data_type=args.data_type)
    training_data = prepare_dataloader(train_music_data, train_dance_data, args)

    valid_music_data, valid_dance_data, _ = load_data(
        args.valid_dir, interval=args.seq_len, data_type=args.data_type)
    validation_data = prepare_dataloader(valid_music_data, valid_dance_data, args)

    encoder = Encoder(args)
    decoder = Decoder(args)
    model = Model(encoder, decoder, args, device=device)

    for name, parameters in model.named_parameters():
        print(name, ':', parameters.size())

    # Wrap with DataParallel to use multiple GPUs
    model = nn.DataParallel(model).to(device)

    # Optimize only the trainable parameters of the underlying module
    optimizer = optim.Adam(
        filter(lambda x: x.requires_grad, model.module.parameters()), lr=args.lr)

    train(model, training_data, validation_data, optimizer, device, args, log)
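
# `prepare_dataloader` is defined elsewhere in the repo. Below is a minimal
# sketch of what it is assumed to do: pair each music sequence with its dance
# sequence and wrap the pairs in a torch DataLoader. The dataset class name
# and batching details are assumptions, not the repo's actual implementation.
from torch.utils.data import Dataset, DataLoader


class PairedMusicDanceDataset(Dataset):
    """Hypothetical dataset pairing music features with dance pose sequences."""

    def __init__(self, music_data, dance_data):
        assert len(music_data) == len(dance_data)
        self.music_data = music_data
        self.dance_data = dance_data

    def __len__(self):
        return len(self.music_data)

    def __getitem__(self, idx):
        music = torch.as_tensor(self.music_data[idx], dtype=torch.float32)
        dance = torch.as_tensor(self.dance_data[idx], dtype=torch.float32)
        return music, dance


def prepare_dataloader(music_data, dance_data, args):
    # Batch size comes from the command-line args; sequences were already cut
    # to a fixed interval by load_data, so the default collate can stack them.
    return DataLoader(
        PairedMusicDanceDataset(music_data, dance_data),
        batch_size=args.batch_size,
        shuffle=True,
    )
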
def main():
    args = get_args()
    printer(args)

    # Initialize logger
    global log
    log = Logger(args)

    # Set random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Check cuda device
    device = torch.device('cuda' if args.cuda else 'cpu')

    # Load training data, from AIST++ if requested
    if args.aist:
        print('train with AIST++ dataset!')
        train_music_data, train_dance_data, _ = load_data_aist(
            args.train_dir, interval=args.seq_len, rotmat=args.rotmat)
    else:
        train_music_data, train_dance_data, _ = load_data(
            args.train_dir, interval=args.seq_len, data_type=args.data_type)
    training_data = prepare_dataloader(train_music_data, train_dance_data, args)
    print('number of training batches:', len(training_data))

    encoder = Encoder(args)
    decoder = Decoder(args)
    model = Model(encoder, decoder, args, device=device)

    # Wrap with DataParallel to use multiple GPUs
    model = nn.DataParallel(model).to(device)

    optimizer = optim.Adam(
        filter(lambda x: x.requires_grad, model.module.parameters()), lr=args.lr)

    train(model, training_data, optimizer, device, args, log)
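
# `get_args` is defined elsewhere; this is a minimal argparse sketch covering
# only the options the entry points above actually touch. The flag names match
# the attribute accesses above, but the defaults and help strings are
# assumptions, not the repo's real configuration.
import argparse


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_dir', type=str, required=True)
    parser.add_argument('--valid_dir', type=str, default=None)
    parser.add_argument('--seq_len', type=int, default=240)
    parser.add_argument('--data_type', type=str, default='2D')
    parser.add_argument('--aist', action='store_true',
                        help='load the AIST++ dataset instead of the default one')
    parser.add_argument('--rotmat', action='store_true',
                        help='use a rotation-matrix pose representation for AIST++')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--cuda', action='store_true')
    return parser.parse_args()
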
def __init__(self, model_file, device):
    self.device = device

    # Restore the training-time arguments saved alongside the weights
    checkpoint = torch.load(model_file)
    model_args = checkpoint['args']
    self.model_args = model_args
    print('[Info] Loading model args:')
    printer(model_args)

    encoder = Encoder(model_args)
    decoder = Decoder(model_args)
    model = Model(encoder, decoder, model_args, device=device)

    # The state dict was saved from a DataParallel-wrapped model, so wrap
    # again before loading to match the 'module.' key prefixes
    model = nn.DataParallel(model)
    model.load_state_dict(checkpoint['model'])
    print('[Info] Trained model loaded.')

    # Move to the target device and switch to inference mode
    self.model = model.to(device)
    self.model.eval()
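
# For checkpoint['args'] and checkpoint['model'] to exist, the training loop
# must save them under those keys. A minimal saving sketch consistent with the
# loader above; the 'epoch' key, function name, and filename pattern are
# assumptions, not the repo's actual code.
import torch


def save_checkpoint(model, args, epoch, save_dir):
    # `model` is the nn.DataParallel wrapper, so its state_dict keys carry the
    # 'module.' prefix that the loader above expects when it wraps before
    # calling load_state_dict.
    checkpoint = {
        'model': model.state_dict(),
        'args': args,
        'epoch': epoch,
    }
    torch.save(checkpoint, f'{save_dir}/epoch_{epoch}.pt')
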