def test_eval():
    """Build an evaluation DataLoader and a model, then run evaluation.

    Relies on module-level names from this file: ``AudiobookDataset``,
    ``raw_collate`` / ``discrete_collate``, ``build_model``,
    ``evaluate_model``, the ``hp`` hyperparameter object and the
    ``use_cuda`` flag.

    Raises:
        ValueError: if ``hp.input_type`` is neither 'raw' nor 'bits'.
    """
    root = "data_dir"
    eval_set = AudiobookDataset(root)

    # Pick the collate function from the configured input representation.
    collate_by_type = {
        'raw': raw_collate,
        'bits': discrete_collate,
    }
    if hp.input_type not in collate_by_type:
        raise ValueError("input_type:{} not supported".format(hp.input_type))
    chosen_collate = collate_by_type[hp.input_type]

    loader = DataLoader(
        eval_set,
        collate_fn=chosen_collate,
        shuffle=True,
        num_workers=0,
        batch_size=hp.batch_size,
    )

    target_device = torch.device("cuda" if use_cuda else "cpu")
    print("using device:{}".format(target_device))

    # build model, create optimizer
    net = build_model().to(target_device)
    evaluate_model(net, loader)
# NOTE(review): this fragment is the tail of a truncated training-epoch loop
# (the enclosing def/loop is not visible here) followed by the script entry
# point; the final optim.Adam(...) call is cut off mid-statement in the source.
print("epoch:{}, running loss:{}, average loss:{}, current lr:{}".format(global_epoch, running_loss, avg_loss, current_lr))
global_epoch += 1

if __name__=="__main__":
    # Parse CLI options from the module docstring (docopt convention).
    args = docopt(__doc__)
    #print("Command line args:\n", args)
    checkpoint_dir = args["--checkpoint-dir"]
    checkpoint_path = args["--checkpoint"]
    data_root = args["<data-root>"]
    # make dirs, load dataloader and set up device
    os.makedirs(checkpoint_dir, exist_ok=True)
    os.makedirs(os.path.join(checkpoint_dir,'eval'), exist_ok=True)
    dataset = AudiobookDataset(data_root)
    # Select the collate function from the configured waveform representation:
    # 'raw' and 'mixture' both map to raw_collate; 'bits'/'mulaw' are discrete.
    if hp.input_type == 'raw':
        collate_fn = raw_collate
    elif hp.input_type == 'mixture':
        collate_fn = raw_collate
    elif hp.input_type in ['bits', 'mulaw']:
        collate_fn = discrete_collate
    else:
        raise ValueError("input_type:{} not supported".format(hp.input_type))
    data_loader = DataLoader(dataset, collate_fn=collate_fn, shuffle=True, num_workers=0, batch_size=hp.batch_size)
    # `use_cuda` is presumably set at module level — not visible here; verify.
    device = torch.device("cuda" if use_cuda else "cpu")
    print("using device:{}".format(device))
    # build model, create optimizer
    model = build_model().to(device)
    optimizer = optim.Adam(model.parameters(),  # NOTE(review): truncated mid-call in source
# NOTE(review): fragment of a training entry point; `args` (argparse-style),
# `data_path`, `train_collate`, `test_collate`, `Generator`, `hp`, and
# `load_checkpoint` are defined outside this visible span.
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Worker processes + pinned memory only help when feeding a GPU.
kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
# Anomaly detection slows autograd noticeably; presumably left on for
# debugging — confirm before production training runs.
torch.autograd.set_detect_anomaly(True)
with open(os.path.join(data_path, 'train_data.json'), 'r') as f:
    train_data = json.load(f)
with open(os.path.join(data_path, 'test_data.json'), 'r') as f:
    test_data = json.load(f)
train_loader = torch.utils.data.DataLoader(
    AudiobookDataset(train_data), collate_fn=train_collate,
    batch_size=args.batch_size, shuffle=True, **kwargs)
# batch_size=1 and shuffle=False: evaluate items one at a time in fixed order.
test_loader = torch.utils.data.DataLoader(
    AudiobookDataset(test_data), collate_fn=test_collate,
    batch_size=1, shuffle=False, **kwargs)
model = Generator(hp.dim_neck, hp.dim_emb, hp.dim_pre, hp.freq).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
current_epoch = 0
if args.checkpoint:
    # Resuming: load_checkpoint restores model/optimizer state and returns
    # the epoch to continue from.
    current_epoch = load_checkpoint(args.checkpoint, model, device, optimizer)
# NOTE(review): tail of a truncated training loop followed by the script entry
# point; the final optim.Adam(...) call is cut off mid-statement in the source.
global_epoch += 1

if __name__=="__main__":
    # Parse CLI options from the module docstring (docopt convention).
    args = docopt(__doc__)
    #print("Command line args:\n", args)
    checkpoint_dir = args["--checkpoint-dir"]
    checkpoint_path = args["--checkpoint"]
    data_root = args["<data-root>"]
    # make dirs, load dataloader and set up device
    os.makedirs(checkpoint_dir, exist_ok=True)
    os.makedirs(os.path.join(checkpoint_dir,'eval'), exist_ok=True)
    # 3_0
    # This variant passes mel-split parameters from hp into the dataset —
    # presumably to train on a sub-band of the spectrogram; confirm against
    # AudiobookDataset's signature.
    dataset = AudiobookDataset(data_root, split_n_mels=hp.num_mels, axis_splits=hp.axis_splits, axis_splits_offset=hp.split_offset)
    # Select the collate function from the configured waveform representation:
    # 'raw' and 'mixture' both map to raw_collate; 'bits'/'mulaw' are discrete.
    if hp.input_type == 'raw':
        collate_fn = raw_collate
    elif hp.input_type == 'mixture':
        collate_fn = raw_collate
    elif hp.input_type in ['bits', 'mulaw']:
        collate_fn = discrete_collate
    else:
        raise ValueError("input_type:{} not supported".format(hp.input_type))
    data_loader = DataLoader(dataset, collate_fn=collate_fn, shuffle=True, num_workers=0, batch_size=hp.batch_size)
    # `use_cuda` is presumably set at module level — not visible here; verify.
    device = torch.device("cuda" if use_cuda else "cpu")
    print("using device:{}".format(device))
    # build model, create optimizer
    model = build_model().to(device)
    optimizer = optim.Adam(model.parameters(),  # NOTE(review): truncated mid-call in source
# NOTE(review): `break` is the tail of a truncated loop that precedes the
# entry point in the original source (its enclosing loop is not visible).
break

if __name__ == "__main__":
    # Parse CLI options from the module docstring (docopt convention).
    args = docopt(__doc__)
    #print("Command line args:\n", args)
    checkpoint_dir = args["--checkpoint-dir"]
    checkpoint_path = args["--checkpoint"]
    data_root = args["<data-root>"]
    # make dirs, load dataloader and set up device
    os.makedirs(checkpoint_dir, exist_ok=True)
    os.makedirs(os.path.join(checkpoint_dir, 'eval'), exist_ok=True)
    # TODO?
    # Hard-coded split parameters — presumably experimental values; the other
    # variant of this script reads them from hp instead. Verify intent.
    dataset = AudiobookDataset(data_root, axis_splits=21212, axis_splits_offset=3)
    # Select the collate function from the configured waveform representation:
    # 'raw' and 'mixture' both map to raw_collate; 'bits'/'mulaw' are discrete.
    if hp.input_type == 'raw':
        collate_fn = raw_collate
    elif hp.input_type == 'mixture':
        collate_fn = raw_collate
    elif hp.input_type in ['bits', 'mulaw']:
        collate_fn = discrete_collate
    else:
        raise ValueError("input_type:{} not supported".format(hp.input_type))
    data_loader = DataLoader(dataset, collate_fn=collate_fn, shuffle=True, num_workers=0, batch_size=hp.batch_size)
    # Forces CPU execution in this variant (unlike the others, which consult
    # a module-level use_cuda flag).
    use_cuda = False