Example #1
def main():
    print('Initializing Training Process..')

    parser = argparse.ArgumentParser()

    parser.add_argument('--group_name', default=None)
    parser.add_argument('--input_wavs_dir', default='LJSpeech-1.1/wavs')
    parser.add_argument('--input_mels_dir', default='ft_dataset')
    parser.add_argument('--input_training_file', default='LJSpeech-1.1/training.txt')
    parser.add_argument('--input_validation_file', default='LJSpeech-1.1/validation.txt')
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='')
    parser.add_argument('--training_epochs', default=3100, type=int)
    parser.add_argument('--stdout_interval', default=5, type=int)
    parser.add_argument('--checkpoint_interval', default=5000, type=int)
    parser.add_argument('--summary_interval', default=100, type=int)
    parser.add_argument('--validation_interval', default=1000, type=int)
    # note: argparse's type=bool treats any non-empty string as True
    parser.add_argument('--fine_tuning', default=False, type=bool)

    a = parser.parse_args()

    with open(a.config) as f:
        data = f.read()

    json_config = json.loads(data)
    h = AttrDict(json_config)
    build_env(a.config, 'config.json', a.checkpoint_path)

    torch.manual_seed(h.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = torch.cuda.device_count()
        h.batch_size = int(h.batch_size / h.num_gpus)
        print('Batch size per GPU :', h.batch_size)
    else:
        # no CUDA available: keep a single process so the num_gpus check below still works
        h.num_gpus = 1

    if h.num_gpus > 1:
        mp.spawn(train, nprocs=h.num_gpus, args=(a, h))
    else:
        train(0, a, h)
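
The snippet above (and the examples below) relies on two helpers that are not shown, AttrDict and build_env. The sketch below is modeled on how HiFi-GAN-style repos typically define them; it is an assumption, not this project's actual source.

# Minimal sketch of the assumed helpers (modeled on HiFi-GAN's env.py).
import os
import shutil


class AttrDict(dict):
    """Dict whose keys are also readable as attributes (h.seed, h.batch_size, ...)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self


def build_env(config, config_name, path):
    """Copy the config file into the checkpoint directory so each run keeps its own copy."""
    t_path = os.path.join(path, config_name)
    if config != t_path:
        os.makedirs(path, exist_ok=True)
        shutil.copyfile(config, t_path)
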
Example #2
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument('--group_name', default=None)
    parser.add_argument('--input_wavs_dir', default='LJSpeech-1.1/wavs')
    parser.add_argument('--input_validation_file', default='LJSpeech-1.1/validation.txt')
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='')
    parser.add_argument('--fine_tuning', default=False, type=bool)
    parser.add_argument('--input_mels_dir', default='ft_dataset')
    parser.add_argument('--speakers_json', default=None, type=str)
    parser.add_argument('--batch_size', default=15, type=int)
    # for train script compatibility
    parser.add_argument('--input_training_file', default='LJSpeech-1.1/training.txt')

    a = parser.parse_args()

    with open(a.config) as f:
        data = f.read()
    
    if a.speakers_json:
        with open(a.speakers_json) as f:
            speaker_mapping = json.load(f)
    else:
        speaker_mapping = None

    json_config = json.loads(data)
    h = AttrDict(json_config)

    torch.manual_seed(h.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = torch.cuda.device_count()
        h.batch_size = int(h.batch_size / h.num_gpus)
        print('Batch size per GPU :', h.batch_size)
    else:
        pass

    # evaluate on segments 8x longer than the training segment size
    h.segment_size = h.segment_size * 8

    # 'eval' here is the project's evaluation function (it shadows the builtin name)
    eval(0, a, h, speaker_mapping)
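
The leading 0 in train(0, a, h) and eval(0, a, h, speaker_mapping) appears to be the process rank. A self-contained sketch of that convention; worker is a hypothetical stand-in, not one of the project's real entry points.

# Sketch of the rank convention used above; 'worker' stands in for train()/eval().
import torch.multiprocessing as mp


def worker(rank, a, h):
    # mp.spawn calls this once per process, prepending rank 0..nprocs-1 to args
    print(f'process {rank} would train/evaluate on cuda:{rank}')


def launch(a, h, num_gpus):
    if num_gpus > 1:
        mp.spawn(worker, nprocs=num_gpus, args=(a, h))
    else:
        # single-process runs pass rank 0 explicitly, matching train(0, a, h) above
        worker(0, a, h)
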
Example #3
def main():
    print('Initializing the Training Process..')
    
    parser = argparse.ArgumentParser()

    parser.add_argument('--input_wavs_dir', default='data/recordings')
    parser.add_argument('--input_mels_dir', default='processed_spokenDigits_np')
    parser.add_argument('--config', default='processed_spokenDigits_np')
    parser.add_argument('--training_epochs', default=1000, type=int)
    # needed by build_env() below; default borrowed from the examples above
    parser.add_argument('--checkpoint_path', default='cp_hifigan')

    a = parser.parse_args()
    
    with open(a.config) as f:
        data = f.read()

    json_config = json.loads(data)
    h = AttrDict(json_config)
    
    build_env(a.config, 'config.json', a.checkpoint_path)
    
    torch.manual_seed(h.seed)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = torch.cuda.device_count()
        h.batch_size = int(h.batch_size / h.num_gpus)
    else:
        print('\nRunning on cpu')

    # train now
    g_losses, d_losses, generated_mels = train(h)
    
    # visualize the loss as the network trained
    plt.plot(g_losses, label='generator')
    plt.plot(d_losses, label='discriminator')
    plt.xlabel("100's of batches")
    plt.ylabel('loss')
    plt.legend()
    plt.grid(True)
    # plt.ylim(0, 2.5) # consistent scale
    plt.show()
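
To check the plotting code in isolation, here is a standalone version with placeholder loss values; the real g_losses/d_losses come from the project's train() function.

# Standalone illustration of the loss plot above; the values are made up.
import matplotlib.pyplot as plt

g_losses = [2.5, 1.8, 1.4, 1.2, 1.1]  # placeholder generator losses
d_losses = [1.0, 0.9, 0.8, 0.8, 0.7]  # placeholder discriminator losses

plt.plot(g_losses, label='generator')
plt.plot(d_losses, label='discriminator')
plt.xlabel("100's of batches")
plt.ylabel('loss')
plt.legend()
plt.grid(True)
plt.show()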