Code Example #1
    def test_not_empty(self):
        self.assertFalse(Rule().not_empty(Data.Target)(Params([])))
        self.assertFalse(Rule().not_empty(Data.Delimiter)(Params([])))
        self.assertFalse(Rule().not_empty(Data.Option)(Params([])))

        self.assertTrue(Rule().not_empty(Data.Target)(self.p))
        self.assertTrue(Rule().not_empty(Data.Delimiter)(self.p))
        self.assertTrue(Rule().not_empty(Data.Option)(self.p))
Code Example #2
    def test_empty(self):
        args = ['']
        p = Params(args)

        self.assertEqual(0, len(p.targets))

        args = ['--option', '--']
        p = Params(args)

        self.assertEqual(0, len(p.targets))
Code Example #3
def build_model(checkpoint, force_cpu=False):
    """Load and build model a from checkpoint."""
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not force_cpu else "cpu")
    state = torch.load(checkpoint, map_location=device)
    hp.load_state_dict(state['parameters'])
    model = Tacotron()
    model.load_state_dict(remove_dataparallel_prefix(state['model']))
    model.to(device)
    return model
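
A minimal usage sketch for the loader above; the checkpoint path and the eval-mode call are illustrative assumptions, not part of the original example:

    # Hypothetical checkpoint path; build_model is defined in the example above.
    model = build_model("checkpoints/tacotron.pt", force_cpu=True)
    model.eval()  # switch to inference mode before synthesis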
Code Example #4
    def test_empty_command(self):
        self.assertEqual(
            RESULT,
            create('message').empty_command(RESULT)[0].attempt(Params([])))
        self.assertEqual(
            None,
            create('message').empty_command(RESULT)[0].attempt(
                Params(['target'])))
        self.assertEqual(
            None,
            create('message').empty_command(RESULT)[0].attempt(
                Params(['--flag'])))
        self.assertEqual(
            None,
            create('message').empty_command(RESULT)[0].attempt(Params(['--'])))
Code Example #5
    def setUp(self):
        args = ['first', '-', '--key=value',
                'second', '--key=value2', '--help', '--help=some information', '--',
                'third', '--flag',
                'fourth', '--',
                'fifth']

        self.p = Params(args)

        self.expected = {
            'targets': [
                Target('first', 0),
                Target('second', 1),
                Target('third', 2),
                Target('fourth', 3),
                Target('fifth', 4)
            ], 'options': [
                ('key', 'value2'),
                ('flag', None),
                ('help', 'some information')
            ], 'delimiters': [
                SingleDelimiter(1),
                DoubleDelimiter(2),
                DoubleDelimiter(4)
            ], 'help': 'some information',
            'separated': [
                [Target('first', 0)],
                [Target('second', 1)],
                [Target('third', 2), Target('fourth', 3)],
                [Target('fifth', 4)]
            ]
        }
Code Example #6
    def __init__(self):
        super(Tacotron, self).__init__()

        # Encoder embedding
        other_symbols = 3  # PAD, EOS, UNK
        self._embedding = Embedding(hp.symbols_count() + other_symbols,
                                    hp.embedding_dimension,
                                    padding_idx=0)
        torch.nn.init.xavier_uniform_(self._embedding.weight)

        # Encoder transforming graphemes or phonemes into an abstract input representation
        self._encoder = self._get_encoder(hp.encoder_type)

        # Reversal language classifier to make the encoder truly language independent
        if hp.reversal_classifier:
            self._reversal_classifier = self._get_adversarial_classifier(
                hp.reversal_classifier_type)

        # Prenet for transformation of previous predicted frame
        self._prenet = Prenet(hp.num_mels, hp.prenet_dimension,
                              hp.prenet_layers, hp.dropout)

        # Speaker and language embeddings enlarge the decoder input
        decoder_input_dimension = hp.encoder_dimension
        if hp.multi_speaker:
            decoder_input_dimension += hp.speaker_embedding_dimension
        if hp.multi_language:
            decoder_input_dimension += hp.language_embedding_dimension

        # Decoder attention layer
        self._attention = self._get_attention(hp.attention_type,
                                              decoder_input_dimension)

        # Instantiate decoder RNN layers
        gen_cell_dimension = decoder_input_dimension + hp.decoder_dimension
        att_cell_dimension = decoder_input_dimension + hp.prenet_dimension
        if hp.decoder_regularization == 'zoneout':
            generator_rnn = ZoneoutLSTMCell(gen_cell_dimension,
                                            hp.decoder_dimension,
                                            hp.zoneout_hidden, hp.zoneout_cell)
            attention_rnn = ZoneoutLSTMCell(att_cell_dimension,
                                            hp.decoder_dimension,
                                            hp.zoneout_hidden, hp.zoneout_cell)
        else:
            generator_rnn = DropoutLSTMCell(gen_cell_dimension,
                                            hp.decoder_dimension,
                                            hp.dropout_hidden)
            attention_rnn = DropoutLSTMCell(att_cell_dimension,
                                            hp.decoder_dimension,
                                            hp.dropout_hidden)

        # Decoder which controls attention and produces mel frames and stop tokens
        self._decoder = Decoder(hp.num_mels, hp.decoder_dimension,
                                self._attention, generator_rnn, attention_rnn,
                                decoder_input_dimension, self._prenet,
                                hp.prenet_dimension, hp.max_output_length)

        # Postnet transforming predicted mel frames (residual mel or linear frames)
        self._postnet = self._get_postnet(
            "cbhg" if hp.predict_linear else "conv")
Code Example #7
    def test_simple(self):
        args = ['target0', 'target1']
        p = Params(args)

        self.assertEqual(len(args), len(p.targets))
        self.assertEqual(p.targets[0], Target('target0', 0))
        self.assertEqual(p.targets[1], Target('target1', 1))
Code Example #8
    def test_simple(self):
        args = ['--flag', '--key=value', '--key2=value 2']
        p = Params(args)

        self.assertEqual(len(args), len(p.options))
        self.assertEqual(p.options['flag'], None)
        self.assertEqual(p.options['key'], 'value')
        self.assertEqual(p.options['key2'], 'value 2')
Code Example #9
    def test_complex2(self):
        p = Params(['target', '--key=value'])
        rule = (Rule()
                .option('key', 'value')
                .empty(Data.Delimiter)
                .target('target', 0))
        s = Statement('message', RESULT, rule)

        self.assertEqual(s.attempt(p), RESULT)
Code Example #10
    def test_complex1(self):
        p = Params(['target', '--', '--flag', '--key=value'])
        rule = (Rule()
                .delimiter(DoubleDelimiter())
                .option('flag')
                .option('key', 'value')
                .target('target', 0))
        s = Statement('message', RESULT, rule=rule)

        self.assertEqual(s.attempt(p), RESULT)
Code Example #11
    def execute(self, argv):
        try:
            p = Params(argv)
            if self._need_help(p):
                self._print_help()
            else:
                self._process(p)
        except PlatformException as e:
            self._error(e)
Code Example #12
    def test_simple(self):
        args = ['first', '-', 'second', '--', 'third', 'fourth', '--', 'fifth']
        p = Params(args)

        expected = [
            [Target('first', 0)],
            [Target('second', 1)],
            [Target('third', 2), Target('fourth', 3)],
            [Target('fifth', 4)]
        ]

        self.assertListEqual(p.separated, expected)
Code Example #13
    def __init__(self, pro=None, pack=None):
        super(UtrRequest, self).__init__()
        self.pro = pro
        self.pack = pack
        self.params = Params()
        self.headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
        }
Code Example #14
    def test_empty(self):
        args = ['target']
        p = Params(args)

        self.assertEqual(0, len(p.options))
Code Example #15
def main():
    argv0: str = sys.argv[0]
    if argv0:
        workdir: str = os.path.dirname(argv0)
        if workdir:
            os.chdir(workdir)
    os.chdir("data")

    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default="1a",  #
                        help="Params dataset for Training Data.")

    args = parser.parse_args()
    Params.load(f"../params/{args.dataset}.json")
    audio.hp = Params
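    # convert the STFT shift and window lengths from milliseconds to frame counts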
    hop_frames: int = audio.ms_to_frames(audio.hp.stft_shift_ms)
    win_frames: int = audio.ms_to_frames(audio.hp.stft_window_ms)
    print(f"mel parameters: hop = {hop_frames:,}, win = {win_frames:,}")
    dataset_path: str = os.path.join("datasets", args.dataset)

    # as this code *alters* the train and val files, always regenerate them first!
    _: List[str] = ["python", os.path.join(dataset_path, "create_training_files.py")]
    subprocess.run(_, check=True, bufsize=0)

    files_to_solve = [(dataset_path, "train.txt"), (dataset_path, "val.txt")]

    mel_path: str = os.path.join(dataset_path, 'mel_spectrograms')
    os.makedirs(mel_path, exist_ok=True)

    mp3_path: str = os.path.join(dataset_path, "reference-audio")
    shutil.rmtree(mp3_path, ignore_errors=True)
    os.mkdir(mp3_path)

    mp3_bad_path: str = os.path.join(dataset_path, "reference-audio-bad")
    shutil.rmtree(mp3_bad_path, ignore_errors=True)
    os.mkdir(mp3_bad_path)

    mp3_fixed_path: str = os.path.join(dataset_path, "reference-audio-fixed")
    shutil.rmtree(mp3_fixed_path, ignore_errors=True)
    os.mkdir(mp3_fixed_path)

    metadata = []
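    # Each line of train.txt / val.txt is pipe-separated:
    #   idx|speaker|lang|wav|_|_|raw_text|phonemes (see the unpacking in the loop below)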
    for d, fs in files_to_solve:
        with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
            metadata.append((d, fs, [line.rstrip().split('|') for line in f]))

    bad_silence_count: int = 0
    file_bad_entries: str = os.path.join(dataset_path, "entries-bad.txt")
    with open(file_bad_entries, "w"):
        pass

    fix_silence_count: int = 0
    file_fixed_entries: str = os.path.join(dataset_path, "entries-fixed.txt")
    with open(file_fixed_entries, "w"):
        pass

    skipped_too_short: List[str] = list()
    skipped_too_long: List[str] = list()
    spec_id: int = 0
    print('Please wait, this may take a very long time.')
    for d, fs, m in metadata:
        print(f'Creating spectrograms for: {fs}')
        bar: progressbar.ProgressBar = progressbar.ProgressBar(maxval=len(m))
        bar.start()
        with open(os.path.join(d, fs + "-tmp"), 'w', encoding='utf-8') as f:
            for i in m:
                idx, speaker, lang, wav, _, _, raw_text, phonemes = i

                if lang not in Params.languages:
                    continue

                raw_text = ud.normalize("NFC", raw_text)
                phonemes = ud.normalize("NFC", phonemes)

                spec_id += 1
                spec_name = f"{lang}_{speaker}-{spec_id:06d}.npy"

                mel_path_partial = os.path.join("mel_spectrograms", spec_name)
                mel_path = os.path.join(dataset_path, mel_path_partial)

                entry: str = f'{idx}|{speaker}|{lang}|{wav}|{mel_path_partial}||{raw_text}|{phonemes}'

                audio_path = os.path.join(d, wav)

                py_audio: AudioSegment = AudioSegment.from_file(audio_path)
                py_audio = py_audio.set_channels(1).set_frame_rate(Params.sample_rate)
                py_audio = effects.normalize(py_audio)
                py_audio = trim_silence(py_audio)

                # Output altered audio (compressed) for manual review
                mp3_name = f"{lang}_{speaker}-{spec_id:06d}.mp3"
                ref_audio_mp3: str = os.path.join(mp3_path, mp3_name)

                if Params.fix_silence:
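                    # shorten excessive internal pauses: split on silence, then re-join the segments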
                    fix_silence: int = Params.fix_silence_len
                    segments = silence.split_on_silence(py_audio,
                                                        min_silence_len=fix_silence,
                                                        silence_thresh=-50,
                                                        keep_silence=fix_silence / 2)
                    if len(segments) > 1:
                        new_py_audio = AudioSegment.empty()
                        for segment in segments:
                            new_py_audio = new_py_audio.append(segment, crossfade=0)
                        assert len(new_py_audio), "Empty fixed audio after recombining?"

                        py_audio = new_py_audio.set_channels(1).set_frame_rate(py_audio.frame_rate)
                        with open(file_fixed_entries, "a") as w:
                            print(entry, file=w)
                        fix_audio_mp3: str = os.path.join(mp3_fixed_path, f"fix-{mp3_name}")
                        py_audio.export(fix_audio_mp3, format="mp3", parameters=["-qscale:a", "3"])
                        fix_silence_count += 1

                if Params.skip_silence:
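                    # drop recordings that still contain a silence longer than max_silence_len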
                    max_silence: int = Params.max_silence_len
                    if silence.detect_silence(py_audio,
                                              min_silence_len=max_silence,
                                              silence_thresh=-50):
                        with open(file_bad_entries, "a") as w:
                            print(entry, file=w)
                        bad_audio_mp3: str = os.path.join(mp3_bad_path, f"bad-{mp3_name}")
                        py_audio.export(bad_audio_mp3, format="mp3", parameters=["-qscale:a", "3"])
                        bad_silence_count += 1
                        continue

                if len(py_audio) < Params.audio_min_length:
                    skipped_too_short.append(entry)
                    bad_audio_mp3: str = os.path.join(mp3_bad_path, f"too-short-{mp3_name}")
                    py_audio.export(bad_audio_mp3, format="mp3", parameters=["-qscale:a", "3"])
                    continue

                if len(py_audio) > Params.audio_max_length:
                    skipped_too_long.append(entry)
                    bad_audio_mp3: str = os.path.join(mp3_bad_path, f"too-long-{mp3_name}")
                    py_audio.export(bad_audio_mp3, format="mp3", parameters=["-qscale:a", "3"])
                    continue

                if Params.lead_in_silence > 0:
                    # Add lead_in_silence ms of silence at the beginning
                    py_audio = AudioSegment.silent(Params.lead_in_silence) + py_audio

                if Params.lead_out_silence > 0:
                    # Add lead_out_silence ms of silence at the end
                    py_audio = py_audio + AudioSegment.silent(Params.lead_out_silence)

                if not os.path.exists(ref_audio_mp3):
                    py_audio.export(ref_audio_mp3, format="mp3", parameters=["-qscale:a", "3"])

                py_audio_samples: np.ndarray = np.array(py_audio.get_array_of_samples()).astype(np.float32)
                py_audio_samples = py_audio_samples / (1 << 15)  # scale 16-bit PCM to [-1.0, 1.0)
                if not os.path.exists(mel_path):
                    np.save(mel_path, audio.spectrogram(py_audio_samples, True))

                print(entry, file=f)
                bar.update(bar.currval + 1)

        print(f"Records skipped (>{Params.audio_max_length / 1000:.02f}): {len(skipped_too_long):,}")
        with open(os.path.join(d, "too-long-" + fs), "w") as w:
            for entry in skipped_too_long:
                print(entry, file=w)

        print(f"Records skipped (<{Params.audio_min_length / 1000:.02f}): {len(skipped_too_short):,}")
        with open(os.path.join(d, "too-short-" + fs), "w") as w:
            for entry in skipped_too_short:
                print(entry, file=w)

        bar.finish()

    if bad_silence_count:
        print(f"Records skipped because of excessive silence: {bad_silence_count:,}")
    if fix_silence_count:
        print(f"Records altered because of excessive silence: {fix_silence_count:,}")

    for d, fs in files_to_solve:
        tmp = os.path.join(d, fs + "-tmp")
        dst = os.path.join(d, fs)
        bkup = os.path.join(d, fs + "-bkup")

        if os.path.exists(bkup):
            os.remove(bkup)

        os.rename(dst, bkup)
        os.rename(tmp, dst)

    sys.exit()
Code Example #16
    def test_info_statement(self):
        self.assertEqual(None, InfoStatement('message').attempt(Params([])))
        self.assertEqual(
            None,
            InfoStatement('message').attempt(
                Params(['target', '--flag', '--'])))
Code Example #17
    def test_position(self):
        args = ['first', '-', 'second', '--', 'third', 'fourth', '--', 'fifth']
        p = Params(args)
        expected = [SingleDelimiter(1), DoubleDelimiter(2), DoubleDelimiter(4)]

        self.assertListEqual(p.delimiters, expected)
Code Example #18
"""
Use train.py to train cyclegan.
All parameters you need are set in `Params`,you can reset it for your need.
"""

import time
from params.params import Params
from data import load_data
from model.cycleModel import cycleGAN
from utils import get_samples
import warnings

warnings.filterwarnings("ignore")

if __name__ == "__main__":
    params = Params('cycleGan')
    dataset = load_data(params)  # load the data
    data_size = len(dataset)
    print('Create Model: cycleGAN')
    model = cycleGAN(params)  # create the model and initialize it
    model.load()  # load the latest (or a given epoch's) checkpoint, including optimizers, epoch and losses
    total_iters = 0
    print("Start to train the model")
    for epoch in range(model.epoch, params.n_epoch + 1):
        start = time.time()
        epoch_iters = 0
        print('Epoch : {}'.format(epoch))
        for i, data in enumerate(dataset):
            total_iters += params.batch_size
            epoch_iters += params.batch_size
Code Example #19
File: trainDcGan.py  Project: yuj23/GANs
from data import load_data
from model.dcGanModel import DCGAN
from params.params import Params
from utils import get_sample_image
import time
import warnings
warnings.filterwarnings('ignore')

if __name__ == "__main__":
    params = Params('DCGAN')
    dataset = load_data(params)
    datasize = len(dataset)
    print('Create Model: DC-GAN')
    model = DCGAN(params)
    model.load()
    total_iters = 0
    print("Start to train the model")
    step = 0
    for epoch in range(model.epoch, params.n_epoch + 1):
        start = time.time()
        epoch_iters = 0
        print("Epoch : {}".format(epoch))
        for data, label in dataset:
            total_iters += params.batch_size
            epoch_iters += params.batch_size
            model.set_input(data)
            model.D_step()
            if step % params.n_critic == 0:
                model.G_step()
            if total_iters % params.print_freq == 0:
                model.save_loss()
Code Example #20
    def test_exception_in_case_of_garbage_instead_of_the_data_enumeration(
            self):
        self.assertRaises(PlatformException, Rule._get_data, 'adf', Params([]))
Code Example #21
    def test_equals(self):
        args = ['--key=value', '--key=value2']
        p = Params(args)

        self.assertEqual(len(p.options), 1)
        self.assertEqual(p.options['key'], 'value2')
Code Example #22
    def setUp(self):
        self.p = Params(['target', '-', '--flag', '--key=value'])
        self.with_double_delimiter = Params(
            ['target', '--', '--flag', '--key=value'])
Code Example #23
    def test_not_help(self):
        args = ['--not_help']
        p = Params(args)

        self.assertTrue('not_help' in p.options)
        self.assertFalse(p.needHelp)
Code Example #24
    np.random.seed(42)
    torch.manual_seed(42)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # prepare directory for checkpoints
    checkpoint_dir = os.path.join(args.base_directory, args.checkpoint_root)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # load checkpoint (dict) with saved hyper-parameters (some may be overwritten below when fine-tuning)
    if args.checkpoint:
        checkpoint = os.path.join(checkpoint_dir, args.checkpoint)
        checkpoint_state = torch.load(checkpoint, map_location='cpu')
        hp.load_state_dict(checkpoint_state['parameters'])

    # load hyperparameters
    if args.hyper_parameters is not None:
        hp_path = os.path.join(args.base_directory, 'params',
                               f'{args.hyper_parameters}.json')
        hp.load(hp_path)

    # load dataset
    dataset = TextToSpeechDatasetCollection(
        os.path.join(args.data_root, hp.dataset))

    if hp.multi_language and hp.balanced_sampling and hp.perfect_sampling:
        dp_devices = args.max_gpus if hp.parallelization == "data" else 1
        train_sampler = PerfectBatchSampler(dataset.train,
                                            hp.languages,
Code Example #25
    def test_type(self):
        args = ['-', '--', '-', '--']
        p = Params(args)
        expected = [SingleDelimiter(0), DoubleDelimiter(0), SingleDelimiter(0), DoubleDelimiter(0)]

        self.assertListEqual(p.delimiters, expected)
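
Taken together, the tests above pin down the parsing behavior of Params. A minimal sketch of driving it directly, using a made-up argument vector and the same Target and DoubleDelimiter classes the tests import:

    # Hypothetical arguments; the expected values follow the test cases above.
    p = Params(['build', '--verbose', '--out=dist', '--', 'src'])
    assert p.targets == [Target('build', 0), Target('src', 1)]  # positionals become indexed targets
    assert p.options['verbose'] is None                         # bare flags map to None
    assert p.options['out'] == 'dist'                           # --key=value pairs keep their values
    assert p.delimiters == [DoubleDelimiter(1)]                 # position = number of preceding targets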