Code example #1
def test():
    # ==== Test: batch mixer (conclusion: capacity should be larger to ensure good mixing) ====
    x, y = read('./dataset/vcc2016/bin/*/*/1*001.bin', 32, min_after_dequeue=1024, capacity=2048)
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        for _ in range(200):
            x_, y_ = sess.run([x, y])
            print(y_)


    # ===== Read binary ====
    features = read_whole_features('./dataset/vcc2016/bin/Training Set/SF1/*001.bin')

    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        features = sess.run(features)

    y = pw2wav(features)
    librosa.output.write_wav('test1.wav', y, 16000)  # TODO fs should be specified externally.


    # ==== Direct read =====
    f = './dataset/vcc2016/bin/Training Set/SF1/100001.bin'
    features = np.fromfile(f, np.float32)
    features = np.reshape(features, [-1, 513*2 + 1 + 1 + 1])  # sp (513) + ap (513) + f0 + en + speaker id

    y = pw2wav(features)
    librosa.output.write_wav('test2.wav', y, 16000)
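
For reference, the direct-read branch above implies a per-frame layout of 513 spectral-envelope bins, 513 aperiodicity bins, then f0, energy, and a speaker id. The helper below is a small sketch of unpacking that layout with plain NumPy; the split of the first 513*2 columns into sp/ap is an assumption drawn from the trailing "f0, en, spk" comment, and load_bin is a hypothetical name, not part of the project.

import numpy as np

SP_DIM = 513  # assumed per-frame dimension of the spectral envelope (and of the aperiodicity)

def load_bin(path):
    # Hypothetical helper: split one frame-major .bin file into named fields.
    frames = np.fromfile(path, np.float32).reshape([-1, SP_DIM * 2 + 3])
    return {
        'sp': frames[:, :SP_DIM],                     # spectral envelope
        'ap': frames[:, SP_DIM:SP_DIM * 2],           # aperiodicity
        'f0': frames[:, SP_DIM * 2],                  # fundamental frequency
        'en': frames[:, SP_DIM * 2 + 1],              # energy
        'speaker': frames[:, SP_DIM * 2 + 2].astype(np.int64),
    }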
Code example #2
def test():
    # ==== Test: batch mixer (conclusion: capacity should be larger to ensure good mixing) ====
    #x, y = read('./dataset/savee/bin/*/*/*.bin', 1, min_after_dequeue=1024, capacity=2048)
    #sv = tf.train.Supervisor()
    #with sv.managed_session() as sess:
    #    for _ in range(200):
    #        x_, y_ = sess.run([x, y])
    #        print(y_)


    # ===== Read binary ====
    features = read_whole_features('./dataset/savee/bin/Training Set/SNeu/*.bin')

    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        features = sess.run(features)

    y = pw2wav(features)
    sf.write('test1.wav', y, 22050)  # TODO fs should be specified externally.


    # ==== Direct read =====
    f = './dataset/savee/bin/Training Set/SNeu/n01.bin'
    features = np.fromfile(f, np.float32)
    features = np.reshape(features, [-1, 513*2 + 1 + 1 + 1])  # sp (513) + ap (513) + f0 + en + speaker id

    y = pw2wav(features)
    sf.write('test2.wav', y, 22050)
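
Both snippets rely on pw2wav to turn the feature dict back into a waveform. Below is a minimal sketch of what such a step can look like, assuming pw2wav wraps pyworld's WORLD synthesizer and that 'sp' is stored as log10 values normalized by the 'en' field; the project's actual pw2wav may differ, and pw2wav_sketch is only an illustrative name.

import numpy as np
import pyworld as pw

def pw2wav_sketch(features, fs=16000):
    # Hedged sketch: undo the assumed log10/energy encoding, then synthesize with WORLD.
    en = np.reshape(features['en'], [-1, 1])
    sp = np.power(10., features['sp']) * en
    return pw.synthesize(
        features['f0'].astype(np.float64).ravel(),
        sp.astype(np.float64),
        features['ap'].astype(np.float64),
        fs,
    )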
Code example #3
    def sample(self):
        with tf.name_scope("sample"):
            normalizer = Tanhize(
                xmax=np.fromfile('./etc/xmax.npf'),
                xmin=np.fromfile('./etc/xmin.npf'),
            )
            features = read_whole_features(
                self.args.file_pattern.format(self.args.src))
            x = normalizer.forward_process(features['sp'])
            x = nh_to_nchw(x)
            #y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
            #y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)
            y_t = SPEAKERS.index(self.args.trg) * tf.ones(
                shape=[tf.shape(x)[0],], dtype=tf.int64)
            self.reuse = False
            z, _ = self.encoder(x, False)
            x_t = self.generator(z, y_t, False)
            self.reuse = True
            x_t = tf.transpose(x_t, [0, 2, 3, 1])
            print "x_t shape:", x_t.get_shape().as_list()
            x_t = tf.squeeze(x_t)
            x_t = normalizer.backward_process(x_t)

            f0_s = features['f0']
            f0_t = convert_f0(f0_s, self.args.src, self.args.trg)
        sample = dict()
        sample['features'] = features
        sample['x_t'] = x_t
        sample['f0_t'] = f0_t
        #sample['y_t'] = y_t_id
        return sample
Code example #4
File: build.py  Project: chj1330/homework
def main():
    tf.gfile.MkDir('./etc')

    # ==== Save max and min value ====
    x = read_whole_features(
        args.train_file_pattern)  # TODO: use it as an obj and keep `n_files`
    x_all = list()
    y_all = list()
    f0_all = list()
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        counter = 1
        while True:  # TODO: read according to speaker instead of all speakers
            try:
                features = sess.run(x)
                print('\rProcessing {}: {}'.format(counter,
                                                   features['filename']),
                      end='')
                x_all.append(features['sp'])
                y_all.append(features['speaker'])
                f0_all.append(features['f0'])
                counter += 1
            except tf.errors.OutOfRangeError:
                # input queue exhausted: every file has been read once
                break
            finally:
                pass
        print()

    x_all = np.concatenate(x_all, axis=0)
    y_all = np.concatenate(y_all, axis=0)
    f0_all = np.concatenate(f0_all, axis=0)
    xmin = x_all.min()
    xmax = x_all.max()
    with open(args.speaker_list) as fp:
        SPEAKERS = [l.strip() for l in fp.readlines()]

    # ==== F0 stats ====
    for s in SPEAKERS:
        print('Speaker {}'.format(s), flush=True)
        f0 = f0_all[SPEAKERS.index(s) == y_all]
        print('  len: {}'.format(len(f0)))
        f0 = f0[f0 > 2.]
        f0 = np.log(f0)
        mu, std = f0.mean(), f0.std()

        # Save as `float32`
        with open('./etc/{}.npf'.format(s), 'wb') as fp:
            fp.write(np.asarray([mu, std]).tostring())

    # ==== Min/Max value ====
    # mu = x_all.mean(0)
    # std = x_all.std(0)
    q005 = np.percentile(x_all, 0.5, axis=0)
    q995 = np.percentile(x_all, 99.5, axis=0)

    # Save as `float32`
    with open('./etc/{}_xmin.npf'.format(args.corpus_name), 'wb') as fp:
        fp.write(q005.tostring())

    with open('./etc/{}_xmax.npf'.format(args.corpus_name), 'wb') as fp:
        fp.write(q995.tostring())
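
The per-speaker ./etc/{speaker}.npf files written above hold the mean and standard deviation of log-F0. A sketch of how the conversion scripts' convert_f0 can use them (log-Gaussian mapping from source to target statistics) follows; the exact project implementation may differ, and both the float32 dtype and the voiced-frame threshold are assumptions taken from the stats computation above.

import numpy as np

def convert_f0_sketch(f0, src, trg, stats_dir='./etc'):
    # Hedged sketch of log-Gaussian F0 conversion using the [mu, std] files written above.
    mu_s, std_s = np.fromfile('{}/{}.npf'.format(stats_dir, src), np.float32)
    mu_t, std_t = np.fromfile('{}/{}.npf'.format(stats_dir, trg), np.float32)
    voiced = f0 > 2.                            # same threshold as the stats loop above
    lf0 = np.log(np.maximum(f0, 1e-10))         # guard against log(0) on unvoiced frames
    lf0 = (lf0 - mu_s) / std_s * std_t + mu_t
    return np.where(voiced, np.exp(lf0), f0)    # leave unvoiced frames untouched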
Code example #5
def main():

    try:
        os.makedirs('./etc')
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # ==== Save max and min value ====
    x = read_whole_features(
        args.train_file_pattern)  # TODO: use it as an obj and keep `n_files`
    x_all = list()
    y_all = list()
    f0_all = list()

    counter = 1
    for features in x:  # TODO: read according to speaker instead of all speakers
        #print(features)

        print('\rProcessing {}: {}'.format(counter, features['filename']),
              end='')
        x_all.append(features['sp'])
        y_all.append(features['speaker'])
        f0_all.append(features['f0'])
        counter += 1

    print()

    x_all = np.concatenate(x_all, axis=0)
    y_all = np.concatenate(y_all, axis=0)
    f0_all = np.concatenate(f0_all, axis=0)

    with open(args.speaker_list) as fp:
        SPEAKERS = [l.strip() for l in fp.readlines()]

    # ==== F0 stats ====
    for s in SPEAKERS:
        print('Speaker {}'.format(s), flush=True)
        f0 = f0_all[SPEAKERS.index(s) == y_all]
        print('  len: {}'.format(len(f0)))
        f0 = f0[f0 > 2.]
        f0 = np.log(f0)
        mu, std = f0.mean(), f0.std()

        # Save as `float32`
        with open('./etc/{}.npf'.format(s), 'wb') as fp:
            fp.write(np.asarray([mu, std]).tostring())

    # ==== Min/Max value ====
    # mu = x_all.mean(0)
    # std = x_all.std(0)
    q005 = np.percentile(x_all, 0.5, axis=0)
    q995 = np.percentile(x_all, 99.5, axis=0)

    # Save as `float32`
    with open('./etc/{}_xmin.npf'.format(args.corpus_name), 'wb') as fp:
        fp.write(q005.tostring())

    with open('./etc/{}_xmax.npf'.format(args.corpus_name), 'wb') as fp:
        fp.write(q995.tostring())
Code example #6
def main():
    logdir, ckpt = os.path.split(args.checkpoint)
    arch = tf.gfile.Glob(os.path.join(
        logdir, 'architecture*.json'))[0]  # should only be 1 file
    with open(arch) as fp:
        arch = json.load(fp)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    features = read_whole_features(args.file_pattern.format(args.src))

    x = normalizer.forward_process(features['sp'])
    x = nh_to_nchw(x)
    y_s = features['speaker']
    y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
    y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)

    machine = MODEL(arch)
    z = machine.encode(x)
    x_t = machine.decode(z, y_t)  # NOTE: the API yields NHWC format
    x_t = tf.squeeze(x_t)
    x_t = normalizer.backward_process(x_t)

    # For sanity check (validation)
    x_s = machine.decode(z, y_s)
    x_s = tf.squeeze(x_s)
    x_s = normalizer.backward_process(x_s)

    f0_s = features['f0']
    f0_t = convert_f0(f0_s, args.src, args.trg)

    output_dir = get_default_output(args.output_dir)

    saver = tf.train.Saver()
    sv = tf.train.Supervisor(logdir=output_dir)
    with sv.managed_session() as sess:
        load(saver, sess, logdir, ckpt=ckpt)
        while True:
            try:
                feat, f0, sp = sess.run(
                    [features, f0_t, x_t],
                    feed_dict={y_t_id: np.asarray([SPEAKERS.index(args.trg)])})
                feat.update({'sp': sp, 'f0': f0})
                y = pw2wav(feat)
                oFilename = make_output_wav_name(output_dir, feat['filename'])
                sf.write(oFilename, y, FS)
            except:
                # input queue exhausted (or any error): stop converting
                break
Code example #7
def main():
    tf.gfile.MkDir('./etc')

    # ==== Save max and min value ====
    x = read_whole_features(args.train_file_pattern)
    x_all = list()
    y_all = list()
    f0_all = list()
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        while True:
            try:
                features = sess.run(x)
                print('Processing {}'.format(features['filename']))
                x_all.append(features['sp'])
                y_all.append(features['speaker'])
                f0_all.append(features['f0'])
            except tf.errors.OutOfRangeError:
                # input queue exhausted: every file has been read once
                break
            finally:
                pass

    x_all = np.concatenate(x_all, axis=0)
    y_all = np.concatenate(y_all, axis=0)
    f0_all = np.concatenate(f0_all, axis=0)


    # ==== F0 stats ====
    for s in SPEAKERS:
        print('Speaker {}'.format(s), flush=True)
        f0 = f0_all[SPEAKERS.index(s) == y_all]
        print('  len: {}'.format(len(f0)))
        f0 = f0[f0 > 2.]
        f0 = np.log(f0)
        mu, std = f0.mean(), f0.std()

        # Save as `float32`
        with open('./etc/{}.npf'.format(s), 'wb') as fp:
            fp.write(np.asarray([mu, std]).tostring())


    # ==== Min/Max value ====
    # mu = x_all.mean(0)
    # std = x_all.std(0)
    q005 = np.percentile(x_all, 0.5, axis=0)
    q995 = np.percentile(x_all, 99.5, axis=0)

    # Save as `float32`
    with open('./etc/xmin.npf', 'wb') as fp:
        fp.write(q005.tostring())

    with open('./etc/xmax.npf', 'wb') as fp:
        fp.write(q995.tostring())
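
The conversion scripts build a Tanhize normalizer from the xmin.npf/xmax.npf files that this build script writes. Below is a minimal sketch of such a percentile-based min/max normalizer, assuming it linearly maps the 0.5/99.5 percentile range onto [-1, 1] and back; the project's actual Tanhize (and the dtype it expects from np.fromfile) may differ.

import numpy as np

class TanhizeSketch(object):
    # Hedged sketch of a min/max normalizer; not the project's actual class.
    def __init__(self, xmin, xmax):
        self.xmin = xmin
        self.xscale = xmax - xmin

    def forward_process(self, x):
        x = (x - self.xmin) / self.xscale     # map the percentile range to [0, 1]
        return np.clip(x, 0., 1.) * 2. - 1.   # then to [-1, 1]

    def backward_process(self, x):
        return (x * .5 + .5) * self.xscale + self.xmin

# Usage with the files written above (np.fromfile's default dtype assumed to match the writer):
# normalizer = TanhizeSketch(np.fromfile('./etc/xmin.npf'), np.fromfile('./etc/xmax.npf'))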
Code example #8
File: convert.py  Project: QianQQ/Voice-Conversion
def main():
    logdir, ckpt = os.path.split(args.checkpoint)
    arch = tf.gfile.Glob(os.path.join(logdir, 'architecture*.json'))[0]  # should only be 1 file
    with open(arch) as fp:
        arch = json.load(fp)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    features = read_whole_features(args.file_pattern.format(args.src))

    x = normalizer.forward_process(features['sp'])
    x = nh_to_nchw(x)
    y_s = features['speaker']
    y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
    y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)

    machine = MODEL(arch)
    z = machine.encode(x)
    x_t = machine.decode(z, y_t)  # NOTE: the API yields NHWC format
    x_t = tf.squeeze(x_t)
    x_t = normalizer.backward_process(x_t)

    # For sanity check (validation)
    x_s = machine.decode(z, y_s)
    x_s = tf.squeeze(x_s)
    x_s = normalizer.backward_process(x_s)

    f0_s = features['f0']
    f0_t = convert_f0(f0_s, args.src, args.trg)

    output_dir = get_default_output(args.output_dir)

    saver = tf.train.Saver()
    sv = tf.train.Supervisor(logdir=output_dir)
    with sv.managed_session() as sess:
        load(saver, sess, logdir, ckpt=ckpt)
        while True:
            try:
                feat, f0, sp = sess.run(
                    [features, f0_t, x_t],
                    feed_dict={y_t_id: np.asarray([SPEAKERS.index(args.trg)])}
                )
                feat.update({'sp': sp, 'f0': f0})
                y = pw2wav(feat)
                oFilename = make_output_wav_name(output_dir, feat['filename'])
                sf.write(oFilename, y, FS)
            except:
                # input queue exhausted (or any error): stop converting
                break
Code example #9
File: build.py  Project: QianQQ/Voice-Conversion
def main():
    tf.gfile.MkDir('./etc')

    # ==== Save max and min value ====
    x = read_whole_features(args.train_file_pattern)
    x_all = list()
    y_all = list()
    f0_all = list()
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        while True:
            try:
                features = sess.run(x)
                print('Processing {}'.format(features['filename']))
                x_all.append(features['sp'])
                y_all.append(features['speaker'])
                f0_all.append(features['f0'])
            except tf.errors.OutOfRangeError:
                # input queue exhausted: every file has been read once
                break
            finally:
                pass

    x_all = np.concatenate(x_all, axis=0)
    y_all = np.concatenate(y_all, axis=0)
    f0_all = np.concatenate(f0_all, axis=0)


    # ==== F0 stats ====
    for s in SPEAKERS:
        print('Speaker {}'.format(s), flush=True)
        f0 = f0_all[SPEAKERS.index(s) == y_all]
        print('  len: {}'.format(len(f0)))
        f0 = f0[f0 > 2.]
        f0 = np.log(f0)
        mu, std = f0.mean(), f0.std()

        # Save as `float32`
        with open('./etc/{}.npf'.format(s), 'wb') as fp:
            fp.write(np.asarray([mu, std]).tostring())


    # ==== Min/Max value ====
    # mu = x_all.mean(0)
    # std = x_all.std(0)
    q005 = np.percentile(x_all, 0.5, axis=0)
    q995 = np.percentile(x_all, 99.5, axis=0)

    # Save as `float32`
    with open('./etc/xmin.npf', 'wb') as fp:
        fp.write(q005.tostring())

    with open('./etc/xmax.npf', 'wb') as fp:
        fp.write(q995.tostring())
Code example #10
File: build.py  Project: OMGitsHongyu/ain-vctk
def main():
    x = read_whole_features(DATA_DIR + '*a*/*bin')
    x_all = list()
    y_all = list()
    f0_all = list()
    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        while True:
            try:
                features = sess.run(x)
                print('Processing {}'.format(features['filename']))
                x_all.append(features['sp'])
                y_all.append(features['speaker'])
                f0_all.append(features['f0'])
            except tf.errors.OutOfRangeError:
                # input queue exhausted: every file has been read once
                break
            finally:
                pass

    x_all = np.concatenate(x_all, axis=0)
    y_all = np.concatenate(y_all, axis=0)
    f0_all = np.concatenate(f0_all, axis=0)

    for s in SPEAKERS:
        print('Speaker {}'.format(s))
        f0 = f0_all[np.int64(s) == y_all]
        print('  len: {}'.format(len(f0)))
        f0 = f0[f0 > 2.]
        f0 = np.log(f0)
        mu, std = f0.mean(), f0.std()
        print(mu, std)
        # Save as `float32`
        with open(DATA_DIR + '/etc/{}.npf'.format(s), 'wb') as fp:
            fp.write(np.asarray([mu, std]).tostring())

    # ==== Min/Max value ====
    # mu = x_all.mean(0)
    # std = x_all.std(0)
    q005 = np.percentile(x_all, 0.5, axis=0)
    q995 = np.percentile(x_all, 99.5, axis=0)

    # Save as `float32`
    with open(DATA_DIR + '/etc/xmin.npf', 'wb') as fp:
        fp.write(q005.tostring())

    with open(DATA_DIR + '/etc/xmax.npf', 'wb') as fp:
        fp.write(q995.tostring())
Code example #11
File: build.py  Project: chj1330/homework
def test():
    # ==== Test: batch mixer (conclusion: capacity should be larger to ensure good mixing) ====
    #x, y = read('./dataset/savee/bin/*/*/*.bin', 1, min_after_dequeue=1024, capacity=2048)
    #sv = tf.train.Supervisor()
    #with sv.managed_session() as sess:
    #    for _ in range(200):
    #        x_, y_ = sess.run([x, y])
    #        print(y_)


    # ===== Read binary ====
    features = read_whole_features('./dataset/savee/bin/Test Set/SNeu/DC16.bin')

    sv = tf.train.Supervisor()
    with sv.managed_session() as sess:
        features = sess.run(features)

    y, sp = pw2wav(features)
    sf.write('origin1.wav', y, 22050)  # TODO fs should be specified externally.
    with open('01.bin', 'wb') as fp:
        fp.write(sp)
Code example #12
def main(unused_args=None):
    # args(sys.argv)

    if args.model is None:
        raise ValueError(
            '\n  You MUST specify `model`.' +\
            '\n    Use `python convert.py --help` to see applicable options.'
        )

    module = import_module(args.module, package=None)
    MODEL = getattr(module, args.model)

    FS = 16000

    with open(args.speaker_list) as fp:
        SPEAKERS = [l.strip() for l in fp.readlines()]

    logdir, ckpt = os.path.split(args.checkpoint)
    if 'VAE' in logdir:
        _path_to_arch, _ = os.path.split(logdir)
    else:
        _path_to_arch = logdir
    arch = tf.gfile.Glob(os.path.join(_path_to_arch, 'architecture*.json'))
    if len(arch) != 1:
        print('WARNING: expected exactly 1 architecture file, found {}.'.format(len(arch)))
    arch = arch[0]
    with open(arch) as fp:
        arch = json.load(fp)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/{}_xmax.npf'.format(args.corpus_name)),
        xmin=np.fromfile('./etc/{}_xmin.npf'.format(args.corpus_name)),
    )

    features = read_whole_features(args.file_pattern.format(args.src))

    x = normalizer.forward_process(features['sp'])
    x = nh_to_nhwc(x)
    y_s = features['speaker']
    y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
    y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)

    f0_t = features['f0']
    #    f0_t = convert_f0(f0_s, args.src, args.trg)
    #    f0_s_convert = tf.cast(f0_s,dtype=tf.int64)
    f0_t_convert = tf.cast(f0_t, dtype=tf.int64)
    machine = MODEL(arch, is_training=False)
    z = machine.encode(x)
    x_t = machine.decode(z, y_t,
                         f0_t_convert)  # NOTE: the API yields NHWC format
    x_t = tf.squeeze(x_t)
    x_t = normalizer.backward_process(x_t)

    output_dir = get_default_output(args.output_dir)

    saver = tf.train.Saver()
    sv = tf.train.Supervisor(logdir=output_dir)
    with sv.managed_session() as sess:
        load(saver, sess, logdir, ckpt=ckpt)
        print()
        while True:
            try:
                s_time = time.perf_counter()
                feat, f0, sp = sess.run(
                    [features, f0_t, x_t],
                    feed_dict={y_t_id: np.asarray([SPEAKERS.index(args.trg)])})
                feat.update({'sp': sp, 'f0': f0})
                y = pw2wav(feat)
                oFilename = make_output_wav_name(output_dir, feat['filename'])
                print('\rProcessing {}'.format(oFilename), end='')
                e_time = time.perf_counter()
                print('\n')
                print('Time_sp: {}'.format(e_time - s_time))
                print('\n')
                sf.write(oFilename, y, FS)
            except tf.errors.OutOfRangeError:
                # all input files consumed
                break
            except KeyboardInterrupt:
                break
            finally:
                pass
        print()
Code example #13
File: convert.py  Project: wubinary/DLHLP2020-SPRING
def main():

    checkpoint = torch.load(args.model_path)
    encoder = Encoder()
    generator = G()
    encoder.load_state_dict(checkpoint['encoder_state_dict'])
    generator.load_state_dict(checkpoint['generator_state_dict'])
    encoder.cuda()
    generator.cuda()

    FS = 16000
    SPEAKERS = list()
    with open(args.speaker_list) as fp:
        SPEAKERS = [l.strip() for l in fp.readlines()]

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/{}_xmax.npf'.format(args.corpus_name)),
        xmin=np.fromfile('./etc/{}_xmin.npf'.format(args.corpus_name)),
    )

    total_sp_speaker = []
    total_speaker = []

    total_features = read_whole_features(args.file_pattern.format(args.src))
    for features in total_features:

        x = normalizer.forward_process(features['sp'])
        x = nh_to_nchw(x)
        y_s = features['speaker']
        #print('????',SPEAKERS.index(args.trg))

        #y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
        #y_t = y_t_id * torch.ones(shape=[x.shape[0],], dtype=torch.int64)
        #print(y_t)
        x = Variable(torch.FloatTensor(x).cuda(), requires_grad=False)

        y_t = torch.ones((x.shape[0])).view(-1, 1) * (SPEAKERS.index(args.trg))

        z, _ = encoder(x)
        x_t, _ = generator(z, y_t)  # NOTE: the API yields NHWC format
        x_t = torch.squeeze(x_t)
        #print('x_t.shape',x_t.shape)
        x_t = normalizer.backward_process(x_t)
        #print('backward_process.finish')

        x_s, _ = generator(z, y_s)
        x_s = torch.squeeze(x_s)
        x_s = normalizer.backward_process(x_s)

        f0_s = features['f0']
        #print(f0_s.shape)
        f0_t = convert_f0(f0_s, args.src, args.trg)

        #output_dir = get_default_output(args.output_dir)
        output_dir = args.output_dir
        features['sp'] = x_t.cpu().data.numpy()
        features['f0'] = f0_t
        #print('=-=-=-=-=-=')
        y = pw2wav(features)

        oFilename = make_output_wav_name(output_dir, features['filename'])
        print(f'\r Processing {oFilename}', end=' ')

        if not os.path.exists(os.path.dirname(oFilename)):
            try:
                os.makedirs(os.path.dirname(oFilename))
            except OSError:  # guard against a race with another process creating the dir
                if not os.path.isdir(os.path.dirname(oFilename)):
                    raise

        sf.write(oFilename, y, FS)
        #print('2: ',features['sp'].shape)
        #print('3: ',features['f0'].shape)

    print('\n==finish==')
Code example #14
def main():
    logdir, ckpt = os.path.split(args.checkpoint)
    print('logdir:', logdir)
    print('ckpt:', ckpt)
    arch = args.arch
    with open(arch) as fp:
        arch = json.load(fp)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    features = read_whole_features(args.file_pattern.format(args.src))
    print "features shape:", features['sp'].shape
    x = normalizer.forward_process(features['sp'])
    x = nh_to_nchw(x)
    y_s = features['speaker']
    y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
    y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)

    machine = MODEL(arch, args, False, False)
    with tf.variable_scope("encoder"):
        z, _ = machine.encoder(x, True)
    with tf.variable_scope("generator"):
        x_t = machine.generator(z, y_t,
                                True)  # NOTE: the API yields NHWC format
    x_t = tf.transpose(x_t, [0, 2, 3, 1])
    print('x_t shape:', x_t.get_shape().as_list())
    x_t = tf.squeeze(x_t)

    x_t = normalizer.backward_process(x_t)

    f0_s = features['f0']
    f0_t = convert_f0(f0_s, args.src, args.trg)

    output_dir = get_default_output(args.output_dir)
    print("output_dir########:", output_dir)

    saver = tf.train.Saver()
    sv = tf.train.Supervisor(logdir=output_dir)
    print "logdir:", logdir
    with sv.managed_session() as sess:
        #sess.run(tf.global_variables_initializer())
        #load(saver, sess, logdir, ckpt=ckpt)
        ckpt = os.path.join(logdir, ckpt)
        saver.restore(sess, ckpt)

        while True:
            try:
                print("Executing")

                feat, f0, sp = sess.run(
                    [features, f0_t, x_t],
                    feed_dict={y_t_id: np.asarray([SPEAKERS.index(args.trg)])})
                print("Executing.")
                feat.update({'sp': sp, 'f0': f0})
                y = pw2wav(feat)
                print("Executing..")
                sf.write(
                    os.path.join(
                        output_dir,
                        os.path.splitext(os.path.split(
                            feat['filename'])[-1])[0] + '.wav'), y, FS)
                print("Executed")

            except:
                # input queue exhausted (or any error): stop converting
                break