Example #1
File: main.py  Project: s0urcer/vae-npvc
def main():
    ''' NOTE: The input is rescaled to [-1, 1] '''

    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    image, label = read(
        file_pattern=arch['training']['datadir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
    )

    machine = MODEL(arch)

    loss = machine.loss(image, label)
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
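The recurring docstring note about rescaling to [-1, 1] is what the Tanhize normalizer implements from the xmax/xmin statistics loaded off disk. A minimal sketch of such a min-max normalizer, assuming the usual linear mapping and the method names forward_process/backward_process (the real Tanhize in these projects may differ in detail):

import numpy as np

class Tanhize(object):
    ''' Sketch: linearly map features from [xmin, xmax] to [-1, 1]. '''
    def __init__(self, xmax, xmin):
        self.xmin = xmin
        self.xscale = xmax - xmin

    def forward_process(self, x):
        # Rescale to [0, 1], clip outliers, then shift to [-1, 1].
        x = (x - self.xmin) / self.xscale
        return np.clip(x, 0., 1.) * 2. - 1.

    def backward_process(self, x):
        # Invert: [-1, 1] -> [0, 1] -> [xmin, xmax].
        return (x * .5 + .5) * self.xscale + self.xmin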
Example #2
def main():
    ''' NOTE: The input is rescaled to [-1, 1] '''

    dirs = validate_log_dirs(args)
    if args.restore_from is None:
        tf.gfile.MakeDirs(dirs['logdir'])

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    image, label = read(
        file_pattern=arch['training']['datadir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
    )  # image format: NHWC

    print('image shape:', image)
    print('label shape:', label)
    machine = MODEL(arch, args, True, False)
    loss = machine.loss(image, label, True)
    #sample = machine.sample()#np.asarray([SPEAKERS.index(args.trg)]))
    # sample,
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], n_unroll=arch['training']['n_unroll'], machine=machine)
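The extra n_unroll argument here suggests this trainer runs several critic updates per generator step, a common schedule for Wasserstein-style GAN training. A hypothetical sketch of such a loop (d_opt and g_opt are assumed discriminator/generator train ops, not names taken from these examples):

def train_unrolled(sess, d_opt, g_opt, max_iter, n_unroll):
    # Hedged sketch: n_unroll critic updates per generator update.
    for _ in range(max_iter):
        for _ in range(n_unroll):
            sess.run(d_opt)   # d_opt: assumed discriminator train op
        sess.run(g_opt)       # g_opt: assumed generator train op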
Example #3
def main():
    ''' NOTE: The input is rescaled to [-1, 1] '''

    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    image, label = read(
        file_pattern=arch['training']['datadir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
    )

    machine = MODEL(arch)

    loss = machine.loss(image, label)
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
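Every example feeds training batches through a read helper whose capacity and min_after_dequeue arguments are passed straight to TF 1.x's tf.train.shuffle_batch. A hedged sketch of what such a queue-based reader could look like (the record feature names and shapes below are hypothetical):

import tensorflow as tf

def read(file_pattern, batch_size, capacity=2048,
         min_after_dequeue=1024, normalizer=None):
    # TF 1.x input queue: glob files, read records, shuffle into batches.
    files = tf.gfile.Glob(file_pattern)
    filename_queue = tf.train.string_input_producer(files)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(serialized, features={
        'frame': tf.FixedLenFeature([513], tf.float32),  # hypothetical
        'speaker': tf.FixedLenFeature([], tf.int64),     # hypothetical
    })
    x, y = features['frame'], features['speaker']
    if normalizer is not None:
        x = normalizer.forward_process(x)  # e.g. the Tanhize sketch above
    # min_after_dequeue keeps enough examples queued for good shuffling;
    # capacity bounds the queue's memory footprint.
    return tf.train.shuffle_batch([x, y], batch_size=batch_size,
                                  capacity=capacity,
                                  min_after_dequeue=min_after_dequeue)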
Example #4
def main():
    """ NOTE: The input is rescaled to [-1, 1] """

    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])
    # dirs = dict()
    # dirs['logdir'] = '.'
    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    machine = MODEL(arch)
    if args.model in ['VAWGAN_I']:
        image, label, i_vec = read_i_all(
            file_pattern=arch['training']['datadir'],
            file_pattern2=arch['training']['ivectordir'],
            batch_size=arch['training']['batch_size'],
            capacity=2048,
            min_after_dequeue=1024,
            normalizer=normalizer,
        )

        # loss = machine.loss(image, label, text_emb)
        loss = machine.loss(image, label, i_vec)
    elif args.model in ['VAWGAN_S', 'SentWGAN']:
        image, label, text_emb = read_all(
            file_pattern=arch['training']['datadir'],
            file_pattern2=arch['training']['textdir'],
            batch_size=arch['training']['batch_size'],
            capacity=2048,
            min_after_dequeue=1024,
            normalizer=normalizer,
        )

        loss = machine.loss(image, label, text_emb)
    else:
        image, label = read(
            file_pattern=arch['training']['datadir'],
            batch_size=arch['training']['batch_size'],
            capacity=2048,
            min_after_dequeue=1024,
            normalizer=normalizer,
        )

        loss = machine.loss(image, label)

    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
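All of these scripts drive training from a JSON architecture file. Judging from the keys consumed above, its training section has roughly the following shape (paths and values are illustrative only, not taken from any of these projects):

# Illustrative arch layout inferred from the lookups above.
arch = {
    'training': {
        'datadir': './data/train/*.bin',    # hypothetical path
        'ivectordir': './data/ivec/*.bin',  # VAWGAN_I only
        'textdir': './data/text/*.bin',     # VAWGAN_S / SentWGAN only
        'batch_size': 128,
        'max_iter': 200000,
    },
}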
Example #5
def main(unused_args=None):
    ''' NOTE: The input is rescaled to [-1, 1] '''
    #module = import_module(args.model_module, package=None)
    #MODEL = getattr(module, args.model)
    #print("=== ",MODEL,"")
    #module = import_module(args.trainer_module, package=None)
    #TRAINER = getattr(module, args.trainer)

    dirs = validate_log_dirs(args)

    try:
        os.makedirs(dirs['logdir'])
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/{}_xmax.npf'.format(args.corpus_name)),
        xmin=np.fromfile('./etc/{}_xmin.npf'.format(args.corpus_name)),
    )

    s_x_y = read(
        file_pattern=arch['training']['src_dir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
        data_format='NHWC',
    )

    t_x_y = read(
        file_pattern=arch['training']['trg_dir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
        data_format='NHWC',
    )

    machine = Trainer()

    machine.load_data(s_x_y, t_x_y)
    machine.train()
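The try/except around os.makedirs (which relies on an errno import) is the pre-3.2 idiom for "create unless it already exists"; on Python 3.2+ a single call achieves the same:

os.makedirs(dirs['logdir'], exist_ok=True)  # no error if logdir exists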
Example #6
def main(unused_args=None):
    ''' NOTE: The input is rescaled to [-1, 1] '''
    module = import_module(args.model_module, package=None)
    MODEL = getattr(module, args.model)

    module = import_module(args.trainer_module, package=None)
    TRAINER = getattr(module, args.trainer)


    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/{}_xmax.npf'.format(args.corpus_name)),
        xmin=np.fromfile('./etc/{}_xmin.npf'.format(args.corpus_name)),
    )

    x_s, y_s, f0_s = read(
        file_pattern=arch['training']['src_dir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
        data_format='NHWC',
    )

    x_t, y_t, f0_t = read(
        file_pattern=arch['training']['trg_dir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=normalizer,
        data_format='NHWC',
    )

    machine = MODEL(arch, is_training=True)
    # y_s_new = tf.stack([y_s, f0_s], axis=1)
    # y_t_new = tf.stack([y_t, f0_t], axis=1)

    loss = machine.loss(x_s, y_s, f0_s, x_t, y_t, f0_t)
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
Example #7
def main(_):
  speaker_list = txt2list(args.speaker_list)
  dirs = validate_log_dirs(args)
  arch = json2dict(args.arch)
  arch.update(dirs)
  arch.update({'ckpt': args.ckpt})
  copy_arch_file(args.arch, arch['logdir'])
  net = VQVAE(arch)
  P = net.n_padding()
  print('Receptive field: {} samples ({:.2f} sec)\n'.format(P, P / arch['fs']))
  data = ByteWavReader(
    speaker_list,
    args.file_pattern,
    T=arch['T'],
    batch_size=arch['training']['batch_size'],
    buffer_size=5000
  )
  net.train(data)
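net.n_padding() reports the model's receptive field in samples, which the print converts to seconds via the sampling rate arch['fs']. For a WaveNet-style stack of dilated causal convolutions, that figure can be estimated as follows (a sketch with made-up numbers, not the VQVAE class's actual method):

def receptive_field(kernel_size, dilations):
    # Each dilated causal conv layer widens the field by
    # (kernel_size - 1) * dilation samples.
    return sum((kernel_size - 1) * d for d in dilations) + 1

# e.g. two stacks of dilations 1..512 at fs = 16000 (illustrative config)
P = receptive_field(2, [2 ** i for i in range(10)] * 2)
print('Receptive field: {} samples ({:.2f} sec)'.format(P, P / 16000))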
Example #8
File: main.py  Project: chj1330/homework
def main(unused_args):
    ''' NOTE: The input is rescaled to [-1, 1] '''

    if args.model is None or args.trainer is None:
        raise ValueError(
            '\n  Both `model` and `trainer` should be assigned.' +\
            '\n  Use `python main.py --help` to see applicable options.'
        )

    module = import_module(args.model_module, package=None)
    MODEL = getattr(module, args.model)

    module = import_module(args.trainer_module, package=None)
    TRAINER = getattr(module, args.trainer)

    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    x, y = read(
        file_pattern=arch['training']['datadir'],
        batch_size=arch['training']['batch_size'],
        capacity=4096,
        min_after_dequeue=3000,
        normalizer=normalizer,
    )

    machine = MODEL(arch, is_training=True)

    loss = machine.loss(x, y)
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
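Examples #6, #8, and #9 resolve the model and trainer classes at run time from command-line strings via importlib. Stripped to its essentials, the pattern is the following two-step lookup (demonstrated here with a stdlib module and class so the snippet runs as written; main.py substitutes args.model_module and args.model):

from importlib import import_module

module = import_module('collections')   # e.g. --model_module models.vae
MODEL = getattr(module, 'Counter')      # e.g. --model VAE; yields the class
machine = MODEL('mississippi')          # instantiate the chosen class
print(machine.most_common(2))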
Example #9
def main(unused_args=None):
    ''' NOTE: The input is rescaled to [-1, 1] '''
    module = import_module(args.model_module, package=None)
    MODEL = getattr(module, args.model)

    module = import_module(args.trainer_module, package=None)
    TRAINER = getattr(module, args.trainer)

    dirs = validate_log_dirs(args)
    tf.gfile.MakeDirs(dirs['logdir'])

    with open(args.architecture) as f:
        arch = json.load(f)

    with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
        json.dump(arch, f, indent=4)


    # TODO: normalize x_s and x_t
    x_s, y_s = read_f0(
        file_pattern=arch['training']['src_dir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=None,
        data_format='NHWC',
    )

    x_t, y_t = read_f0(
        file_pattern=arch['training']['trg_dir'],
        batch_size=arch['training']['batch_size'],
        capacity=2048,
        min_after_dequeue=1024,
        normalizer=None,
        data_format='NHWC',
    )

    machine = MODEL(arch, is_training=True)
    loss = machine.loss(x_s, y_s, x_t, y_t)
    trainer = TRAINER(loss, arch, args, dirs)
    trainer.train(nIter=arch['training']['max_iter'], machine=machine)
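The TODO above flags that x_s and x_t bypass normalization here. Wiring in the same Tanhize setup the other examples use would look like this, assuming read_f0 applies its normalizer argument the way read does:

normalizer = Tanhize(
    xmax=np.fromfile('./etc/xmax.npf'),
    xmin=np.fromfile('./etc/xmin.npf'),
)
x_s, y_s = read_f0(
    file_pattern=arch['training']['src_dir'],
    batch_size=arch['training']['batch_size'],
    capacity=2048,
    min_after_dequeue=1024,
    normalizer=normalizer,
    data_format='NHWC',
)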
Example #10
def main(_):
    """Train the model based on the command-line arguments."""
    # Parse command-line arguments
    speaker_list = txt2list(args.speaker_list)
    dirs = validate_log_dirs(args)
    arch = json2dict(args.arch)
    arch.update(dirs)
    arch.update({'ckpt': args.ckpt})
    copy_arch_file(args.arch, arch['logdir'])

    # Initialize the model
    net = VQVAE(arch)
    P = net.n_padding()
    print('Receptive field: {} samples ({:.2f} sec)'.format(P, P / arch['fs']))

    # Read the input data as specified by the command line arguments
    data = ByteWavReader(speaker_list,
                         args.file_pattern,
                         T=arch['T'],
                         batch_size=arch['training']['batch_size'],
                         buffer_size=5000)

    # Train the model on the input data
    net.train(data)