Example #1
File: utils.py Project: zggl/discgen
def create_celeba_streams(training_batch_size, monitoring_batch_size,
                          include_targets=False):
    """Creates CelebA data streams.

    Parameters
    ----------
    training_batch_size : int
        Batch size for training.
    monitoring_batch_size : int
        Batch size for monitoring.
    include_targets : bool
        If ``True``, use both features and targets. If ``False``, use
        features only.

    Returns
    -------
    rval : tuple of data streams
        Data streams for the main loop, the training set monitor,
        the validation set monitor and the test set monitor.

    """
    sources = ('features', 'targets') if include_targets else ('features',)

    train_set = CelebA('64', ('train',), sources=sources)
    valid_set = CelebA('64', ('valid',), sources=sources)
    test_set = CelebA('64', ('test',), sources=sources)

    return create_streams(train_set, valid_set, test_set, training_batch_size,
                          monitoring_batch_size)
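Example #1 delegates stream construction to a create_streams helper defined elsewhere in utils.py and not shown here. A minimal sketch of what such a helper could look like, modeled on the stream-construction pattern of Example #2 below (the actual discgen implementation may differ):

from fuel.schemes import SequentialScheme, ShuffledScheme
from fuel.streams import DataStream


def create_streams(train_set, valid_set, test_set, training_batch_size,
                   monitoring_batch_size):
    # Shuffled stream over the full training set for the main loop
    main_loop_stream = DataStream.default_stream(
        train_set,
        iteration_scheme=ShuffledScheme(train_set.num_examples,
                                        training_batch_size))
    # Sequential monitoring streams over each split
    monitor_streams = tuple(
        DataStream.default_stream(
            dataset,
            iteration_scheme=SequentialScheme(dataset.num_examples,
                                              monitoring_batch_size))
        for dataset in (train_set, valid_set, test_set))
    return (main_loop_stream,) + monitor_streams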
Example #2
File: streams.py Project: MiriamHu/ALI
from fuel.datasets import CelebA
from fuel.schemes import ShuffledScheme
from fuel.streams import DataStream


def create_celeba_data_streams(batch_size, monitoring_batch_size, rng=None):
    """Creates CelebA streams: one over the full training set for the main
    loop, plus shuffled monitoring streams over the first 5000 training and
    validation examples."""
    train_set = CelebA('64', ('train',), sources=('features',))
    valid_set = CelebA('64', ('valid',), sources=('features',))
    main_loop_stream = DataStream.default_stream(
        train_set,
        iteration_scheme=ShuffledScheme(train_set.num_examples,
                                        batch_size,
                                        rng=rng))
    train_monitor_stream = DataStream.default_stream(
        train_set,
        iteration_scheme=ShuffledScheme(5000, monitoring_batch_size, rng=rng))
    valid_monitor_stream = DataStream.default_stream(
        valid_set,
        iteration_scheme=ShuffledScheme(5000, monitoring_batch_size, rng=rng))
    return main_loop_stream, train_monitor_stream, valid_monitor_stream
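A minimal way to consume the returned streams (the batch sizes and the seed here are illustrative only):

import numpy

streams = create_celeba_data_streams(batch_size=128,
                                     monitoring_batch_size=500,
                                     rng=numpy.random.RandomState(123))
main_loop_stream, train_monitor_stream, valid_monitor_stream = streams

for (features,) in main_loop_stream.get_epoch_iterator():
    # Each batch matches the ('features',) sources: shape (batch_size, 3, 64, 64)
    break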
Example #3
import os

import h5py
import numpy
from numpy.testing import assert_equal

from fuel import config
from fuel.datasets import CelebA
from fuel.datasets.hdf5 import H5PYDataset


def test_celeba():
    data_path = config.data_path
    try:
        config.data_path = '.'
        f = h5py.File('celeba_64.hdf5', 'w')
        f['features'] = numpy.arange(10 * 3 * 64 * 64, dtype='uint8').reshape(
            (10, 3, 64, 64))
        f['targets'] = numpy.arange(10 * 40, dtype='uint8').reshape((10, 40))
        split_dict = {
            'train': {
                'features': (0, 6),
                'targets': (0, 6)
            },
            'valid': {
                'features': (6, 8),
                'targets': (6, 8)
            },
            'test': {
                'features': (8, 10),
                'targets': (8, 10)
            }
        }
        f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
        f.close()
        dataset = CelebA(which_format='64', which_sets=('train',))
        assert_equal(dataset.filename, 'celeba_64.hdf5')
    finally:
        config.data_path = data_path
        os.remove('celeba_64.hdf5')
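The split array written by H5PYDataset.create_split_array is what lets the dataset map which_sets to row ranges. Inside the same try block, the data interface could also be exercised along these lines (a sketch; the shapes follow the arrays created above):

train_set = CelebA(which_format='64', which_sets=('train',))
assert train_set.num_examples == 6  # rows 0-6 per split_dict
handle = train_set.open()
features, targets = train_set.get_data(handle, request=slice(0, 2))
assert features.shape == (2, 3, 64, 64) and targets.shape == (2, 40)
train_set.close(handle)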
Example #4
def main(args):

    # Load the configuration module from the given source file
    config_module = imp.load_source('config', args.config_path)

    # Get configuration parameters
    cfg = config_module.cfg

    # Name of the npz file from which the model parameters will be loaded
    weights_fname = str(args.config_path)[:-3] + '.npz'

    model = config_module.get_model(interp=False)
    print('Compiling theano functions...')

    # Compile functions
    tfuncs, tvars, model = make_training_functions(cfg, model)

    # Test set for interpolations
    test_set = CelebA('64', ('test',), sources=('features',))

    # Gather all trainable parameters, plus the batch-norm mean/inv_std
    # statistics, so that saved weights can be restored
    params = list(set(
        lasagne.layers.get_all_params(model['l_out'], trainable=True) +
        lasagne.layers.get_all_params(model['l_discrim'], trainable=True) +
        [x for x in lasagne.layers.get_all_params(model['l_out']) +
         lasagne.layers.get_all_params(model['l_discrim'])
         if x.name[-4:] == 'mean' or x.name[-7:] == 'inv_std']))
    metadata = GANcheckpoints.load_weights(weights_fname, params)
    epoch = args.epoch if args.epoch > 0 else (
        metadata['epoch'] if 'epoch' in metadata else 0)
    print('loading weights, epoch is ' + str(epoch))

    model['l_IAF_mu'].reset("Once")
    model['l_IAF_ls'].reset("Once")

    # Open test set
    test_set.open()

    # Generate random samples, decoded from latents drawn from the prior
    np.random.seed(epoch * 42 + 5)
    samples = np.uint8(from_tanh(tfuncs['sample'](
        np.random.randn(27, cfg['num_latents']).astype(np.float32))))

    # Get reconstruction/interpolation endpoints from the test set
    np.random.seed(epoch * 42 + 5)
    endpoints = np.uint8(test_set.get_data(
        request=list(np.random.choice(test_set.num_examples, 6,
                                      replace=False)))[0])

    # Get reconstruction latents
    Ze = np.asarray(tfuncs['Zfn'](to_tanh(np.float32(endpoints))))

    # Linearly interpolate between each pair of endpoint latents
    Z = np.asarray([Ze[2 * i, :] * (1 - j) + Ze[2 * i + 1, :] * j
                    for i in range(3)
                    for j in [x / 6.0 for x in range(7)]],
                   dtype=np.float32)

    # Assemble all images: the random samples, then each endpoint pair with
    # its decoded interpolants inserted between the two endpoints
    images = np.append(
        samples,
        np.concatenate(
            [np.insert(endpoints[2 * i:2 * (i + 1), :, :, :], 1,
                       np.uint8(from_tanh(
                           tfuncs['sample'](Z[7 * i:7 * (i + 1), :]))),
                       axis=0)
             for i in range(3)],
            axis=0),
        axis=0)

    # Plot images
    plot_image_grid(images, 6, 9,
                    'pics/' + str(args.config_path)[:-3] +
                    '_sample' + str(epoch) + '.png')

    # Close test set
    test_set.close(state=None)
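The interpolation step above is plain linear blending in latent space: for each endpoint pair, j walks through 0, 1/6, ..., 1, reproducing the left endpoint at j=0 and the right endpoint at j=1. A self-contained illustration of the same arithmetic (the latent vectors here are made up):

import numpy as np

za = np.zeros(4, dtype=np.float32)     # latent of the first endpoint
zb = np.ones(4, dtype=np.float32)      # latent of the second endpoint
weights = [x / 6.0 for x in range(7)]  # 0, 1/6, ..., 1
Z = np.asarray([(1 - j) * za + j * zb for j in weights], dtype=np.float32)
# Z[0] == za and Z[-1] == zb, with five evenly spaced latents in between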
Example #5
def get_celeba(split, sources, load_in_memory):
    from fuel.datasets import CelebA
    return CelebA('64', split, sources=sources, load_in_memory=load_in_memory)
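A possible call site (the split and source names follow fuel's CelebA conventions; load_in_memory trades memory for avoiding repeated disk reads):

train_set = get_celeba(('train',), ('features',), load_in_memory=False)
print(train_set.num_examples)  # 162770 examples in the standard train split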