Example #1
    def test_1d(self):
        bn = False
        params = dict()
        params['optimization'] = dict()
        params['optimization']['epoch'] = 1
        params['summary_every'] = 4
        params['save_every'] = 5
        params['print_every'] = 3
        params['net'] = dict()
        params['net']['shape'] = [16, 1]  # Shape of a sample: 16 points, 1 channel
        params['net']['generator'] = dict()
        params['net']['generator']['latent_dim'] = 100
        params['net']['generator']['full'] = [2 * 8]
        params['net']['generator']['nfilter'] = [2, 32, 32, 1]
        params['net']['generator']['batch_norm'] = [bn, bn, bn]
        params['net']['generator']['shape'] = [[5], [5], [5], [5]]
        params['net']['generator']['stride'] = [1, 2, 1, 1]
        params['net']['generator']['data_size'] = 1
        params['net']['discriminator'] = dict()
        params['net']['discriminator']['full'] = [32]
        params['net']['discriminator']['nfilter'] = [16, 32, 32, 32]
        params['net']['discriminator']['batch_norm'] = [bn, bn, bn, bn]
        params['net']['discriminator']['shape'] = [[5], [5], [5], [3]]
        params['net']['discriminator']['stride'] = [2, 2, 2, 1]
        params['net']['discriminator']['data_size'] = 1

        X = np.random.rand(101, 16)
        dataset = Dataset(X)
        wgan = GANsystem(WGAN, params)
        wgan.train(dataset)
        img = wgan.generate(2)
        assert (len(img) == 2)
        assert (img.shape[1:] == (16, 1))
        img = wgan.generate(500)
        assert (len(img) == 500)
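
The test snippets in these examples omit their imports. A minimal set that would make them run, assuming the usual gantools module layout (the paths below are an assumption, not taken from the snippets), is roughly:

import numpy as np
from gantools.data import Dataset          # assumed path for the Dataset wrapper
from gantools.gansystem import GANsystem   # assumed path for the training/generation driver
from gantools.model import WGAN            # assumed path for the Wasserstein GAN model
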
Example #2
    def test_generate(self):
        params = dict()
        params['optimization'] = dict()
        params['optimization']['epoch'] = 1
        params['summary_every'] = 4
        params['save_every'] = 5
        params['print_every'] = 3
        params['net'] = dict()
        params['net']['generator'] = dict()
        params['net']['generator']['latent_dim'] = 5
        X = np.random.rand(101, 16, 16)
        dataset = Dataset(X)
        wgan = GANsystem(WGAN, params)
        wgan.train(dataset)
        img = wgan.generate(2)
        img = wgan.generate(500)
        assert (len(img) == 500)
        z = np.random.randn(133, 5)

        img1 = wgan.generate(z=z)

        img2 = np.zeros(shape=img1.shape)
        for i in range(133):
            img2[i] = wgan.generate(z=np.reshape(z[i], [1, 5]))
        np.testing.assert_almost_equal(img1, img2, decimal=6)
Example #3
    def test_paulinasystem(self):
        params = dict()
        params['optimization'] = dict()
        params['optimization']['epoch'] = 1
        params['summary_every'] = 4
        params['save_every'] = 5
        params['print_every'] = 3
        X = np.random.rand(101, 16, 16)
        dataset = Dataset(X)
        wgan = PaulinaGANsystem(WGAN, params)
        wgan.train(dataset)
        img = wgan.generate(2)
        img = wgan.generate(500)
        assert (len(img) == 500)
Example #4
    def test_patchupscalegan1dborder(self):
        bn = False
        params = dict()
        params['optimization'] = dict()
        params['optimization']['epoch'] = 1
        params['summary_every'] = 4
        params['save_every'] = 5
        params['print_every'] = 3
        params['net'] = dict()
        params['net']['shape'] = [8, 2]
        params['net']['generator'] = dict()
        params['net']['generator']['latent_dim'] = 8
        params['net']['generator']['full'] = [16]
        params['net']['generator']['nfilter'] = [8, 32, 1]
        params['net']['generator']['batch_norm'] = [bn, bn]
        params['net']['generator']['shape'] = [[3], [3], [3]]
        params['net']['generator']['stride'] = [1, 1, 1]
        params['net']['generator']['data_size'] = 1
        params['net']['generator']['borders'] = dict()
        params['net']['generator']['borders']['nfilter'] = [4, 1]
        params['net']['generator']['borders']['batch_norm'] = [bn, bn]
        params['net']['generator']['borders']['shape'] = [[5], [3]]
        params['net']['generator']['borders']['stride'] = [2, 2]
        params['net']['generator']['borders']['data_size'] = 1
        params['net']['generator']['borders']['width_full'] = 2
        params['net']['discriminator'] = dict()
        params['net']['discriminator']['full'] = [32]
        params['net']['discriminator']['nfilter'] = [16, 32]
        params['net']['discriminator']['batch_norm'] = [bn, bn]
        params['net']['discriminator']['shape'] = [[5], [3]]
        params['net']['discriminator']['stride'] = [2, 2]
        params['net']['discriminator']['data_size'] = 1
        params['net']['upsampling'] = 2

        X = np.random.rand(101, 8, 2)
        dataset = Dataset(X)
        wgan = GANsystem(UpscalePatchWGANBorders, params)
        wgan.train(dataset)
        borders = np.random.rand(500, 4, 1)
        X_smooth = np.random.rand(500, 4, 1)
        img = wgan.generate(N=2, X_smooth=X_smooth[:2], borders=borders[:2])
        assert (len(img) == 2)
        assert (img.shape[1:] == (4, 1))
        img = wgan.generate(N=500,
                            X_smooth=X_smooth[:500],
                            borders=borders[:500])
        assert (len(img) == 500)
Example #5
    def test_patchupscalegan3d(self):
        bn = False
        params = dict()
        params['optimization'] = dict()
        params['optimization']['epoch'] = 1
        params['summary_every'] = 4
        params['save_every'] = 5
        params['print_every'] = 3
        params['net'] = dict()
        params['net']['shape'] = [8, 8, 8, 8]
        params['net']['generator'] = dict()
        params['net']['generator']['latent_dim'] = 8 * 8 * 8
        params['net']['generator']['full'] = []
        params['net']['generator']['nfilter'] = [8, 32, 1]
        params['net']['generator']['batch_norm'] = [bn, bn]
        params['net']['generator']['shape'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3]]
        params['net']['generator']['stride'] = [1, 1, 1]
        params['net']['generator']['data_size'] = 3
        params['net']['discriminator'] = dict()
        params['net']['discriminator']['full'] = [32]
        params['net']['discriminator']['nfilter'] = [16, 32]
        params['net']['discriminator']['batch_norm'] = [bn, bn]
        params['net']['discriminator']['shape'] = [[3, 3, 3], [3, 3, 3]]
        params['net']['discriminator']['stride'] = [2, 2]
        params['net']['discriminator']['data_size'] = 3
        params['net']['upsampling'] = 2

        X = np.random.rand(101, 8, 8, 8, 8)
        dataset = Dataset(X)

        class UgradedGAN(UpscalePatchWGAN, CosmoWGAN):
            pass

        wgan = GANsystem(UgradedGAN, params)
        wgan.train(dataset)
        borders = np.random.rand(500, 8, 8, 8, 7)
        X_down = np.random.rand(500, 4, 4, 4, 1)

        img = wgan.generate(N=2, X_down=X_down[:2], borders=borders[:2])
        assert (len(img) == 2)
        assert (img.shape[1:] == (8, 8, 8, 1))
        img = wgan.generate(N=500, X_down=X_down[:500], borders=borders[:500])
        assert (len(img) == 500)
Example #6
    def test_lapgan1d(self):
        bn = False
        params = dict()
        params['optimization'] = dict()
        params['optimization']['epoch'] = 1
        params['summary_every'] = 4
        params['save_every'] = 5
        params['print_every'] = 3
        params['net'] = dict()
        params['net']['shape'] = [16, 1]
        params['net']['generator'] = dict()
        params['net']['generator']['latent_dim'] = 16 * 16
        params['net']['generator']['full'] = []
        params['net']['generator']['nfilter'] = [8, 32, 1]
        params['net']['generator']['batch_norm'] = [bn, bn]
        params['net']['generator']['shape'] = [[5], [5], [5]]
        params['net']['generator']['stride'] = [1, 1, 1]
        params['net']['generator']['data_size'] = 1
        params['net']['discriminator'] = dict()
        params['net']['discriminator']['full'] = [32]
        params['net']['discriminator']['nfilter'] = [16, 32]
        params['net']['discriminator']['batch_norm'] = [bn, bn]
        params['net']['discriminator']['shape'] = [[5], [3]]
        params['net']['discriminator']['stride'] = [2, 2]
        params['net']['discriminator']['data_size'] = 1
        params['net']['upsampling'] = 2

        X = np.random.rand(101, 16)
        dataset = Dataset(X)
        wgan = GANsystem(LapWGAN, params)
        wgan.train(dataset)
        X_down = np.random.rand(500, 8, 1)
        img = wgan.generate(N=2, X_down=X_down[:2])
        assert (len(img) == 2)
        assert (img.shape[1:] == (16, 1))
        img = wgan.generate(N=500, X_down=X_down[:500])
        assert (len(img) == 500)
Example #7
tgrads[np.isnan(tgrads)] = 0
tgrads = np.clip(tgrads, -1, 1)

fgrads[np.isnan(fgrads)] = 0
fgrads = np.clip(fgrads, -1, 1)

print(np.max(tgrads[:, :256, :]))
print(np.min(tgrads[:, :256, :]))
print(np.mean(tgrads[:, :256, :]))

print(np.max(fgrads[:, :256, :]))
print(np.min(fgrads[:, :256, :]))
print(np.mean(fgrads[:, :256, :]))

dataset = Dataset(np.stack([preprocessed_images[:, :256], tgrads[:, :256], fgrads[:, :256]], axis=-1))

time_str = 'commands_md64_tgrads_fgrads_squared_8k'
global_path = '../../saved_results'

name = time_str

from gantools import blocks
bn = False

md = 64

params_discriminator = dict()
params_discriminator['stride'] = [2, 2, 2, 2, 2]
params_discriminator['nfilter'] = [md, 2*md, 4*md, 8*md, 16*md]
params_discriminator['shape'] = [[5, 5], [5, 5], [5, 5], [5, 5], [5, 5]]
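
Examples #7 to #10 and #12 break off right after the first discriminator settings. Judging from the test examples above, such a script presumably goes on to fill in the remaining discriminator keys, define a matching generator, and hand the assembled params dict to GANsystem. The sketch below is purely illustrative: every value and key choice is a placeholder modeled on the tests above, not the original script's configuration.

# Purely illustrative continuation: all values below are placeholders
# mirroring the test examples above, not the original configuration.
params_discriminator['full'] = [32]
params_discriminator['batch_norm'] = [bn] * 5
params_discriminator['data_size'] = 2                # 2D spectrogram patches

params_generator = dict()
params_generator['latent_dim'] = 100
params_generator['stride'] = [2, 2, 2, 2, 2]
params_generator['nfilter'] = [16 * md, 8 * md, 4 * md, 2 * md, 3]
params_generator['shape'] = [[5, 5]] * 5
params_generator['batch_norm'] = [bn] * 4
params_generator['full'] = [8 * 8 * md]
params_generator['data_size'] = 2

params = dict()
params['net'] = dict()
params['net']['shape'] = [256, 128, 3]               # placeholder: 256 bins, assumed 128 frames, 3 channels
params['net']['generator'] = params_generator
params['net']['discriminator'] = params_discriminator
params['optimization'] = dict()
params['optimization']['epoch'] = 1
params['summary_every'] = 100
params['save_every'] = 1000
params['print_every'] = 50

wgan = GANsystem(WGAN, params)
wgan.train(dataset)
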
Example #8
raw_data = scipy.io.loadmat(mat_path)
preprocessed_images = raw_data['logspecs']

for i in range(2, 7):
    mat_path = "../../data/test_spectrograms_and_derivs_" + str(i) + ".mat"
    raw_data = scipy.io.loadmat(mat_path)
    preprocessed_images = np.concatenate(
        [preprocessed_images, raw_data['logspecs']])

del raw_data
print(preprocessed_images.shape)
print(np.max(preprocessed_images[:, :128, :]))
print(np.min(preprocessed_images[:, :128, :]))
print(np.mean(preprocessed_images[:, :128, :]))

dataset = Dataset(preprocessed_images[:, :128])

time_str = 'commands_md64_4k_squared'
global_path = '../../saved_results'

name = time_str

from gantools import blocks
bn = False

md = 64

params_discriminator = dict()
params_discriminator['stride'] = [2, 2, 2, 2, 2]
params_discriminator['nfilter'] = [md, 2 * md, 4 * md, 8 * md, 16 * md]
params_discriminator['shape'] = [[6, 6], [6, 6], [6, 6], [6, 6], [6, 6]]
Example #9
tgrads[np.isnan(tgrads)] = 0
tgrads = np.clip(tgrads, -1, 1)

fgrads[np.isnan(fgrads)] = 0
fgrads = np.clip(fgrads, -1, 1)

print(np.max(tgrads[:, :256, :]))
print(np.min(tgrads[:, :256, :]))
print(np.mean(tgrads[:, :256, :]))

print(np.max(fgrads[:, :256, :]))
print(np.min(fgrads[:, :256, :]))
print(np.mean(fgrads[:, :256, :]))

dataset = Dataset(
    np.stack([preprocessed_images[:, :256], tgrads[:, :256], fgrads[:, :256]],
             axis=-1))

time_str = 'commands_md64_tgrads_fgrads_8k'
global_path = '../../saved_results'

name = time_str

from gantools import blocks
bn = False

md = 64

params_discriminator = dict()
params_discriminator['stride'] = [2, 2, 2, 2, 2]
params_discriminator['nfilter'] = [md, 2 * md, 4 * md, 8 * md, 16 * md]
Example #10
raw_data = scipy.io.loadmat(mat_path)
preprocessed_images = raw_data['logspecs']

for i in range(2, 7):
    mat_path = "../../data/test_spectrograms_and_derivs_" + str(i) + ".mat"
    raw_data = scipy.io.loadmat(mat_path)
    preprocessed_images = np.concatenate(
        [preprocessed_images, raw_data['logspecs']])

del raw_data
print(preprocessed_images.shape)
print(np.max(preprocessed_images[:, :128, :]))
print(np.min(preprocessed_images[:, :128, :]))
print(np.mean(preprocessed_images[:, :128, :]))

dataset = Dataset(preprocessed_images[:, :128])

time_str = 'commands_md32_4k'
global_path = '../../saved_results'

name = time_str

from gantools import blocks
bn = False

md = 32

params_discriminator = dict()
params_discriminator['stride'] = [2, 2, 2, 2, 2]
params_discriminator['nfilter'] = [md, 2 * md, 4 * md, 8 * md, 16 * md]
params_discriminator['shape'] = [[12, 3], [12, 3], [12, 3], [12, 3], [12, 3]]
Example #11
def load_audio_dataset(shuffle=True,
                       scaling=1,
                       patch=False,
                       augmentation=False,
                       spix=None,
                       smooth=None,
                       type='nsynth'):
    '''Load an audio dataset object (Nsynth or piano).

    Arguments
    ---------
    * shuffle: shuffle the data (default True)
    * scaling: downsample the signals by this factor (default 1)
    * patch: cut the signals into patches (default False)
    * augmentation: apply random-shift data augmentation (default False)
    * spix: patch/slice length in samples (default None)
    * smooth: if set, append a smoothed (down- then up-sampled) copy of each
      signal as a second channel (default None)
    * type: 'nsynth' or 'piano' (default 'nsynth')
    '''

    if type == 'nsynth':
        sig = load_nysnth_rawdata()
        sig = sig[:, :2**15]
    elif type == 'piano':
        sig = load_piano_rawdata()
    else:
        raise ValueError('Incorrect value for type')

    if len(sig.shape) == 1:
        sig = np.reshape(sig, [1, len(sig)])

    # if augmentation and (not patch):
    #     raise ValueError('Augementation works only with patches.')

    # 1) Transform the data
    def transform(x):
        x = x / (2**15)
        x = (0.99 * x.T / np.max(np.abs(x), axis=1)).T
        return x

    sig = transform(sig)

    # 2) Downsample
    Nwin = 32
    if scaling > 1:
        # sig = blocks.downsample(sig, scaling)
        sig = transformation.downsample_1d(sig, scaling, Nwin=Nwin)

    if smooth is not None:
        sig = sig[:, :(sig.shape[1] // smooth) * smooth]
        sig_down = transformation.downsample_1d(sig, smooth, Nwin=Nwin)
        sig_smooth = transformation.upsamler_1d(sig_down, smooth, Nwin=Nwin)

        sig = np.concatenate(
            (np.expand_dims(sig, axis=2), np.expand_dims(sig_smooth, axis=2)),
            axis=2)
    if patch:
        slice_fn = partial(transformation.slice_1d_patch, spix=spix)
    else:
        if spix is not None:
            slice_fn = partial(transformation.slice_1d, spix=spix)
        else:
            slice_fn = do_nothing

    if augmentation:
        transform = partial(transformation.random_shift_1d,
                            roll=False,
                            spix=spix)
    else:
        transform = do_nothing
    # 3) Make a dataset
    dataset = Dataset(sig,
                      shuffle=shuffle,
                      transform=transform,
                      slice_fn=slice_fn)

    return dataset
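
A minimal usage sketch for the loader above. The argument values are illustrative, and the training step reuses the GANsystem pattern from the earlier examples:

# Illustrative call: downsample Nsynth audio by 2 and cut it into 1024-sample patches.
dataset = load_audio_dataset(scaling=2, patch=True, spix=1024, type='nsynth')

# Train on it with the same GANsystem pattern as in the test examples above
# (params is a full configuration dict of the kind shown there).
wgan = GANsystem(WGAN, params)
wgan.train(dataset)
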
Example #12
raw_data = scipy.io.loadmat(mat_path)
preprocessed_images = raw_data['logspecs']

for i in range(2, 7):
    mat_path = "../../data/test_spectrograms_and_derivs_" + str(i) + ".mat"
    raw_data = scipy.io.loadmat(mat_path)
    preprocessed_images = np.concatenate(
        [preprocessed_images, raw_data['logspecs']])

del raw_data
print(preprocessed_images.shape)
print(np.max(preprocessed_images[:, :256, :]))
print(np.min(preprocessed_images[:, :256, :]))
print(np.mean(preprocessed_images[:, :256, :]))

dataset = Dataset(preprocessed_images[:, :256])

time_str = 'commands_md64_8k'
global_path = '../../saved_results'

name = time_str

from gantools import blocks

bn = False

md = 64

params_discriminator = dict()
params_discriminator['stride'] = [2, 2, 2, 2, 2]
params_discriminator['nfilter'] = [md, 2 * md, 4 * md, 8 * md, 16 * md]