Пример #1
0
def predict(cfs):
    """Sample one latent batch, decode it with a trained LSGAN and plot the images.

    Args:
        cfs: config filenames forwarded to both the MNIST dataset and the net.
    """
    net = LSGAN(filenames=cfs)
    net.define_net()
    print(net.pretty_settings())
    net.load('AutoEncoder')
    dataset = MNIST(filenames=cfs)
    latent = net.gen_latent()
    decoded = net.predict('Decoder', [latent])
    images = dataset.visualize(decoded[0])
    subplot_images((images, ), is_gray=True, size=3.0, tight_c=0.5)
Пример #2
0
def predict():
    """Load the trained autoencoder and visualize images decoded from a latent sample.

    Relies on module-level globals: ``net_name``, ``net_settings`` and
    ``data_settings``.

    Raises:
        ValueError: if ``net_name`` is neither 'ae' nor 'vae'.
    """
    dataset = MNIST(**data_settings)
    if net_name == 'ae':
        net = AutoEncoder1D(**net_settings)
    elif net_name == 'vae':
        net = VAE1D(**net_settings)
    else:
        # Fail fast: previously an unknown net_name left `net` unbound
        # and crashed with a NameError further down.
        raise ValueError("Unknown net_name: %s" % net_name)
    net.define_net()
    print(net.pretty_settings())
    net.load('AutoEncoder')
    # Advance the dataset once for its side effect; the sample itself was
    # never used (the original bound it to an unused local `s`).
    next(dataset)
    p = net.predict('Decoder', [net.gen_latent()])
    imgs = dataset.visualize(p[0])
    subplot_images((imgs, ), is_gray=True, size=3.0, tight_c=0.5)
Пример #3
0
def train(filenames=None, settings=None, **kwargs):
    """Train the adversarial autoencoder (AAE1D) on MNIST.

    Each iteration runs one critic step, one generator step and one
    autoencoder step, reporting all three losses through a ProgressTimer.

    Args:
        filenames: config filenames forwarded to the dataset and the net.
        settings: dict; must contain 'nb_batches' (total training budget;
            the combined loop runs nb_batches // 3 iterations).
        **kwargs: accepted for call-site compatibility; unused here.
    """
    dataset = MNIST(filenames=filenames)
    print("=" * 30)
    print("DATASET SETTINGS:")
    print(dataset.pretty_settings())
    print("=" * 30)
    net = AAE1D(filenames=filenames)
    net.define_net()
    print("=" * 30)
    print("NETWORK SETTINGS:")
    print(net.pretty_settings())
    print("=" * 30)
    nb_batches = settings['nb_batches']
    net.load()
    ptp = ProgressTimer(nb_batches)
    for i in range(nb_batches // 3):
        loss_cri = train_cri(net, dataset)
        msg = '|T:Cri, loss=%05f|' % (loss_cri)
        loss_gen = train_gen(net, dataset)
        msg += '|T:Gen, loss=%05f|' % (loss_gen)
        loss_ae = train_ae(net, dataset)
        # BUG FIX: was `msg = ...`, which silently discarded the Cri and
        # Gen loss messages assembled just above.
        msg += '|T:AuE, loss=%05f|' % (loss_ae)
        ptp.event(net.step, msg)
        # BUG FIX: the original rebuilt `ptp = ProgressTimer(nb_batches)`
        # here on every iteration, resetting the progress timer each step.
    net.save('net', is_print=True)
Пример #4
0
def show_latent(cfs, nb_sample=10000):
    """Scatter-plot AAE1D encoder latent codes of MNIST samples, one series per digit 0-9.

    Args:
        cfs: config filenames forwarded to the dataset and the net.
        nb_sample: approximate number of samples to encode (rounded down
            to whole batches).
    """
    print("AE1D Test. Show latent called.")
    net = AAE1D(filenames=cfs)
    net.define_net()
    net.load('enc')
    nb_batch_show = nb_sample // net._batch_size
    dataset = MNIST(filenames=cfs)
    # One bucket of latent codes per digit class.
    latents = [[] for _ in range(10)]
    for _ in tqdm(range(nb_batch_show)):
        sample = next(dataset)
        pred = net.predict('enc', [sample[0]])
        for j in range(dataset._batch_size):
            latents[sample[1][j]].append(pred[0][j])
    xs, ys = [], []
    for digit in range(10):
        coords = np.array(latents[digit])
        xs.append(coords[:, 0])
        ys.append(coords[:, 1])
    # Build the flat (x, y, fmt) argument triples plt.plot expects.
    plot_args = []
    for digit in range(10):
        plot_args.extend((xs[digit], ys[digit], '.'))
    plt.plot(*plot_args)
    plt.legend([str(d) for d in range(10)])
Пример #5
0
def show_latent(nb_sample=10000):
    """Scatter-plot encoder latent codes of MNIST samples, one series per digit 0-9.

    Relies on module-level globals: ``net_name``, ``net_settings``,
    ``data_settings`` and ``batch_size``.

    Args:
        nb_sample: approximate number of samples to encode (rounded down
            to whole batches).

    Raises:
        ValueError: if ``net_name`` is neither 'ae' nor 'vae'.
    """
    print("AE1D Test. Show latent called.")
    if net_name == 'ae':
        net = AutoEncoder1D(**net_settings)
    elif net_name == 'vae':
        net = VAE1D(**net_settings)
    else:
        # Fail fast: previously an unknown net_name left `net` unbound
        # and crashed with a NameError further down.
        raise ValueError("Unknown net_name: %s" % net_name)
    net.define_net()
    net.load('AutoEncoder')
    nb_batch_show = nb_sample // batch_size
    dataset = MNIST(**data_settings)
    # One bucket of latent codes per digit class.
    latents = []
    for i in range(10):
        latents.append([])
    for i in tqdm(range(nb_batch_show)):
        s = next(dataset)
        p = net.predict('Encoder', [s[0]])
        for j in range(batch_size):
            latents[s[1][j]].append(p[0][j])
    x = []
    y = []
    for i in range(10):
        pos = np.array(latents[i])
        x.append(pos[:, 0])
        y.append(pos[:, 1])
    # Build the flat (x, y, fmt) argument triples plt.plot expects.
    para = []
    for i in range(10):
        para.append(x[i])
        para.append(y[i])
        para.append('.')
    plt.plot(*para)
    plt.legend(list(map(str, range(10))))
Пример #6
0
def train(is_print_loss=False):
    """Train the configured autoencoder ('ae' or 'vae') on MNIST.

    Relies on module-level globals: ``net_name``, ``net_settings``,
    ``data_settings`` and ``nb_batches``. A checkpoint is written every
    1000 batches and once more after training finishes.

    Args:
        is_print_loss: when True, print the loss after every batch.

    Raises:
        ValueError: if ``net_name`` is neither 'ae' nor 'vae'.
    """
    dataset = MNIST(**data_settings)
    if net_name == 'ae':
        net = AutoEncoder1D(**net_settings)
    elif net_name == 'vae':
        net = VAE1D(**net_settings)
    else:
        # Fail fast: previously an unknown net_name left `net` unbound
        # and crashed with a NameError further down.
        raise ValueError("Unknown net_name: %s" % net_name)

    net.define_net()
    print(net.pretty_settings())
    for i in tqdm(range(nb_batches), ascii=True, ncols=50):
        s = next(dataset)
        # Autoencoder target equals its input.
        loss_v = net.train_on_batch('AutoEncoder', [s[0]], [s[0]])
        if is_print_loss:
            print(' loss = ', loss_v)
        if i % 1000 == 0:
            net.save('AutoEncoder')
    net.save('AutoEncoder', is_print=True)
Пример #7
0
def train():
    """Train a WGAN1D on MNIST: 5 critic updates per generator update.

    Relies on module-level globals: ``data_settings``, ``net_settings``
    and ``nb_batches``.
    """
    dataset = MNIST(**data_settings)
    net = WGAN1D(**net_settings)
    net.define_net()
    for _ in tqdm(range(nb_batches)):
        noise = net.gen_latent()
        sample = next(dataset)
        # Standard WGAN schedule: several critic steps per generator step.
        for _ in range(5):
            net.train_on_batch('Cri', [sample[0], noise])
        noise = net.gen_latent()
        net.train_on_batch('Gen', [noise])
Пример #8
0
def show_mainfold(cfs):
    """Decode a square grid of 2-D latent points with a trained LSGAN generator and plot them.

    Grid side length is sqrt(batch_size); points span [-1.5, 1.5] on both axes.

    NOTE(review): "mainfold" is presumably a typo of "manifold"; the name is
    kept for compatibility with existing callers.
    """
    dataset = MNIST(filenames=cfs)
    nb_axis = int(np.sqrt(dataset._batch_size))
    axis_pts = np.linspace(-1.5, 1.5, nb_axis)
    grid_x, grid_y = np.meshgrid(axis_pts, np.linspace(-1.5, 1.5, nb_axis))
    flat_x = grid_x.reshape([-1])
    flat_y = grid_y.reshape([-1])
    net = LSGAN(filenames=cfs)
    net.define_net()
    net.load('Gen')
    # One latent row (x, y) per grid point.
    latents = np.stack([flat_x, flat_y], axis=1)
    decoded = net.predict('Gen', [latents])
    images = dataset.visualize(decoded[0])
    subplot_images((images, ),
                   nb_max_row=nb_axis,
                   is_gray=True,
                   size=1.0,
                   tight_c=0.5)
Пример #9
0
def show_mainfold():
    """Decode a square grid of 2-D latent points with the trained decoder and plot them.

    Relies on module-level globals: ``net_name``, ``net_settings``,
    ``data_settings`` and ``batch_size``. Grid side length is
    sqrt(batch_size); points span [-1.5, 1.5] on both axes.

    NOTE(review): "mainfold" is presumably a typo of "manifold"; the name is
    kept for compatibility with existing callers.

    Raises:
        ValueError: if ``net_name`` is neither 'ae' nor 'vae'.
    """
    dataset = MNIST(**data_settings)
    nb_axis = int(np.sqrt(batch_size))
    x = np.linspace(-1.5, 1.5, nb_axis)
    y = np.linspace(-1.5, 1.5, nb_axis)
    pos = np.meshgrid(x, y)
    xs = pos[0]
    ys = pos[1]
    xs = xs.reshape([-1])
    ys = ys.reshape([-1])
    if net_name == 'ae':
        net = AutoEncoder1D(**net_settings)
    elif net_name == 'vae':
        net = VAE1D(**net_settings)
    else:
        # Fail fast: previously an unknown net_name left `net` unbound
        # and crashed with a NameError further down.
        raise ValueError("Unknown net_name: %s" % net_name)
    net.define_net()
    net.load('AutoEncoder')
    # One latent row (x, y) per grid point.
    latents = np.array([xs, ys]).T
    p = net.predict('Decoder', [latents])
    imgs = dataset.visualize(p[0])
    subplot_images((imgs, ), nb_max_row=nb_axis,
                   is_gray=True, size=1.0, tight_c=0.5)
Пример #10
0
def train(nb_batches, cfs):
    """Train an LSGAN on MNIST: critic pre-training, then interleaved critic/generator steps.

    The main loop trains the critic on most steps and the generator every
    ``net.gen_freq``-th step; a checkpoint is written every 1000 batches.

    Args:
        nb_batches: number of main-loop training batches.
        cfs: config filenames forwarded to the dataset and the net.
    """
    dataset = MNIST(filenames=cfs)
    print("=" * 30)
    print("DATASET SETTINGS:")
    print(dataset.pretty_settings())
    print("=" * 30)
    net = LSGAN(filenames=cfs)
    net.define_net()
    print("=" * 30)
    print("NETWORK SETTINGS:")
    print(net.pretty_settings())
    print("=" * 30)
    # Critic pre-training phase.
    pre_timer = ProgressTimer(net.pre_train)
    for step in range(net.pre_train):
        sample = next(dataset)
        noise = net.gen_latent()
        loss_c = net.train_on_batch('Cri', [sample[0], noise], [])
        pre_timer.event(step, 'loss_c= %f' % loss_c)
    # Main adversarial phase; losses start as NaN until first computed.
    timer = ProgressTimer(nb_batches)
    loss_c = np.nan
    loss_g = np.nan
    for step in range(nb_batches):
        sample = next(dataset)
        noise = net.gen_latent()
        if step % net.gen_freq > 0:
            loss_c = net.train_on_batch('Cri', [sample[0], noise], [])
            timer.event(step, 'c_step, loss_c= %f; loss_g= %f' % (loss_c, loss_g))
        else:
            loss_g = net.train_on_batch('WGan', [sample[0], noise], [])
            timer.event(step, 'g_step, loss_c= %f; loss_g= %f' % (loss_c, loss_g))
        if step % 1000 == 0:
            net.save('AutoEncoder')
    net.save('AutoEncoder', is_print=True)
Пример #11
0
def show_data_mainfold(cfs):
    """Decode a 32x32 grid of 2-D latent points batch-by-batch and plot the images.

    Points span [-5.0, 20.0] on both axes. The grid is padded to a whole
    number of net batches; padding rows are dropped after prediction.
    """
    dataset = MNIST(filenames=cfs)
    nb_axis = 32
    axis_pts = np.linspace(-5.0, 20.0, nb_axis)
    grid = np.meshgrid(axis_pts, np.linspace(-5.0, 20.0, nb_axis))
    flat_x = grid[0].reshape([-1])
    flat_y = grid[1].reshape([-1])
    net = AAE1D(filenames=cfs)
    net.define_net()
    net.load('Gen')
    # One latent row (x, y) per grid point.
    latents = np.stack([flat_x, flat_y], axis=1)
    nb_latents = latents.shape[0]
    nb_batches = int(np.ceil(nb_latents / net.batch_size))
    # Zero-pad so every predict call sees a full batch.
    pad_rows = nb_batches * net.batch_size - nb_latents
    padded = np.pad(latents, ((0, pad_rows), (0, 0)), mode='constant')
    chunks = []
    for b in tqdm(range(nb_batches)):
        batch = padded[b * net.batch_size:(b + 1) * net.batch_size, :]
        chunks.append(net.predict('Gen', [batch])[0])
    decoded = np.concatenate(chunks)
    # Drop the padding rows before visualizing.
    images = dataset.visualize(decoded[:nb_latents, ...])
    subplot_images((images, ),
                   nb_max_row=nb_axis,
                   is_gray=True,
                   size=1.0,
                   tight_c=0.5)
Пример #12
0
def create_dataset_net(is_load=False):
    """Return an (MNIST dataset, WGAN1D net) pair built from the module-level settings.

    Args:
        is_load: when True, restore previously saved weights into the net.
    """
    dataset = MNIST(**data_settings)
    net = WGAN1D(**net_settings)
    if is_load:
        net.load()
    return (dataset, net)