Example #1
 def save_history(self, path):
     verify_output_path(path)
     history = {
         field: np.array([
             getattr(transition, field)
             for transition in self.buffer.data
         ])
         for field in Transition._fields
     }
     np.savez(path, **history)
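Every snippet here calls verify_output_path(path) before writing, but its definition is not shown. A minimal sketch of what it presumably does (an assumption, not the project's actual implementation) is to make sure the target file's parent directory exists:

import os

def verify_output_path(path):
    # Create the directory that will contain `path`, if it does not exist yet.
    directory = os.path.dirname(os.path.abspath(path))
    os.makedirs(directory, exist_ok=True)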
Example #2
 def save_models(self, path='./trained_models/scm.pth'):
     d = {node: self.nodes[node]['model'].state_dict() for node in self.nodes}
     verify_output_path(path)
     torch.save(d, path)
Example #3
 def save_skeleton(self, path):
     verify_output_path(path)
     skeleton = SCM.copy_skeleton(self)
     data = json_graph.node_link_data(skeleton)
     with open(path, 'w') as f:
         json.dump(data, f, indent=2)
Example #4
 def save_model(self, model_save_path: str):
     verify_output_path(model_save_path)
     torch.save(self.net.state_dict(), model_save_path)
Example #5
 def save_cache(self):
     filepath = self.cache_file_path()
     verify_output_path(filepath)
     pd.to_pickle(self.df, filepath)
Example #6
def test_sine(num_epochs):
    gan = BiCoGAN(1, 1, 4)

    train_data_length = 1024
    train_data = np.zeros((train_data_length, 2, 1))
    train_data[:, 0] = 3 * np.random.randn(train_data_length)[:, None]
    train_data[:, 1] = np.sin(-train_data[:, 0] + 0.5) + 0.5 * np.random.randn(
        train_data_length)[:, None]
    train_data = torch.tensor(train_data).float()
    train_labels = torch.zeros(train_data_length)
    train_set = [(train_data[i], train_labels[i])
                 for i in range(train_data_length)]

    batch_size = 32
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)

    loss_gs = []
    loss_ds = []
    loss_es = []
    for epoch in range(num_epochs):

        for samples, _ in train_loader:
            c = samples[:, 0]
            x = samples[:, 1]

            loss_D, loss_G, loss_E = gan.optimize(x, c)

            loss_ds.append(loss_D)
            loss_gs.append(loss_G)
            loss_es.append(loss_E)

        if epoch % 10 == 0:
            print(
                f'epoch {epoch:03d}: loss_D = {loss_D:.4f}, loss_G = {loss_G:.4f}, loss_E = {loss_E:.4f}'
            )

    # testing

    import matplotlib.pyplot as plt

    # plot loss
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_ds[500:],
             label='discriminator loss')
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_gs[500:],
             label='generator loss')
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_es[500:],
             label='encoder loss')
    plt.legend()
    verify_output_path('./output/loss.png')
    plt.savefig('./output/loss.png')
    plt.clf()

    # plot truth vs generated
    c = 3 * np.random.randn(train_data_length)
    x = np.sin(-c + 0.5) + 0.5 * np.random.randn(train_data_length)  # same relation as the training data

    plt.scatter(c, x, label='ground truth')

    gan.eval()
    z = np.random.randn(1024, 4)
    x_gen = gan.generate(c[:, None], z).detach().cpu().numpy()

    print('generated output range:')
    print(np.min(x_gen), np.max(x_gen))

    plt.scatter(c, x_gen, label='generator')
    plt.legend()
    plt.xlabel('condition')
    plt.ylabel('output')
    plt.savefig('./output/generator.png')
    plt.clf()
Example #7
def test_age_weight(num_epochs):
    gan = BiCoGAN(1, 1, 8, sigmoid=True)

    train_data_length = 1024
    train_data = np.zeros((train_data_length, 2, 1))
    # age = np.random.uniform(20, 80, size=train_data_length)
    # weight = np.where(age <= 40,
    #                   0.25 * age + 60,
    #                   -0.25 * age + 80
    #                   ) + np.random.uniform(-2, 2, size=train_data_length)
    age = 30 * np.random.randn(train_data_length) + 50
    age = np.clip(age, 20, 80)
    weight = np.where(age <= 40, 0.25 * age + 60,
                      -0.25 * age + 80) + 6 * np.random.rand(train_data_length)
    weight = np.clip(weight, 40, 80)

    train_data[:, 0] = age[:, None]
    train_data[:, 1] = weight[:, None]
    train_data = torch.tensor(train_data).float()
    train_labels = torch.zeros(train_data_length)
    train_set = [(train_data[i], train_labels[i])
                 for i in range(train_data_length)]

    batch_size = 32
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)

    loss_gs = []
    loss_ds = []
    loss_es = []
    for epoch in range(num_epochs):

        for samples, _ in train_loader:
            c = samples[:, 0]
            x = samples[:, 1]

            loss_D, loss_G, loss_E = gan.optimize(x, c)

            loss_ds.append(loss_D)
            loss_gs.append(loss_G)
            loss_es.append(loss_E)

        if epoch % 10 == 0:
            print(
                f'epoch {epoch:03d}: loss_D = {loss_D:.4f}, loss_G = {loss_G:.4f}, loss_E = {loss_E:.4f}'
            )

    # testing
    gan.eval()

    import matplotlib.pyplot as plt

    # plot loss
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_ds[500:],
             label='discriminator loss')
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_gs[500:],
             label='generator loss')
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_es[500:],
             label='encoder loss')
    plt.legend()
    verify_output_path('./output/loss.png')
    plt.savefig('./output/loss.png')
    plt.clf()

    # plot truth vs generated
    c = 30 * np.random.randn(1024) + 50
    c = np.clip(c, 20, 80)
    x = np.where(c <= 40, 0.25 * c + 60,
                 -0.25 * c + 80) + 6 * np.random.rand(1024)
    x = np.clip(x, 40, 80)

    plt.scatter(c, x, label='ground truth')

    z = np.random.randn(1024, 8)
    x_gen = gan.generate(c[:, None], z).detach().cpu().numpy()

    print('generated output range:')
    print(np.min(x_gen), np.max(x_gen))

    plt.scatter(c, x_gen, label='generator')
    plt.legend()
    plt.xlabel('condition')
    plt.ylabel('output')
    plt.savefig('./output/generator.png')
    plt.clf()
Example #8
def test(num_epochs):
    # c ~ 3 * N(0, 1)
    # x = -4 * c + N(0, 1)
    gan = BiCoGAN(1, 1, 1)
    loss_gs = []
    loss_ds = []
    loss_es = []
    for epoch in range(num_epochs):
        train_data_length = 1024
        train_data = np.zeros((train_data_length, 2, 1))
        train_data[:, 0] = 3 * np.random.randn(train_data_length, 1)
        train_data[:, 1] = -4 * train_data[:, 0] + np.random.randn(
            train_data_length, 1)
        train_data = torch.tensor(train_data).float()
        train_labels = torch.zeros(train_data_length)
        train_set = [(train_data[i], train_labels[i])
                     for i in range(train_data_length)]

        batch_size = 32
        train_loader = torch.utils.data.DataLoader(train_set,
                                                   batch_size=batch_size,
                                                   shuffle=True)
        for samples, _ in train_loader:
            c = samples[:, 0]
            x = samples[:, 1]

            loss_D, loss_G, loss_E = gan.optimize(x, c)

            loss_ds.append(loss_D)
            loss_gs.append(loss_G)
            loss_es.append(loss_E)

        if epoch % 10 == 0:
            print(
                f'epoch {epoch:03d}: loss_D = {loss_D:.4f}, loss_G = {loss_G:.4f}, loss_E = {loss_E:.4f}'
            )

    # testing

    import matplotlib.pyplot as plt

    # plot loss
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_ds[500:],
             label='discriminator loss')
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_gs[500:],
             label='generator loss')
    plt.plot(np.arange(501,
                       len(loss_ds) + 1),
             loss_es[500:],
             label='encoder loss')
    plt.legend()
    verify_output_path('./output/loss.png')
    plt.savefig('./output/loss.png')
    plt.clf()

    # plot truth vs generated
    c = 3 * np.random.randn(256, 1)
    x = -4 * c + np.random.randn(256, 1)

    print('ground truth range:')
    print(np.min(x), np.max(x))

    plt.scatter(c, x, label='ground truth')

    gan.eval()
    z = np.random.randn(256, 1)
    x_gen = gan.generate(c, z).detach().cpu().numpy()

    print('generated output range:')
    print(np.min(x_gen), np.max(x_gen))

    plt.scatter(c, x_gen, label='generator')
    plt.legend()
    plt.xlabel('condition')
    plt.ylabel('output')
    plt.savefig('./output/generator.png')
    plt.clf()

    # plot generated vs encoded
    fig = plt.figure()
    ax: plt.Axes = plt.axes(projection='3d')
    ax.scatter(c, z, x_gen, label='real condition/latent + generated output')

    z_enc = gan.encode(c, x_gen)
    z_enc = z_enc.detach().cpu().numpy()

    ax.scatter(c, z_enc, x, label='encoded condition/latent + real output')
    ax.legend()
    ax.set_xlabel('condition')
    ax.set_ylabel('latent')
    ax.set_zlabel('output')
    plt.savefig('./output/encoder.png')
    plt.clf()

    # testing the difference between generator and encoder
    print('average latent encoding error:')
    print(np.sum(np.abs(z_enc - z)) / len(z_enc))
    x_enc_gen = gan.generate(c, z_enc).detach().cpu().numpy()
    print('average distance between x_gen and x_enc_gen:')
    print(np.sum(np.abs(x_gen - x_enc_gen)) / len(x_gen))

    plt.scatter(z, x_gen, label='gen')
    plt.scatter(z_enc, x, label='enc')
    plt.legend()
    plt.show()
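Examples #6-#8 exercise a BiCoGAN class defined elsewhere. The interface they rely on, inferred purely from the calls above (parameter names and the argument order of the first two constructor arguments are hypothetical; this is a stub, not the real implementation), looks roughly like this:

import torch

class BiCoGAN:
    # Interface stub inferred from usage in the tests above; not the real implementation.

    def __init__(self, x_dim: int, c_dim: int, latent_dim: int, sigmoid: bool = False):
        # Scalar output/condition sizes (order uncertain), latent noise size,
        # and an optional sigmoid on the generator output.
        raise NotImplementedError

    def optimize(self, x: torch.Tensor, c: torch.Tensor):
        # One training step on a batch; returns (loss_D, loss_G, loss_E).
        raise NotImplementedError

    def generate(self, c, z) -> torch.Tensor:
        # Sample outputs for conditions `c` and latents `z` (NumPy arrays in the tests).
        raise NotImplementedError

    def encode(self, c, x) -> torch.Tensor:
        # Recover the latent code for a (condition, output) pair.
        raise NotImplementedError

    def eval(self):
        # Put the underlying networks into evaluation mode.
        raise NotImplementedError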