Code example #1
import time

import numpy as np
import torch


def train_parallel_cycle(mode, loader):
    all_losses_g = []
    all_losses_d = []
    gradients = []
    current_loss_g = 0
    current_loss_d = 0
    # optimizer = torch.optim.SGD(dis_silly.parameters(), lr=0.005, momentum=0.9)
    # optimizer = torch.optim.SGD(dis_silly.parameters(), lr=learning_rate_dis, momentum=0.9)
    optimizer = torch.optim.Adam(dis_silly.parameters(), lr=learning_rate_dis)
    # optimizer_gen = torch.optim.SGD([gen_fixed_silly.A], lr=learning_rate_gen_p, momentum=0.9)
    optimizer_gen = torch.optim.Adam([gen_silly.A], lr=learning_rate_gen_p)
    # optimizer = torch.optim.SGD(dis_silly.parameters(), lr=0.001)
    start = time.time()
    for epoch in range(epochs_parallel):
        for index, data in enumerate(loader):
            for k_step in range(k):
                z_noise = get_noise(m_batch_size, sample_size).double()
                x_real = data.double()
                if mode == 'real':
                    x_real = torch.add(-set_mean, x_real) / (set_max - set_min) # mean normalization
                loss_d = train_dis(dis_silly, gen_silly, x_real, z_noise, optimizer)
                current_loss_d += loss_d
            z_noise = get_noise(m_batch_size, sample_size).double()
            loss_g, grad = train_gen(gen_silly, gen_fixed_clever, dis_silly, z_noise, optimizer_gen, mode, data)
            current_loss_g += loss_g
            gradients.append(grad)
            if (index + 1) % print_every == 0:
                print('%s (%d %d%%) %.10f %.10f' % (
                    time_since(start), epoch, (index + 1) / dataset_size * 100, loss_d, loss_g))
            if (index + 1) % plot_every == 0:
                all_losses_d.append(current_loss_d / plot_every)
                all_losses_g.append(current_loss_g / plot_every)
                current_loss_d = 0
                current_loss_g = 0
    return all_losses_d, all_losses_g, gradients
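
The helper train_dis is not part of these listings. Below is a minimal sketch of one discriminator step, assuming the standard GAN objective (maximize log D(x) + log(1 - D(G(z)))) and a Discriminator whose forward returns a pair ending in the "real" probability, as the other examples imply; the generate/stack/squeeze pattern is copied from test_parallel in example #4.

import torch

def train_dis(dis, gen, x_real, z_noise, optimizer):
    # Build a fake batch; detach so this step does not update the generator.
    x_fake = torch.stack([gen.generate(z_noise[m]) for m in range(len(z_noise))]).squeeze(2).detach()
    optimizer.zero_grad()
    _, pred_real = dis(x_real)
    _, pred_fake = dis(x_fake)
    # Standard discriminator loss, written as a quantity to minimize.
    loss = -(torch.mean(torch.log(pred_real)) + torch.mean(torch.log(1 - pred_fake)))
    loss.backward()
    optimizer.step()
    return loss.item()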
Code example #2
def generate_dataset():
    with open('data/generated.txt', 'w') as f:
        z_noise = get_noise(100, hardcoded_n_in_batch).double()  # drawn once, only to derive the index structure
        gen = Generator(prepare_indices(z_noise[0]), torch.from_numpy(np.array(weights_for_generation)))
        z_noise = get_noise(dataset_size_to_generate, hardcoded_n_in_batch).double()
        for m in range(dataset_size_to_generate):
            generated = gen.generate(z_noise[m]).detach().numpy()
            for value in generated:
                f.write("%f," % value)
            f.write("\n")
Code example #3
def plot_all():
    noise_for_initial_plot = get_noise(1, hardcoded_n_in_batch).double()
    gen_initial = Generator(prepare_indices(noise_for_initial_plot[0]),
                            torch.from_numpy(np.array(weights_for_generation)))
    # plot_initial(gen_initial, noise_for_initial_plot)

    z_noise_basis = get_noise(m_batch_size, size_for_basis_plot).double()
    gen_basis = Generator(prepare_indices(z_noise_basis[0]), torch.from_numpy(np.array(weights_for_generation)))
    # plot_basis(gen_basis)

    plot_losses_together(losses_d_parallel, losses_g_parallel)
    # plot_gradient(grad_both)
    plot_dis_accuracy(dis_accuracy)
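
The plotting helpers are not included either. A plausible matplotlib implementation of plot_losses_together, with hypothetical labels:

import matplotlib.pyplot as plt

def plot_losses_together(losses_d, losses_g):
    # Each entry is an average over plot_every batches.
    plt.figure()
    plt.plot(losses_d, label='discriminator')
    plt.plot(losses_g, label='generator')
    plt.xlabel('plot interval')
    plt.ylabel('average loss')
    plt.legend()
    plt.show()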
Code example #4
def test_parallel():
    for _ in range(10):
        noise = get_noise(m_batch_size, sample_size).double()
        real_for_pred = torch.stack([gen_fixed_clever.generate(noise[m]) for m in range(m_batch_size)]).squeeze(2)
        fake_for_pred = torch.stack([gen_silly.generate(noise[m]) for m in range(m_batch_size)]).squeeze(2)
        _, prediction_fake = dis_silly(fake_for_pred)
        _, prediction_real = dis_silly(real_for_pred)
        print("chance that fake is taken for real: ", torch.mean(prediction_fake))
        print("chance that real is taken for real: ", torch.mean(prediction_real))
Code example #5
def dis_training_cycle(mode, loader):
    # optimizer = torch.optim.SGD(dis_trainable.parameters(), lr=learning_rate_dis, momentum=0.9)
    optimizer = torch.optim.Adam(dis_trainable.parameters(), lr=learning_rate_dis)
    all_losses_d = []
    current_loss_d = 0
    start = time.time()
    for epoch in range(epochs_dis):
        for index, data in enumerate(loader):
            z_noise = get_noise(m_batch_size, sample_size).double()
            x_real = data.double()
            if mode == 'real':
                x_real = torch.add(-set_mean, torch.log(x_real))  # log transform, then mean-centering
            loss_d = train_dis(dis_trainable, gen_fixed_silly, x_real, z_noise, optimizer)
            current_loss_d += loss_d
            if (index + 1) % print_every == 0:
                print('%s (%d %d%%) %.10f' % (time_since(start), epoch, (index + 1) / dataset_size * 100, loss_d))
            if (index + 1) % plot_every == 0:
                all_losses_d.append(current_loss_d / plot_every)
                current_loss_d = 0
    return all_losses_d
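
time_since is the usual elapsed-time formatter from the PyTorch tutorials; if it is not defined elsewhere, this is all it needs to be:

import math
import time

def time_since(since):
    # Wall-clock time elapsed since `since`, formatted as "Xm Ys".
    s = time.time() - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)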
Code example #6
def gen_training_cycle(mode, loader):
    all_losses_g = []
    current_loss_g = 0
    gradients = []
    start = time.time()
    optimizer = torch.optim.Adam([gen_fixed_silly.A], lr=learning_rate_gen)
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 20, gamma=0.1)
    # optimizer = torch.optim.SGD([gen_fixed_silly.A], lr=learning_rate_gen, momentum=0.9)
    # optimizer = torch.optim.SGD([gen_fixed_silly.A], lr=learning_rate_gen)
    # optimizer = torch.optim.ASGD([gen_fixed_silly.A], lr=learning_rate_gen)
    for epoch in range(epochs_gen):
        for index, data in enumerate(loader):
            z_noise = get_noise(m_batch_size, sample_size).double()
            loss_g, grad_norm = train_gen(gen_fixed_silly, gen_fixed_clever, dis_trainable, z_noise, optimizer, mode, data)
            gradients.append(grad_norm)
            current_loss_g += loss_g
            if (index + 1) % print_every == 0:
                print('%s (%d %d%%) %.10f' % (
                    time_since(start), epoch, (index + 1) / dataset_size * 100, loss_g))
            if (index + 1) % plot_every == 0:
                all_losses_g.append(current_loss_g / plot_every)
                current_loss_g = 0
    return all_losses_g, gradients
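
train_gen is not shown either. Its signature suggests that gen_fixed_clever, mode, and data may feed an alternative (supervised) objective in the original; the sketch below ignores those arguments and implements only the plain GAN generator step, returning the loss and the gradient norm of the trainable weight matrix A, as the callers expect:

import torch

def train_gen(gen, gen_target, dis, z_noise, optimizer, mode, data):
    # gen_target, mode, and data are unused in this minimal variant.
    optimizer.zero_grad()
    x_fake = torch.stack([gen.generate(z_noise[m]) for m in range(len(z_noise))]).squeeze(2)
    _, pred_fake = dis(x_fake)
    # Non-saturating generator objective: maximize log D(G(z)).
    loss = -torch.mean(torch.log(pred_fake))
    loss.backward()
    grad_norm = torch.norm(gen.A.grad).item()
    optimizer.step()
    return loss.item(), grad_norm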
Code example #7
    dataset_real = CSVDataset('real.csv')
    loader_real = torch.utils.data.DataLoader(dataset=dataset_real, batch_size=m_batch_size, shuffle=True,
                                              num_workers=1)
    dataset_size_real = dataset_real.len / m_batch_size  # batches per epoch
    set_mean = torch.mean(torch.stack([data for _, data in enumerate(loader_real)]))  # mean value for normalization
    min_per_batch = []
    max_per_batch = []
    for _, data in enumerate(loader_real):  # single pass over the data for both extremes
        min_per_batch.append(torch.min(data))
        max_per_batch.append(torch.max(data))
    set_min = np.amin(np.array(min_per_batch))
    set_max = np.amax(np.array(max_per_batch))
    sample_size = len(next(iter(loader_real))[0])  # length of a single sample
    dataset_size = dataset_size_real  # same batches-per-epoch count
    noise = get_noise(m_batch_size, sample_size).double()  # one noise batch, used only to derive indices

    # 1. train discriminator
    # set_mean = 0
    gen_fixed_clever = Generator(prepare_indices(noise[0]), torch.from_numpy(np.array(weights_for_generation)))
    dis_trainable = Discriminator(sample_size)
    # dis_trainable.apply(weights_init)
    # dis_trainable.layer1.weight.data.fill_(0)
    # dis_trainable.layer1.bias.data.fill_(0)
    # dis_trainable.layer2.weight.data.fill_(0)
    # dis_trainable.layer2.bias.data.fill_(0.45)

    weights_random = torch.Tensor(2, 2).uniform_(0, 1)
    # weights_random = torch.from_numpy(np.array([[0.65, 0.85], [0.05, 0.55]]))
    gen_fixed_silly = Generator(prepare_indices(noise[0]), weights_random)
    losses_d = []
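
CSVDataset only needs to expose indexing and the .len attribute used above. A minimal sketch, assuming one sample per comma-separated row of real.csv:

import numpy as np
import torch

class CSVDataset(torch.utils.data.Dataset):
    def __init__(self, path):
        self.samples = np.loadtxt(path, delimiter=',')
        self.len = len(self.samples)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        return torch.from_numpy(self.samples[index])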