Example No. 1
def main():
    iters = 0
    costs = []
    for epoch in range(num_epochs):
        costs_per_epoch = []
        real_eegs.shuffle()
        for i, eegs in enumerate(real_eegs):
            if (eegs.shape[0] != batch_size):
                continue
            eegs = normalize(eegs)
            iters += 1
            optim.zero_grad()
            # eeg = estimated_eegs[i]
            eeg = Variable(eegs)
            if cuda:
                eeg = eeg.cuda()
            # eeg *= 1e5 *4
            x_prime = net(eeg)  # forward pass: reconstruct the batch
            cost = critereon(x_prime, eeg) * 1e3  # scaled reconstruction loss
            cost.backward()
            optim.step()
            costs_per_epoch += [cost.item()]
            if iters % print_iter == 0:
                avg_cost_epoch = sum(costs_per_epoch) / len(costs_per_epoch)
                print("[Iter: " + str(iters) + "] [Epoch: " + str(epoch) +
                      "] [Avg cost in epoch %f ] [Loss: %f]" %
                      (avg_cost_epoch, cost.item()))
                save_EEG(eeg.cpu().detach().view(batch_size, 1004,
                                                 44).numpy(), 44, 200,
                         "./reonconstructed_eegs/E-orginal-" + str(epoch))
                save_EEG(
                    x_prime.cpu().detach().view(batch_size, 1004,
                                                44).numpy(), 44, 200,
                    "./reonconstructed_eegs/E-generated-" + str(epoch))
        avg_cost_epoch = sum(costs_per_epoch) / len(costs_per_epoch)
        costs += [avg_cost_epoch]
        np.save("./reonconstructed_eegs/convVAE-lr1e-4-N4390-C44-L1004-E",
                np.asarray(costs))
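Example No. 1 relies on globals defined elsewhere in the script (net, critereon, optim, the hyperparameters, and the real_eegs loader). Below is a minimal sketch of plausible definitions, inferred from the loop and from the saved filename (convVAE-lr1e-4-N4390-C44-L1004-E); the stand-in architecture, the MSE criterion, and the batch size are assumptions, not the original source:

import torch
import torch.nn as nn

num_epochs = 100  # assumption; not shown in the snippet
batch_size = 64   # assumption; not shown in the snippet
print_iter = 50   # assumption; not shown in the snippet
cuda = torch.cuda.is_available()

# Stand-in for the conv VAE; the real architecture is not shown.
# Shapes follow the save_EEG calls above: batches of 1004 x 44 EEG windows.
net = nn.Sequential(
    nn.Flatten(),
    nn.Linear(1004 * 44, 128),
    nn.ReLU(),
    nn.Linear(128, 1004 * 44),
    nn.Unflatten(1, (1004, 44)),
)
if cuda:
    net = net.cuda()

critereon = nn.MSELoss()  # reconstruction loss ("critereon" spelling kept from the loop)
optim = torch.optim.Adam(net.parameters(), lr=1e-4)  # lr taken from the filename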
Example No. 2
def main():
    iters = 0
    for iteration in range(ITERS):
        real_eegs.shuffle()
        for i in range(len(real_eegs)):
            if (real_eegs[i].shape[0] != BATCH_SIZE):
                continue
            iters += 1
            ############################
            # (1) Update D network
            ###########################
            for p in netD.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update:

            # for p in netG.parameters():
            # 	p.requires_grad = False # to avoid computation

            for iter_d in range(CRITIC_ITERS):
                # randint is inclusive, so -2 skips the final batch, which may be smaller than BATCH_SIZE
                real = real_eegs[random.randint(0, len(real_eegs) - 2)]

                if USE_CUDA:
                    real = real.cuda()
                real_v = autograd.Variable(real)

                optimizerD.zero_grad()

                # train with real
                D_real = netD(real_v)
                D_real = D_real.mean()
                D_real.backward(mone)

                # train with fake
                # noise = netG.generate_noise(BATCH_SIZE, LENGTH, NUM_NODES)
                noise = noise_gen_G(BATCH_SIZE, LENGTH, NUM_NODES)

                if USE_CUDA:
                    noise = [x.cuda() for x in noise]

                noise_v = [autograd.Variable(x) for x in noise]

                if (len(noise) == 1):  #not conditional
                    fake = autograd.Variable(netG(noise_v[0]).data)
                if (len(noise) == 2):  #conditional with 1 Y
                    fake = autograd.Variable(netG(noise_v[0], noise_v[1]).data)
                if (len(noise) == 3):  #conditional with 2 Ys
                    fake = autograd.Variable(
                        netG(noise_v[0], noise_v[1], noise_v[2]).data)

                D_fake = netD(fake)
                D_fake = D_fake.mean()
                D_fake.backward(one)

                # train with gradient penalty (backward() must run here so the
                # penalty actually contributes to the critic's gradients)
                gradient_penalty = calc_gradient_penalty(
                    netD, real_v.data, fake.data)
                gradient_penalty.backward()

                D_cost = D_fake - D_real + gradient_penalty
                Wasserstein_D = D_real - D_fake
                optimizerD.step()

            ############################
            # (2) Update G network
            ###########################
            for p in netD.parameters():
                p.requires_grad = False  # to avoid computation

            # for p in netG.parameters():
            # 	p.requires_grad = True

            optimizerG.zero_grad()

            # noise = netG.generate_noise(BATCH_SIZE, LENGTH, NUM_NODES)
            noise = noise_gen_G(BATCH_SIZE, LENGTH, NUM_NODES)

            if USE_CUDA:
                noise = [x.cuda() for x in noise]

            noise_v = [autograd.Variable(x) for x in noise]

            # keep the graph intact here: detaching fake (as in the D step)
            # would block gradients from reaching netG
            if (len(noise) == 1):  # not conditional
                fake = netG(noise_v[0])
            if (len(noise) == 2):  # conditional with 1 Y
                fake = netG(noise_v[0], noise_v[1])
            if (len(noise) == 3):  # conditional with 2 Ys
                fake = netG(noise_v[0], noise_v[1], noise_v[2])

            G = netD(fake)
            G = G.mean()
            G.backward(mone)  # maximize D(G(z))
            G_cost = -G
            optimizerG.step()

            # if (iters % 1000 == 0):
            # 	save_EEG(fake.cpu().detach().numpy(), NUM_NODES, 200, "./generated_eegs/generated-iter"+ str(iters) + "-fake-rG-long")
            # 	print("Epoch", iteration)
            # 	print("G_cost" , G_cost)
            # 	print("D_cost", D_cost)
        save_EEG(
            fake.cpu().detach().numpy(), NUM_NODES, 200,
            "./generated_eegs/generated-" + str(iteration) + "-fake-rcG-short")
        # save_EEG(real.cpu().detach().numpy(), NUM_NODES, 200, "./generated_eegs/generated-" + str(iteration-1) + "-real-rG-long-norm")
        print("Epoch", iteration)
        print("G_cost", G_cost)
        print("D_cost", D_cost)
Example No. 3
def main():
    iters = 0
    for iteration in range(ITERS):
        real_eegs.shuffle()
        for i in range(len(real_eegs)):
            if (real_eegs[i].shape[0] != BATCH_SIZE):
                continue
            iters += 1
            ############################
            # (1) Update D network
            ###########################
            for p in netD.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update:

            # for p in netG.parameters():
            # 	p.requires_grad = False # to avoid computation

            optimizerD.zero_grad()
            inputv = autograd.Variable(real_eegs[i])

            if USE_CUDA:
                inputv = inputv.cuda()

            unl_output = netD(inputv)
            loss_unl_real = -torch.mean(LSE(unl_output), 0) + torch.mean(
                F.softplus(LSE(unl_output), 1), 0)

            # train with fake
            # noise = netG.generate_noise(BATCH_SIZE, LENGTH, NUM_NODES)
            noise = noise_gen_G(BATCH_SIZE, LENGTH, NUM_NODES)

            if USE_CUDA:
                noise = [x.cuda() for x in noise]

            noise_v = [autograd.Variable(x) for x in noise]

            if (len(noise) == 1):  #not conditional
                fake = autograd.Variable(netG(noise_v[0]).data)
            if (len(noise) == 2):  #conditional with 1 Y
                fake = autograd.Variable(netG(noise_v[0], noise_v[1]).data)
            if (len(noise) == 3):  #conditional with 2 Ys
                fake = autograd.Variable(
                    netG(noise_v[0], noise_v[1], noise_v[2]).data)

            # fake samples are detached from the graph, so the backward pass
            # below cannot propagate gradients into netG
            unl_output = netD(fake.detach())
            loss_unl_fake = torch.mean(F.softplus(LSE(unl_output), 1), 0)
            loss_D = loss_unl_real + loss_unl_fake
            loss_D.backward()
            optimizerD.step()

            ############################
            # (2) Update G network
            ###########################
            for p in netD.parameters():
                p.requires_grad = False  # to avoid computation

            # for p in netG.parameters():
            # 	p.requires_grad = True

            optimizerG.zero_grad()

            # noise = netG.generate_noise(BATCH_SIZE, LENGTH, NUM_NODES)
            noise = noise_gen_G(BATCH_SIZE, LENGTH, NUM_NODES)

            if USE_CUDA:
                noise = [x.cuda() for x in noise]

            noise_v = [autograd.Variable(x) for x in noise]

            # keep the graph intact here: detaching fake (as in the D step)
            # would block gradients from reaching netG
            if (len(noise) == 1):  # not conditional
                fake = netG(noise_v[0])
            if (len(noise) == 2):  # conditional with 1 Y
                fake = netG(noise_v[0], noise_v[1])
            if (len(noise) == 3):  # conditional with 2 Ys
                fake = netG(noise_v[0], noise_v[1], noise_v[2])

            inputv = autograd.Variable(real_eegs[i])
            if USE_CUDA:
                inputv = inputv.cuda()
            # feature matching: make the mean discriminator features of fake
            # batches match those of real batches
            feature_real, _ = netD(inputv, matching=True)
            feature_fake, output = netD(fake, matching=True)
            feature_real = torch.mean(feature_real, 0)
            feature_fake = torch.mean(feature_fake, 0)
            loss_G = criterionG(feature_fake, feature_real.detach())
            loss_G.backward()
            optimizerG.step()
            # if (iters % 1000 == 0):
            # 	save_EEG(fake.cpu().detach().numpy(), NUM_NODES, 200, "./generated_eegs/generated-iter"+ str(iters) + "-fake-rG-long")
            # 	print("Epoch", iteration)
            # 	print("G_cost" , G_cost)
            # 	print("D_cost", D_cost)
        save_EEG(
            fake.cpu().detach().numpy(), NUM_NODES, 200,
            "./generated_eegs/generated-" + str(iteration) +
            "-fake-cG-matching-minB")
        # save_EEG(real.cpu().detach().numpy(), NUM_NODES, 200, "./generated_eegs/generated-" + str(iteration-1) + "-real-rG-long-norm")
        print("Epoch", iteration)
        print("G_cost", loss_G)
        print("D_cost", loss_D)
Example No. 4
            # re-wrapping the outputs in fresh Variables would detach them from
            # cleaner's graph; call the network directly so backward() reaches
            # its parameters, and detach the inputs so only the cleaner updates
            cleaned_noisy = cleaner(gen_imgs.detach())
            cleaned_clean = cleaner(estimated.detach())
            noisy_loss = torch.dist(cleaned_noisy, estimated)
            clean_loss = torch.dist(cleaned_clean, estimated)

            cleaner_loss = noisy_loss + clean_loss
            # print("cleaner loss", cleaner_loss)
            cleaner_loss.backward()
            optimizer_C.step()
        iter += 1
        if iter % print_iter == 0:
            print(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [C loss: %f]"
                % (epoch, n_epochs, i, len(real_eegs), d_loss.item(),
                   g_loss.item(), cleaner_loss.item()))
        print("cleaner loss", cleaner_loss.item())
    save_EEG(gen_imgs.cpu().detach().view(batch_size, 1004,
                                          44).numpy(), 44, 200,
             "./generated_eegs/generated-" + str(epoch) + "-fake-conv-add-cg")
    save_EEG(
        estimated.cpu().detach().view(batch_size, 1004, 44).numpy(), 44, 200,
        "./generated_eegs/generated-" + str(epoch) + "-estimated-conv-add-cg")
    save_EEG(
        cleaned_noisy.cpu().detach().view(batch_size, 1004,
                                          44).numpy(), 44, 200,
        "./generated_eegs/generated-" + str(epoch) + "-cleaned-conv-add-cg")
    print("Save @ Epoch", epoch)
    # batches_done = epoch * len(dataloader) + i
    # if batches_done % sample_interval == 0:
    # save_image(gen_imgs.data[:25], 'images/%d.png' % batches_done, nrow=5, normalize=True)
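Note that torch.dist(a, b) returns the p-norm of a - b over all elements (p=2 by default), so noisy_loss and clean_loss above are scalar Euclidean distances between the cleaner's outputs and the estimated clean EEGs. A quick sanity check:

import torch

a = torch.randn(4, 1004, 44)
b = torch.randn(4, 1004, 44)
# torch.dist flattens the difference and takes its 2-norm
assert torch.allclose(torch.dist(a, b), (a - b).norm(2))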
Example No. 5
        # WGAN-GP critic loss; real_validity = discriminator(real_imgs) is
        # assumed to be computed in the code truncated above this snippet
        d_loss = -torch.mean(real_validity) + torch.mean(
            fake_validity) + lambda_gp * gradient_penalty
        # g_loss = adversarial_loss(discriminator(gen_imgs), valid)

        d_loss.backward()
        optimizer_D.step()

        # ---------------------
        #  Train Generator
        # ---------------------
        if i % n_critic == 0:
            optimizer_G.zero_grad()

            # Generate a fresh batch and update G to maximize the critic's score
            fake_imgs = generator(z)

            fake_validity = discriminator(fake_imgs)
            g_loss = -torch.mean(fake_validity)

            g_loss.backward()
            optimizer_G.step()

        print(
            "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
            (epoch, n_epochs, i, len(real_eegs), d_loss.item(), g_loss.item()))
    save_EEG(gen_imgs.cpu().detach().view(batch_size, 1004,
                                          44).numpy(), 44, 200,
             "./generated_eegs/generated-" + str(epoch) + "-fake-conv-wgp")
    # batches_done = epoch * len(dataloader) + i
    # if batches_done % sample_interval == 0:
    # save_image(gen_imgs.data[:25], 'images/%d.png' % batches_done, nrow=5, normalize=True)
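All five examples call save_EEG(array, channels, rate, path), whose implementation is not shown. A hypothetical stand-in consistent with how it is called, assuming 44 is the channel count and 200 the sampling rate in Hz:

import numpy as np

def save_EEG(data, num_channels, sfreq, filename):
    # Hypothetical stand-in: persist the batch of EEG windows together
    # with the channel count and sampling rate (Hz) as a compressed .npz.
    np.savez_compressed(filename, data=data, num_channels=num_channels,
                        sfreq=sfreq)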