Example #1
def get_test_accuracy(model_d, f, iteration, label='semi'):
    # don't forget to do model_d.eval() before doing evaluation
    top1 = AverageMeter()
    for i, (input, target) in enumerate(dataloader_test):
        target = target.to(device)
        input = input.view(-1, opt.imageSize).to(device)
        output = model_d(input)

        probs = output.data[:, 1:] # discard the zeroth index
        prec1 = accuracy(probs, target, topk=(1,))[0]
        top1.update(prec1.item(), input.size(0))
        if i % 50 == 0:
              print("{} Test: [{}/{}]\t Prec@1 {top1.val:.3f} ({top1.avg:.3f})"\
                .format(label, i, len(dataloader_test), top1=top1))
    print('{label} Test Prec@1 {top1.avg:.2f}'.format(label=label, top1=top1))
    f.write("%s\n" % top1.avg)
    log_value('test_acc_{}'.format(label), top1.avg, iteration)
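These get_test_accuracy variants rely on an AverageMeter running-average tracker and a top-k accuracy helper that are not shown in the snippets. A minimal sketch of what they are assumed to look like, following the common PyTorch ImageNet-example pattern (the repository's actual definitions may differ):

# assumed helpers (not part of the original snippets)
class AverageMeter:
    """Tracks the most recent value and the running average."""
    def __init__(self):
        self.val, self.avg, self.sum, self.count = 0.0, 0.0, 0.0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k precision (in percent) for the given class scores and targets."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)   # indices of the k highest scores
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res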
Example #2
def get_test_accuracy(model_d, iteration, label='semi'):
    # don't forget to do model_d.eval() before doing evaluation
    top1 = AverageMeter()
    for i, (input, target) in enumerate(dataloader_test):
        target = target.cuda()
        input = input.cuda()
        # volatile=True is the legacy (pre-0.4) way of disabling autograd during evaluation
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)
        output = model_d(input_var)

        probs = output.data[:, 1:] # discard the zeroth index
        prec1 = accuracy(probs, target, topk=(1,))[0]
        top1.update(prec1.item(), input.size(0))
        if i % 50 == 0:
              print("{} Test: [{}/{}]\t Prec@1 {top1.val:.3f} ({top1.avg:.3f})".format(label, i, len(dataloader_test), top1=top1))
    print('{label} Test Prec@1 {top1.avg:.2f}'.format(label=label, top1=top1))
    log_value('test_acc_{}'.format(label), top1.avg, iteration)
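Example #2 uses the pre-0.4 Variable(..., volatile=True) API; on current PyTorch the same evaluation pass would normally be wrapped in torch.no_grad(). A sketch of the equivalent loop body, under the same assumed helpers as above:

# equivalent inference loop on PyTorch >= 0.4 (sketch)
with torch.no_grad():
    for i, (input, target) in enumerate(dataloader_test):
        input, target = input.cuda(), target.cuda()
        output = model_d(input)
        probs = output[:, 1:]                     # discard the fake-class column
        prec1 = accuracy(probs, target, topk=(1,))[0]
        top1.update(prec1.item(), input.size(0))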
Example #3
def get_test_accuracy(model_d, acc, f, label='semi'):
    # don't forget to do model_d.eval() before doing evaluation
    top1 = AverageMeter()
    for i, (x, y) in enumerate(test_loader):
        x = x.to(device)
        y = y.to(device)

        output = model_d(x.view(-1, args.n_input))

        probs = output.data[:, 1:]  # discard the zeroth index

        prec1 = accuracy(probs, y, topk=(1, ))[0]
        top1.update(prec1.item(), x.size(0))
        if i % 50 == 0:
            print("{} Test: [{}/{}]\t Prec@1 {top1.val:.3f} ({top1.avg:.3f})".
                  format(label, i, len(test_loader), top1=top1))
    f.write("%s\n" % top1.avg)
    acc.append(top1.avg)
    print('{label} Test Prec@1 {top1.avg:.2f}'.format(label=label, top1=top1))
Example #4
 errD_fake.backward()
 
 D_G_z1 = 1 - torch.nn.functional.softmax(output, dim=1).data[:, 0].mean()
 
 #######
 # 3. Labeled Data Part (for semi-supervised learning)
 for ii, (input_sup, target_sup) in enumerate(dataloader_semi):
     input_sup, target_sup = input_sup.cuda(), target_sup.cuda()
     break
 input_sup_v = Variable(input_sup.cuda())
 # shift target indices from 0-9 to 1-10 (index 0 is reserved for the fake class)
 target_sup_v = Variable((target_sup + 1).cuda())
 output_sup = netD(input_sup_v) #used to have no [0] index
 err_sup = criterion(output_sup, target_sup_v)
 err_sup.backward()
 prec1 = accuracy(output_sup.data, target_sup + 1, topk=(1,))[0]
 top1.update(prec1.item(), input_sup.size(0))
 if opt.bayes:
     errD_prior = dprior_criterion(netD.parameters())
     errD_prior.backward()
     errD_noise = dnoise_criterion(netD.parameters())
     errD_noise.backward()
     errD = errD_real + errD_fake + err_sup + errD_prior + errD_noise
 else:
     errD = errD_real + errD_fake + err_sup
 optimizerD.step()
 
 # 4. Generator
 for netG in netGs:
     netG.zero_grad()
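Examples #4-#6 follow the K+1-class semi-supervised GAN convention: the discriminator emits K+1 logits, index 0 is reserved for the "fake" class, real labels are shifted to 1..K, and D(x) is read off as 1 minus the softmax probability of the fake class. A small self-contained sketch of that convention (names and shapes here are illustrative, not from the original code):

import torch
import torch.nn.functional as F

K = 10                                    # number of real classes (e.g. digits 0-9)
logits = torch.randn(64, K + 1)           # discriminator output: [fake, class 1..K]
targets = torch.randint(0, K, (64,))      # original labels 0..K-1

# supervised loss with labels shifted by +1 so index 0 stays reserved for "fake"
sup_loss = F.cross_entropy(logits, targets + 1)

# probability that a sample is real = 1 - softmax probability of the fake class
d_x = 1 - F.softmax(logits, dim=1)[:, 0].mean()

# classification accuracy: drop column 0 and compare against the unshifted labels
pred = logits[:, 1:].argmax(dim=1)
acc = (pred == targets).float().mean()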
Example #5
        # x_rec = decoder(z_enc)
        # outputs_rec = discriminator(x_rec.detach())
        # d_loss_rec = ce(outputs_rec, fake_labels)
        # d_loss_rec.backward()

        #Labeled Data Part (for semi-supervised learning)
        for ii, (x_sup, y_sup) in enumerate(dataloader_semi):
            # print("input", input_sup.data.mean())  #suffle, different every time
            # convert target indicies from 0 to 9 to 1 to 10, cuz 0 represent "fake" now
            x_sup, y_sup = x_sup.view(
                -1, args.n_input).to(device), (y_sup + 1).to(device)
            break

        output_sup = discriminator(x_sup)
        d_loss_sup = ce(output_sup, y_sup)
        prec1 = accuracy(output_sup.data, y_sup, topk=(1, ))[0]
        top1.update(prec1.item(), x_sup.size(0))
        #d_loss_sup.backward()

        # print("d_loss_real", d_loss_real.item())
        # print("d_loss_fake", d_loss_fake.item())
        # print("d_loss_sup", d_loss_sup.item())
        d_loss = (d_loss_real + d_loss_fake + d_loss_sup)

        reset_grad()
        d_loss.backward()
        dis_optimizer.step()

        if (i + 1) % len(train_loader) == 0:
            # get test accuracy on train and test
            discriminator.eval()
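Example #5 also calls a reset_grad() helper and a ce criterion that are not shown. Presumably ce is a CrossEntropyLoss instance and reset_grad() zeroes every optimizer before the combined backward pass; a sketch of that assumption (optimizer names here are hypothetical):

# assumed helpers (not in the original snippet; optimizer names are hypothetical)
ce = torch.nn.CrossEntropyLoss()

def reset_grad():
    dis_optimizer.zero_grad()
    gen_optimizer.zero_grad()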
Example #6
def Legacy():
    # --- the training part ---
    iteration = 0
    torch.autograd.set_detect_anomaly(True)
    for epoch in range(opt.niter):
        top1 = AverageMeter()
        top1_weakD = AverageMeter()
        for i, data in enumerate(dataloader):
            iteration += 1
            #######
            # 1. real input
            netD.zero_grad()
            _input, _label = data
            #print(_label)
            #print(_input.shape,_label.shape) #both 64 of course.
            batch_size = _input.size(0)
            if opt.cuda:
                _input = _input.cuda()
            input.resize_as_(_input).copy_(_input)
            label.resize_(batch_size).fill_(real_label)
            inputv = Variable(input)
            labelv = Variable(label)
            output, kl = netD(inputv)
            #print(output)
            # --- the backprop for bayesian conv ---
            label = label.type(torch.cuda.LongTensor)
            #errD_real = elbo(output, label, kl, get_beta(epoch, len(dataset)))
            errD_real = elbo(output, label, 0, get_beta(epoch, len(dataset)))
            errD_real.backward()
            # calculate D_x, the probability that real data are classified
            D_x = 1 - torch.nn.functional.softmax(output,
                                                  dim=1).data[:, 0].mean()

            #######
            # 2. Generated input
            noise.resize_(batch_size, opt.nz, 1, 1).normal_(0, 1)
            noisev = Variable(noise)
            _fake = netG(noisev)
            #print(_fake.shape)
            if opt.is_bayesian_generator == False:
                fake = _fake
            else:
                fake = _fake[0]
            output, kl = netD(fake.detach())
            labelv = Variable(
                torch.LongTensor(fake.data.shape[0]).cuda().fill_(fake_label))
            # --- the backprop for bayesian conv ---
            #errD_fake = elbo(output, labelv, kl, get_beta(epoch, 1))
            errD_fake = elbo(output, labelv, 0, get_beta(epoch, 1))
            errD_fake.backward()
            D_G_z1 = 1 - \
                torch.nn.functional.softmax(output, dim=1).data[:, 0].mean()

            #######
            # 3. Labeled Data Part (for semi-supervised learning)
            if opt.semi_supervised_boost == True:
                for ii, (input_sup, target_sup) in enumerate(dataloader_semi):
                    input_sup, target_sup = input_sup.cuda(), target_sup.cuda()
                    break
                input_sup_v = Variable(input_sup.cuda())
                # shift target indices from 0-9 to 1-10 (index 0 is reserved for the fake class)
                target_sup_v = Variable((target_sup + 1).cuda())
                output_sup, kl_sup = netD(input_sup_v)
                #err_sup = criterion(output_sup, target_sup_v)
                # --- the backprop for bayesian conv ---
                print("kl is :",
                      kl_sup * get_beta(epoch, len(dataset_partial)))
                err_sup = elbo(output_sup, target_sup_v, kl_sup,
                               get_beta(epoch, len(dataset_partial)))
                err_sup.backward()
                prec1 = accuracy(output_sup.data, target_sup + 1,
                                 topk=(1, ))[0]
                top1.update(prec1, input_sup.size(0))
                errD = errD_real + errD_fake + err_sup
                optimizerD.step()
            else:
                errD = errD_real + errD_fake
                optimizerD.step()

            # A. Classifier Discriminator
            if opt.is_using_classification == True:
                #label for classification definition start
                label_classification_real = Variable(
                    torch.LongTensor(_label).cuda())
                # random class labels over all 10 classes (randint's upper bound is exclusive)
                _label_random = np.random.randint(0, 10, batch_size)
                label_classification_fake = Variable(
                    torch.LongTensor(_label_random).cuda())  # cross-entropy targets must be LongTensor
                #definition end
                #the real input start
                outputA, outputB, klA, klB = netD_class(inputv)
                print("A:", outputA.shape, "B:", outputB.shape)
                print(label_classification_real.shape)
                print(outputB)
                label_is_generated = Variable(
                    torch.LongTensor(
                        fake.data.shape[0]).cuda().fill_(real_label))
                errD_is_generated = elbo(outputA, label_is_generated, klA,
                                         get_beta(epoch, len(dataset)))
                errD_classification = elbo(outputB, label_classification_real,
                                           klB, get_beta(epoch, len(dataset)))
                errD_sum_real = errD_is_generated + errD_classification
                #the real input end
                #the generated input start
                outputA, outputB, klA, klB = netD_class(fake.detach())
                label_is_generated = Variable(
                    torch.LongTensor(
                        fake.data.shape[0]).cuda().fill_(fake_label))
                errD_is_generated = elbo(outputA, label_is_generated, klA,
                                         get_beta(epoch, len(dataset)))
                errD_classification = elbo(outputB, label_classification_fake,
                                           klB, get_beta(epoch, len(dataset)))
                errD_sum_fake = errD_is_generated + errD_classification
                #generated input part end
                errD_sum = errD_sum_fake + errD_sum_real
                errD_sum.backward()
                optimizerD_class.step()

            # 4. Generator
            netG.zero_grad()
            labelv = Variable(
                torch.LongTensor(fake.data.shape[0]).cuda().fill_(real_label))
            output, kl = netD(fake)
            #print(netG.parameters)
            #errG = criterion_comp(output)
            # print(labelv)  # the output is all 1; not sure why the original code put it in a float tensor.
            #errG = elbo(output, labelv, kl, get_beta(epoch, 1))
            errG = elbo(output, labelv, 0, get_beta(epoch, 1))
            errG.backward()
            D_G_z2 = 1 - torch.nn.functional.softmax(output, dim=1).data[:, 0].mean()
            optimizerG.step()
            '''
            # 5. Fully supervised training (running in parallel for comparison)
            netD_fullsup.zero_grad()
            try:
                input_fullsup = Variable(input_sup)
            except NameError as e:
                #print(e, '*** Not Defined!!!!! *** draw a new one from the deck')
                for ii, (input_sup, target_sup) in enumerate(dataloader_semi):
                    input_sup, target_sup = input_sup.cuda(), target_sup.cuda()
                    #print(input_sup)
                    break
            finally:
                input_fullsup = Variable(input_sup)
            target_fullsup = Variable((target_sup + 1))
            output_fullsup, kl_fullsup = netD_fullsup(input_fullsup)
            #err_fullsup = criterion_fullsup(output_fullsup, target_fullsup)
            # --- the backprop for bayesian conv ---
            err_fullsup = elbo(output_fullsup, target_fullsup,
                            kl_fullsup, get_beta(epoch, len(dataset)))

            optimizerD_fullsup.zero_grad()
            err_fullsup.backward()
            optimizerD_fullsup.step()
            errD += err_fullsup
            if opt.semi_supervised_boost == False:
                optimizerD.step()
            

            # 6. get test accuracy after every interval
            if iteration % opt.stats_interval == 0:
                # get test accuracy on train and test
                netD.eval()
                if opt.semi_supervised_boost == True:
                    get_test_accuracy(netD, iteration, label='semi')
                get_test_accuracy(netD_fullsup, iteration, label='sup')
                netD.train()
            '''

            # 7. Report for this iteration
            cur_val, ave_val = top1.val, top1.avg
            log_value('train_acc', top1.avg, iteration)
            print(
                '[%d/%d][%d/%d] Loss_D: %.2f Loss_G: %.2f D(x): %.2f D(G(z)): %.2f / %.2f | Acc %.1f / %.1f'
                % (epoch, opt.niter, i, len(dataloader), errD.data, errG.data,
                   D_x, D_G_z1, D_G_z2, cur_val, ave_val))
        # after each epoch, save images
        vutils.save_image(_input,
                          '%s/real_samples.png' % opt.outf,
                          normalize=True)
        if opt.is_bayesian_generator == False:
            fake = netG(fixed_noise)
        else:
            fake = netG(fixed_noise)[0]
        vutils.save_image(fake.data,
                          '%s/fake_samples_epoch_%03d_G.png' % (
                              opt.outf,
                              epoch,
                          ),
                          normalize=True)
        torch.save(netG.state_dict(),
                   '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD_fullsup.state_dict(),
                   '%s/netD_fullsup_epoch_%d.pth' % (opt.outf, epoch))

    # event_accumulator must be importable here; the original import was from
    # tensorflow.python.summary, current TensorBoard exposes it as:
    from tensorboard.backend.event_processing import event_accumulator
    ea = event_accumulator.EventAccumulator(opt.outf)
    ea.Reload()

    _df1 = pd.DataFrame(ea.Scalars('test_acc_semi'))
    _df2 = pd.DataFrame(ea.Scalars('test_acc_sup'))
    df = pd.DataFrame()
    df['Iteration'] = pd.concat([_df1['step'], _df2['step']])
    df['Accuracy'] = pd.concat([_df1['value'], _df2['value']])
    df['Classification'] = ['BayesGAN'] * \
        len(_df1['step']) + ['Baseline']*len(_df2['step'])

    # The results show that the semi-supervised Bayesian discriminator (trained alongside the Bayesian generator) outperforms the baseline discriminator trained only on the partially labeled data.

    p = ggplot(
        df,
        aes(x='Iteration',
            y='Accuracy',
            color='Classification',
            label='Classification')) + geom_point(size=0.5)
    print(p)
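Example #6 trains the Bayesian layers through elbo(output, target, kl, beta) with a get_beta(epoch, m) schedule, neither of which is shown. Under the usual Bayes-by-backprop formulation this is just cross-entropy plus a beta-weighted KL term; a minimal sketch of that assumption (the repository's actual definitions may differ):

import torch.nn.functional as F

def elbo(output, target, kl, beta):
    # negative ELBO for classification: data term (cross-entropy) plus beta-weighted KL
    return F.cross_entropy(output, target) + beta * kl

def get_beta(epoch, m):
    # simplest choice: spread the KL penalty evenly over m examples/minibatches
    return 1.0 / m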