batch_size=mini_batch_size,
                             shuffle=False,
                             num_workers=0)

# Determine whether CUDA should be used on this compute node.
# NOTE(review): a non-None cuDNN version is used here as a proxy for CUDA
# availability; torch.cuda.is_available() would be the canonical check, but
# the original condition is preserved.  Fixed the non-idiomatic
# `!= None` / `== True` comparisons (PEP 8: use `is not None` / truthiness).
if torch.backends.cudnn.version() is not None and USE_CUDA:

    # Push the data to the GPU and rebuild the evaluation dataloader.
    # NOTE(review): `.cuda()` assumes torch_dataset is a tensor-like object;
    # standard Dataset subclasses do not implement `.cuda()` — confirm.
    dataloader_eval = DataLoader(torch_dataset.cuda(),
                                 batch_size=mini_batch_size,
                                 shuffle=False)

# VISUALIZE LATENT SPACE REPRESENTATION
# Switch both networks into evaluation mode so stochastic layers
# (e.g. dropout) are disabled during inference.
for network in (encoder_eval, decoder_eval):
    network.eval()

# running count of mini-batches processed so far
batch_count = 0

# iterate over epoch mini batches
# iterate over the epoch's mini-batches, accumulating the latent-space
# codes of every transaction into z_enc_transactions_all
for enc_transactions_batch in dataloader_eval:

    # encode the mini-batch into its latent-space representation
    z_enc_transactions_batch = encoder_eval(enc_transactions_batch)

    # case: initial batch — seed the accumulator with this batch's codes
    # (subsequent batches are presumably concatenated in an else-branch
    # below this excerpt, where batch_count is also incremented — the
    # loop body is truncated here)
    if batch_count == 0:

        # start the accumulator with the first batch of latent codes
        z_enc_transactions_all = z_enc_transactions_batch
# ===== Example #2 =====
        type=int,
        help='Which model epoch you want to use. Must be present',
        default=200)
    args = parser.parse_args()
    stats_path = 'fid_stats.npz'  # precalculated training-set FID statistics
    # Load the training-time configuration; use a context manager so the
    # file handle is closed deterministically (the original leaked it via
    # json.load(open(...))).
    with open(os.path.join(args.path, 'args.json')) as args_file:
        model_args = json.load(args_file)
    inception_path = fid.check_or_download_inception(
        None)  # download the Inception network if it is not already cached

    # Rebuild the generator with the architecture recorded at training time
    # and restore the weights of the requested epoch.
    model = Decoder((3, 32, 32), model_args['gen_h_size'],
                    model_args['z_size'], True, nn.ReLU(True), 4).cuda()
    model.load_state_dict(
        torch.load(
            os.path.join(model_args['model_path'],
                         'model[%s].ph' % args.epoch)))
    model.eval()
    print('Generating samples')
    # 50 batches x 1000 samples = 50k generated images (the usual FID budget)
    batches = [
        generate_samples(1000, model, model_args['z_size']) for _ in range(50)
    ]
    # Flatten the batches into one array and move channels last
    # (NCHW -> NHWC) as expected by the TF Inception graph.
    images = np.array([sample for batch in batches
                       for sample in batch]).transpose(0, 2, 3, 1)

    # Load precalculated training-set statistics; NpzFile supports the
    # context-manager protocol, which guarantees the archive is closed
    # (replaces the manual f.close()).
    with np.load(stats_path) as stats_file:
        mu_real, sigma_real = stats_file['mu'][:], stats_file['sigma'][:]

    fid.create_inception_graph(
        inception_path)  # load the Inception graph into the current TF graph
    with tf.Session() as sess:
# ===== Example #3 — file: main.py, project: lavoiems/Playground =====
        # ---- one training epoch over the train set ----
        print('Epoch: %s' % i)
        gan.train()
        discriminator.train()
        start = time.time()
        for data in train_loader:
            t += 1
            # ---- discriminator (critic) update ----
            discriminator_optimizer.zero_grad()
            loss = get_disc_loss(data, gan, discriminator, data[0].shape[0],
                                 args.z_size, args.use_penalty)
            # NOTE(review): no loss.backward() is visible before step();
            # unless get_disc_loss() performs the backward pass internally,
            # this step is a no-op — confirm against get_disc_loss.
            discriminator_optimizer.step()
            if args.use_weight_clip:
                # WGAN-style weight clipping on the critic
                discriminator.apply(weight_clip)
            # update the generator only once every n_dis critic updates
            if t == args.n_dis:
                t = 0
                generator_optimizer.zero_grad()
                loss = get_gen_loss(gan, discriminator, data[0].shape[0],
                                    args.z_size)
                # NOTE(review): same concern as above — no visible backward
                # call before the generator step.
                generator_optimizer.step()

        end = time.time()
        print('Epoch time: %s' % (end - start))
        # switch to eval mode for visualization / checkpointing
        gan.eval()
        discriminator.eval()

        data, labels = get_data(test_loader)
        vizualize(data, gan, 0, args.z_size, viz, args.save_path,
                  args.batch_size)
        # checkpoint the generator every 10 epochs
        if i % 10 == 0:
            torch.save(gan.state_dict(),
                       os.path.join(args.model_path, 'model[%s].ph' % i))
# ===== Example #4 =====
				real_center.data[m,0,sp] = real_point[m,0,distance_order[sp][0]]  
	# move the ground-truth crop region to the compute device and drop
	# its singleton dimension
	real_center = real_center.to(device)
	real_center = torch.squeeze(real_center,1)
	input_cropped1 = input_cropped1.to(device) 
	input_cropped1 = torch.squeeze(input_cropped1,1)
	# downsample the cropped cloud via farthest-point sampling to the
	# second resolution in point_scales_list (RAN=True presumably selects
	# a randomized starting point — confirm against utils)
	input_cropped2_idx = utils.farthest_point_sample(input_cropped1,opt.point_scales_list[1],RAN = True)
	input_cropped2 = utils.index_points(input_cropped1,input_cropped2_idx)

	# inference only — no gradients required
	# NOTE(review): torch.autograd.Variable is deprecated; a plain tensor
	# inside torch.no_grad() would be the modern equivalent.
	input_cropped1 = Variable(input_cropped1,requires_grad = False)
	input_cropped2 = Variable(input_cropped2,requires_grad = False)
	input_cropped2 = input_cropped2.to(device)

	# rebuild the generator and restore the checkpoint given by opt.netG;
	# map_location keeps GPU-saved weights loadable on a CPU-only machine.
	# NOTE(review): this reconstruction/reload appears to run on every
	# iteration of the enclosing loop — hoisting it out would avoid
	# repeated checkpoint loads; confirm against the loop header.
	gen_net = Decoder(opt.point_scales_list[0], opt.crop_point_num)
	gen_net = torch.nn.DataParallel(gen_net)
	gen_net.to(device)
	gen_net.load_state_dict(torch.load(opt.netG,map_location=lambda storage, location: storage)['state_dict'])   
	gen_net.eval()

	# predict the missing region and score it against the ground truth;
	# per the print below, dist1 is the pred->GT component and dist2 the
	# GT->pred component of the Chamfer distance
	fake_center1, fake_fine = gen_net(input_cropped1)
	CD_loss_all, dist1, dist2 = criterion_PointLoss(torch.squeeze(fake_fine,1),torch.squeeze(real_center,1))
	print('pred->GT|GT->pred:', dist1.item(), dist2.item())
	losses1.append(dist1.item())
	losses2.append(dist2.item())

# Report aggregate Chamfer-distance statistics, scaled by 1000 for
# readability.  The three label strings are normalized to one format —
# the original min-line misplaced its colon and the max-line carried a
# stray trailing space.
mean1, mean2 = np.mean(losses1) * 1000, np.mean(losses2) * 1000
max1, max2 = np.amax(losses1) * 1000, np.amax(losses2) * 1000
min1, min2 = np.amin(losses1) * 1000, np.amin(losses2) * 1000
print('mean CD loss pred->GT|GT->pred:', mean1, mean2)
print('max CD loss pred->GT|GT->pred:', max1, max2)
print('min CD loss pred->GT|GT->pred:', min1, min2)