def scoring_function(solution_file, predict_file):
    """Score a prediction archive against the solution archive.

    Computes two PRD-AUC scores -- one on learned image embeddings of the
    energy-deposit maps, one on derived physical statistics -- and returns
    the smaller of the two.

    Args:
        solution_file: path to an .npz archive with 'EnergyDeposit',
            'ParticleMomentum' and 'ParticlePoint' arrays.
        predict_file: path to an .npz archive with the same keys for the
            prediction.

    Returns:
        float: min(image-embedding PRD AUC, physical-metrics PRD AUC).
    """
    # Fix the seed so the clustering inside the PRD computation is
    # reproducible across scoring runs.
    np.random.seed(1337)

    solution = np.load(solution_file, allow_pickle=True)
    predict = np.load(predict_file, allow_pickle=True)

    EnergyDeposit_sol = solution['EnergyDeposit']
    ParticleMomentum_sol = solution['ParticleMomentum']
    ParticlePoint_sol = solution['ParticlePoint']

    EnergyDeposit_pred = predict['EnergyDeposit']
    # BUG FIX: these two were read from `solution` instead of `predict`,
    # which made the physical-metrics comparison compare the solution with
    # itself and score trivially well.
    ParticleMomentum_pred = predict['ParticleMomentum']
    ParticlePoint_pred = predict['ParticlePoint']

    embedder = load_embedder('./embedder.tp')
    # Maps are embedded as 1-channel 30x30 images (shape fixed by the
    # .view(-1, 1, 30, 30) below).
    EnergyDeposit_sol_emb = embedder.get_encoding(
        torch.tensor(EnergyDeposit_sol).float().view(-1, 1, 30,
                                                     30)).detach().numpy()

    # BUG FIX: the prediction embedding was computed from the *solution*
    # maps, so auc_img also compared the solution with itself.
    EnergyDeposit_pred_emb = embedder.get_encoding(
        torch.tensor(EnergyDeposit_pred).float().view(-1, 1, 30,
                                                      30)).detach().numpy()

    precision, recall = compute_prd_from_embedding(
        EnergyDeposit_sol_emb.reshape(len(EnergyDeposit_sol), -1),
        EnergyDeposit_pred_emb.reshape(len(EnergyDeposit_pred), -1),
        num_clusters=100,
        num_runs=100)

    auc_img = auc(precision, recall)

    physical_metrics_sol = get_physical_stats(EnergyDeposit_sol,
                                              ParticleMomentum_sol,
                                              ParticlePoint_sol)

    physical_metrics_pred = get_physical_stats(EnergyDeposit_pred,
                                               ParticleMomentum_pred,
                                               ParticlePoint_pred)

    precision, recall = compute_prd_from_embedding(physical_metrics_sol,
                                                   physical_metrics_pred,
                                                   num_clusters=100,
                                                   num_runs=100)

    auc_physical_metrics = auc(precision, recall)

    # Final score is the weaker of the two AUCs.
    return min(auc_img, auc_physical_metrics)
# Beispiel #2 ("Example #2" -- separator left over from the code-collection source)
# 0 (stray artifact from the same source)
def getPRD():
    """Compute the PRD curve between real and generated samples.

    Draws 10000 real and 10000 fake samples, embeds both through the
    Inception network, and runs the PRD computation on the embeddings.

    Returns:
        The (precision, recall) data returned by
        prd.compute_prd_from_embedding.
    """
    n_samples = 10000
    real_batch = getRealData(n_samples)
    fake_batch = getFakedata(n_samples)

    # Embed both sample sets with the Inception network before comparing.
    real_emb = fid_tf.get_inception_activations(real_batch)
    fake_emb = fid_tf.get_inception_activations(fake_batch)

    result = prd.compute_prd_from_embedding(eval_data=fake_emb,
                                            ref_data=real_emb)
    print("PRD =", result)
    return result
# Beispiel #3 ("Example #3" -- separator left over from the code-collection source)
# 0 (stray artifact from the same source)
def plot_prd_train(learner, index, training_batch):
    """Plot a PRD curve of decoder samples against a training batch.

    Decodes 32 samples from a standard-normal 3-d latent prior and
    compares them with the (flattened) training batch; the plot is
    written to ./result/prd_train_<index>.png.
    """
    n_samples = 32
    reference = training_batch.detach().cpu().numpy()

    # Sample the latent prior and decode to data space.
    latent = torch.randn(n_samples, 3).cuda()
    generated = learner.decoder(latent).detach().cpu().numpy()

    curve = prd.compute_prd_from_embedding(
        generated, reference.reshape(n_samples, -1))

    prd.plot([curve], [str(index)],
             out_path='./result/prd_train_' + str(index) + '.png')
# Beispiel #4 ("Example #4" -- separator left over from the code-collection source)
# 0 (stray artifact from the same source)
def plot_prd(learner, index, prd_train_tasks):
    """Plot a PRD curve of decoder samples against one task's data slice.

    Takes the index-th slice of 1000 samples from a sampled training
    task, decodes 1000 samples from a standard-normal 3-d latent prior,
    and writes the PRD plot to ./result/prd_<index>.png.
    """
    sampled_task = prd_train_tasks.sample()
    true_data, label = sampled_task

    n_samples = 1000
    # Select the index-th contiguous window of n_samples items.
    start = index * n_samples
    true_data = true_data[start:start + n_samples]
    label = label[start:start + n_samples]

    # Sample the latent prior and decode to data space.
    latent = torch.randn(n_samples, 3).cuda()
    generated = learner.decoder(latent).detach().cpu().numpy()

    print(label, index)
    print(generated.shape, true_data.shape)

    curve = prd.compute_prd_from_embedding(
        generated, true_data.reshape(n_samples, -1))

    prd.plot([curve], [str(index)],
             out_path='./result/prd_' + str(index) + '.png')
def scoring_function(solution_file, predict_file):
    """Score predicted energy-deposit maps against the solution.

    This variant runs PRD directly on the flattened raw energy-deposit
    maps (no learned embedder). The predict archive is only expected to
    carry 'EnergyDeposit'; the solution's momentum/point arrays are
    reused when deriving the prediction's physical statistics.

    Args:
        solution_file: path to an .npz archive with 'EnergyDeposit',
            'ParticleMomentum' and 'ParticlePoint' arrays.
        predict_file: path to an .npz archive with an 'EnergyDeposit'
            array.

    Returns:
        float: min(raw-image PRD AUC, physical-metrics PRD AUC).
    """
    # Fix the seed so the clustering inside the PRD computation is
    # reproducible across scoring runs.
    np.random.seed(1337)

    solution = np.load(solution_file, allow_pickle=True)
    predict = np.load(predict_file, allow_pickle=True)

    EnergyDeposit_sol = solution['EnergyDeposit']
    ParticleMomentum_sol = solution['ParticleMomentum']
    ParticlePoint_sol = solution['ParticlePoint']

    EnergyDeposit_pred = predict['EnergyDeposit']

    # BUG FIX: a stray double comma (",,") after the second argument made
    # this call a SyntaxError; the function could not even be imported.
    precision, recall = compute_prd_from_embedding(
        EnergyDeposit_sol.reshape(len(EnergyDeposit_sol), -1),
        EnergyDeposit_pred.reshape(len(EnergyDeposit_pred), -1),
        num_clusters=100,
        num_runs=100)

    auc_img = auc(precision, recall)

    physical_metrics_sol = get_physical_stats(
        EnergyDeposit_sol,
        ParticleMomentum_sol,
        ParticlePoint_sol)

    # NOTE: the solution's momentum/point arrays are deliberately reused
    # here -- the predict file carries only the generated energy deposits.
    physical_metrics_pred = get_physical_stats(
        EnergyDeposit_pred,
        ParticleMomentum_sol,
        ParticlePoint_sol)

    precision, recall = compute_prd_from_embedding(
        physical_metrics_sol,
        physical_metrics_pred,
        num_clusters=100,
        num_runs=100)

    auc_physical_metrics = auc(precision, recall)

    # Final score is the weaker of the two AUCs.
    return min(auc_img, auc_physical_metrics)
def cluster_main(reference_dir, eval_dirs):
    """Compute PRD data for each evaluation directory vs. a reference.

    Embeds the reference images once, then embeds and scores every
    directory in eval_dirs against it. Options (cache dir, raw-image
    flag, PRD parameters, verbosity) come from the module-level `args`.

    Returns:
        list of PRD results, one entry per directory in eval_dirs.
    """
    if args.verbose:
        print('computing inception embeddings for ' + reference_dir)
    reference_embeddings = load_or_generate_inception_embedding(
        reference_dir, args.cache_dir, args.use_raw_images)

    results = []
    for eval_dir in eval_dirs:
        if args.verbose:
            print('computing inception embeddings for ' + eval_dir)
        candidate_embeddings = load_or_generate_inception_embedding(
            eval_dir, args.cache_dir, args.use_raw_images)
        if args.verbose:
            print('computing PRD')
        results.append(prd.compute_prd_from_embedding(
            eval_data=candidate_embeddings,
            ref_data=reference_embeddings,
            num_clusters=args.num_clusters,
            num_angles=args.num_angles,
            num_runs=args.num_runs))

    return results
# Beispiel #7 ("Example #7" -- separator left over from the code-collection source)
# 0 (stray artifact from the same source)
def compute_prd(reference_dir, eval_dirs, inception_path):
    """Compute, plot, and print PRD / F-beta scores for several models.

    Embeds the reference directory once, scores each evaluation
    directory against it with fixed PRD parameters (20 clusters,
    1001 angles, 10 runs), plots every curve, and prints the
    (F_8, F_1/8) pair per directory.
    """
    reference_embeddings = load_or_generate_inception_embedding(
        reference_dir, '/tmp/prd_cache/', inception_path)

    curves = []
    for eval_dir in eval_dirs:
        print('computing inception embeddings for ' + eval_dir)
        candidate_embeddings = load_or_generate_inception_embedding(
            eval_dir, '/tmp/prd_cache/', inception_path)
        print('computing PRD')
        curves.append(prd.compute_prd_from_embedding(
            eval_data=candidate_embeddings,
            ref_data=reference_embeddings,
            num_clusters=20,
            num_angles=1001,
            num_runs=10))

    # Summarize each curve as a max-F_beta pair (beta=8 weights recall).
    f_beta_pairs = [prd.prd_to_max_f_beta_pair(p, r, beta=8)
                    for p, r in curves]

    prd.plot(curves)
    print('F_8   F_1/8     model')
    for eval_dir, pair in zip(eval_dirs, f_beta_pairs):
        print('%.3f %.3f     %s' % (pair[0], pair[1], eval_dir))
# Beispiel #8 ("Example #8" -- separator left over from the code-collection source)
# 0 (stray artifact from the same source)
    # NOTE(review): fragment -- the enclosing function header is missing from
    # this chunk. Names such as test_dataset, transformer, vae_algorithm,
    # vae_config, args_opt, chpnt1/chpnt2, n_samples, alg and sample_prior
    # come from the (unseen) enclosing scope; confirm against the full file.
    # Transform test samples and generated samples with it
    ref_np_original = test_dataset.get_subset(len(test_dataset), n_samples)
    ref_np = transformer.transform(ref_np_original)

    # Fully trained model: decode samples drawn from the prior.
    model_np = sample_prior(vae_algorithm.model)

    # Load a checkpoint for comparison
    vae_garbage = getattr(alg,
                          vae_config['algorithm_type'])(vae_config['vae_opt'])
    chp1_path = 'models/{0}/vae_checkpoint{1}.pth'.format(
        args_opt.exp_vae, chpnt1)
    vae_garbage.load_checkpoint(chp1_path, eval=True)
    chpnt1_np = sample_prior(vae_garbage.model)

    # Second comparison model restored from a different checkpoint.
    vae_garbage2 = getattr(alg,
                           vae_config['algorithm_type'])(vae_config['vae_opt'])
    chp2_path = 'models/{0}/vae_checkpoint{1}.pth'.format(
        args_opt.exp_vae, chpnt2)
    vae_garbage2.load_checkpoint(chp2_path, eval=True)
    chpnt2_np = sample_prior(vae_garbage2.model)

    # Compute PRD of each sample set against the transformed test data
    # and plot all three curves in one figure.
    prd_data_model = prd.compute_prd_from_embedding(model_np, ref_np)
    prd_data_chpnt1 = prd.compute_prd_from_embedding(chpnt1_np, ref_np)
    prd_data_chpnt2 = prd.compute_prd_from_embedding(chpnt2_np, ref_np)
    prd.plot([prd_data_model, prd_data_chpnt1, prd_data_chpnt2],
             [args_opt.exp_vae, 'chpt' + str(chpnt1), 'chpt' + str(chpnt2)],
             out_path='models/{0}/prd.png'.format(args_opt.exp_vae))
# Beispiel #9 ("Example #9" -- separator left over from the code-collection source)
# 0 (stray artifact from the same source)
    # NOTE(review): fragment -- the enclosing function header is missing from
    # this chunk. reference_dir, eval_dirs and args come from the (unseen)
    # enclosing scope; confirm against the full file.
    if args.verbose:
        print('computing inception embeddings for ' + reference_dir)
    real_embeddings = load_or_generate_inception_embedding(
        reference_dir, args.cache_dir, args.inception_path)
    prd_data = []
    # Score every evaluation directory against the reference embeddings.
    for directory in eval_dirs:
        if args.verbose:
            print('computing inception embeddings for ' + directory)
        eval_embeddings = load_or_generate_inception_embedding(
            directory, args.cache_dir, args.inception_path)
        if args.verbose:
            print('computing PRD')
        prd_data.append(
            prd.compute_prd_from_embedding(eval_data=eval_embeddings,
                                           ref_data=real_embeddings,
                                           num_clusters=args.num_clusters,
                                           num_angles=args.num_angles,
                                           num_runs=args.num_runs))
    if args.verbose:
        print('plotting results')

    print()
    # Summarize each PRD curve as a max-F_beta pair (beta=8 weights recall).
    f_beta_data = [
        prd.prd_to_max_f_beta_pair(precision, recall, beta=8)
        for precision, recall in prd_data
    ]
    print('F_8   F_1/8     model')
    for directory, f_beta in zip(eval_dirs, f_beta_data):
        print('%.3f %.3f     %s' % (f_beta[0], f_beta[1], directory))

    prd.plot(prd_data, labels=args.eval_labels, out_path=args.plot_path)
            # NOTE(review): fragment -- indentation jumps from 12 to 8 spaces
            # and the chunk is truncated mid-loop at the bottom, so the
            # enclosing function/loop structure is not visible here. Names
            # like model, eval_config, path_to_data, device, args,
            # compute_chpnt_prds, chnpt_list, models and config_file come
            # from the (unseen) enclosing scope; confirm against the full file.
            model.Dnet.eval()
            model.Qnet.eval()
        
        n_prd_samples = eval_config['n_prd_samples']
        
        # Get the ground truth np array
        test_dataset = TrajDataset(path_to_data, device)
        ref_np = test_dataset.get_subset(len(test_dataset), n_prd_samples)

        # Get the sampled np array (no grad: generation only, no training)
        with torch.no_grad():
            z_noise, con_noise = model.ginput_noise(n_prd_samples)
            eval_data = model.g_forward(z_noise, con_noise)
            eval_np = eval_data.cpu().numpy().reshape(n_prd_samples, -1)
     
        prd_data = prd.compute_prd_from_embedding(eval_np, ref_np)
        
        # Only evaluate the last model
        if not compute_chpnt_prds:
            # Compute prd
            prd.plot([prd_data], [args.config_name], 
                     out_path='models/{0}/prd.png'.format(args.config_name))
        
        # Evaluate the intermediate checkpoints
        else:
            # Load the checkpoint models one by one (loop body appears
            # truncated in this chunk)
            chpr_prd_list = []
            for c in chnpt_list:
                model_chpt = models.InfoGAN(config_file)
                model_chpt.load_checkpoint(
                    'models/{0}/infogan_checkpoint{1}.pth'.format(args.config_name, c))
    # NOTE(review): fragment -- the enclosing function header is missing from
    # this chunk; the body duplicates the earlier CLI fragment almost exactly.
    # reference_dir, eval_dirs and args come from the (unseen) enclosing
    # scope; confirm against the full file.
    if args.verbose:
        print('computing inception embeddings for ' + reference_dir)
    real_embeddings = load_or_generate_inception_embedding(
        reference_dir, args.cache_dir, args.inception_path)
    prd_data = []
    # Score every evaluation directory against the reference embeddings.
    for directory in eval_dirs:
        if args.verbose:
            print('computing inception embeddings for ' + directory)
        eval_embeddings = load_or_generate_inception_embedding(
            directory, args.cache_dir, args.inception_path)
        if args.verbose:
            print('computing PRD')
        prd_data.append(prd.compute_prd_from_embedding(
            eval_data=eval_embeddings,
            ref_data=real_embeddings,
            num_clusters=args.num_clusters,
            num_angles=args.num_angles,
            num_runs=args.num_runs))
    if args.verbose:
        print('plotting results')

    print()
    # Summarize each PRD curve as a max-F_beta pair (beta=8 weights recall).
    f_beta_data = [prd.prd_to_max_f_beta_pair(precision, recall, beta=8)
                   for precision, recall in prd_data]
    print('F_8   F_1/8     model')
    for directory, f_beta in zip(eval_dirs, f_beta_data):
        print('%.3f %.3f     %s' % (f_beta[0], f_beta[1], directory))

    prd.plot(prd_data, labels=args.eval_labels, out_path=args.plot_path)