Example #1
import numpy as np
from keras import backend as K

import dal.load_dblp_data as dblp

# seed and k_fold are referenced below but never defined in this snippet;
# the values here are assumptions (Example #2 builds its strata with kfold=10).
seed = 7
k_fold = 10

k_max = 100
evaluation_k_set = np.arange(1, k_max + 1, 1)

# NN settings
epochs_in_batch = 25
epochs_overall = 10
back_propagation_batch_size = 64
training_batch_size = 6000
min_skill_size = 0
min_member_size = 0
latent_dim = 50
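
# One plausible (assumed) reading of the two epoch knobs above: an outer loop
# of epochs_overall rounds, each fitting for epochs_in_batch epochs. The toy
# model and data are placeholders, not the repository's autoencoder.
from keras.models import Sequential
from keras.layers import Dense

toy_model = Sequential([Dense(1, input_dim=4)])
toy_model.compile(optimizer='adam', loss='mse')
toy_x = np.random.rand(training_batch_size, 4)
toy_y = np.random.rand(training_batch_size, 1)
for _ in range(epochs_overall):
    toy_model.fit(toy_x, toy_y,
                  batch_size=back_propagation_batch_size,
                  epochs=epochs_in_batch,
                  verbose=0)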

# list the visible GPUs (old private Keras-on-TensorFlow API; TF2 code would
# use tf.config.list_physical_devices('GPU') instead)
print(K.tensorflow_backend._get_available_gpus())

if dblp.preprocessed_dataset_exist() and dblp.train_test_indices_exist():
    dataset = dblp.load_preprocessed_dataset()
    train_test_indices = dblp.load_train_test_indices()
else:
    if not dblp.ae_data_exist(file_path='../dataset/ae_dataset.pkl'):
        dblp.extract_data(filter_journals=True,
                          skill_size_filter=min_skill_size,
                          member_size_filter=min_member_size)
    if not dblp.preprocessed_dataset_exist() or not dblp.train_test_indices_exist():
        dblp.dataset_preprocessing(
            dblp.load_ae_dataset(file_path='../dataset/ae_dataset.pkl'),
            seed=seed,
            kfolds=k_fold)
    dataset = dblp.load_preprocessed_dataset()
    train_test_indices = dblp.load_train_test_indices()
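
# Sanity-check sketch of consuming the loaded folds. The fold-keyed layout and
# the 'Train'/'Test' key names are assumptions about what
# load_train_test_indices() returns, not its documented contract.
for fold_id in range(1, k_fold + 1):
    fold = train_test_indices[fold_id]
    print('fold', fold_id,
          'train size:', len(fold['Train']),
          'test size:', len(fold['Test']))
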
Example #2
import csv
import numpy as np
import tensorflow as tf  # used by the GPU check below
import eval.ranking as rk
import ml_metrics as metrics
import matplotlib.pyplot as plt
import dal.load_dblp_data as dblp
import eval.evaluator as dblp_eval
from keras import backend as K  # used by the sampling() helper below

filter_zero = True
at_k_mx = 10
at_k_set = range(1, at_k_mx + 1, 1)
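
# For reference, a toy MAP@k computation over at_k_set. ml_metrics.mapk takes
# a list of ground-truth lists and a list of ranked-prediction lists; the team
# member IDs below are made up purely for illustration.
toy_actual = [[1, 2, 3], [4, 5]]
toy_predicted = [[1, 9, 2, 8, 3], [5, 4, 7, 6, 0]]
print([metrics.mapk(toy_actual, toy_predicted, k) for k in at_k_set])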

user_HIndex = dblp.get_user_HIndex()
user_skill_dict = dblp.get_user_skill_dict(dblp.load_preprocessed_dataset())
foldIDsampleID_strata_dict = dblp.get_foldIDsampleID_stata_dict(
    data=dblp.load_preprocessed_dataset(),
    train_test_indices=dblp.load_train_test_indices(),
    kfold=10)

OKLO = '../output/predictions/O_KL_O_output.csv'
OKLU = '../output/predictions/O_KL_U_output.csv'
OVAEO = '../output/predictions/O_VAE_O_output.csv'
OVAEU = '../output/predictions/O_VAE_U_output.csv'
SKLO = '../output/predictions/S_KL_O_output.csv'
SKLU = '../output/predictions/S_KL_U_output.csv'
SVAEO = '../output/predictions/S_VAE_O_output.csv'
SVAEU = '../output/predictions/S_VAE_U_output.csv'
Sapienza = '../output/predictions/Sapienza_output.csv'
SVDpp = '../output/predictions/SVDpp_output.csv'
RRN = '../output/predictions/RRN_output.csv'
BL2009 = '../output/predictions/BL2009_output.csv'
# min_skill_size, seed, and k_fold are referenced below but never defined in
# this snippet; the values here are assumptions mirroring Example #1.
min_skill_size = 0
seed = 7
k_fold = 10
min_member_size = 0
latent_dim = 2
beta = 30
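
# One plausible (assumed) reading of beta: the KL weight in a beta-VAE-style
# loss. The stand-in tensors below only illustrate the weighting; they are
# not the model's real encoder outputs.
toy_z_mean = K.constant(np.zeros((1, latent_dim), dtype='float32'))
toy_z_log_var = K.constant(np.zeros((1, latent_dim), dtype='float32'))
toy_reconstruction_loss = K.constant(0.0)
toy_kl_loss = -0.5 * K.sum(
    1 + toy_z_log_var - K.square(toy_z_mean) - K.exp(toy_z_log_var), axis=-1)
toy_vae_loss = K.mean(toy_reconstruction_loss + beta * toy_kl_loss)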

print(tf.test.is_gpu_available())
m2v_path = '../dataset/embedding_dict.pkl'


if dblp.ae_data_exist(file_path='../dataset/ae_e_m2v_tSkill_dataset.pkl'):
    dataset = dblp.load_ae_dataset(file_path='../dataset/ae_e_m2v_tSkill_dataset.pkl')
else:
    if not dblp.ae_data_exist(file_path='../dataset/ae_dataset.pkl'):
        dblp.extract_data(filter_journals=True,
                          skill_size_filter=min_skill_size,
                          member_size_filter=min_member_size,
                          output_dir='../dataset/ae_dataset.pkl')
    if not dblp.preprocessed_dataset_exist(file_path='../dataset/dblp_preprocessed_dataset.pkl') \
            or not dblp.train_test_indices_exist(file_path='../dataset/Train_Test_indices.pkl'):
        dblp.dataset_preprocessing(
            dblp.load_ae_dataset(file_path='../dataset/ae_dataset.pkl'),
            indices_dict_file_path='../dataset/Train_Test_indices.pkl',
            preprocessed_dataset_file_path='../dataset/dblp_preprocessed_dataset.pkl',
            seed=seed,
            kfolds=k_fold)
    preprocessed_dataset = dblp.load_preprocessed_dataset(
        file_path='../dataset/dblp_preprocessed_dataset.pkl')

    dblp.nn_m2v_embedding_dataset_generator(
        model_path=m2v_path,
        dataset=preprocessed_dataset,
        output_file_path='../dataset/ae_e_m2v_tSkill_dataset.pkl',
        mode='skill',
        max_length=22)
    del preprocessed_dataset
    dataset = dblp.load_ae_dataset(file_path='../dataset/ae_e_m2v_tSkill_dataset.pkl')



# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
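
# Typical wiring for the helper above (the standard Keras VAE pattern): wrap
# sampling in a Lambda layer so it runs inside the graph. The encoder widths
# below are placeholders, not this repository's architecture.
from keras.layers import Input, Dense, Lambda

toy_inputs = Input(shape=(128,))
toy_h = Dense(64, activation='relu')(toy_inputs)
z_mean = Dense(latent_dim, name='z_mean')(toy_h)
z_log_var = Dense(latent_dim, name='z_log_var')(toy_h)
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])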