snoring_dataset
)  # create the train sets to compute mean and variance for normalization
labels = dm.label_loading(
    os.path.join(root_dir, 'lab', 'ComParE2017_Snore.tsv'))
trainset_l, develset_l, _ = dm.split_ComParE2017_simple(labels)
del snoring_dataset

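# seq[0] is assumed to hold each sample's identifier/label field; collect it
# so label_organize can align the features with the .tsv annotations.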
y = [seq[0] for seq in trainset]
yd = [seq[0] for seq in develset]

y_train, y_train_lab = dm.label_organize(trainset_l, y)
y_devel, y_devel_lab = dm.label_organize(develset_l, yd)

## EXTEND TRAINSET (disabled): move the first 140 devel samples into train
#y_train_lab = np.append(y_train_lab, y_devel_lab[:140])
#y_devel_lab = y_devel_lab[140:]


def compute_score(predictions, labels):
    # Score predictions against the reference labels with unweighted average
    # recall (UAR, sklearn's macro-averaged recall), the official ComParE
    # challenge metric (assumed scorer here).
    from sklearn.metrics import recall_score

    y_pred = [int(d) for d in predictions]
    y_true = [int(l) for l in labels]

    return recall_score(y_true, y_pred, average='macro')
# TODO: fix paths
sys.stdout = open(os.path.join(GanPath, 'GAN_test.txt'), 'w')      # log stdout to a file
sys.stderr = open(os.path.join(GanPath, 'GAN_test_err.txt'), 'w')  # log stderr to a file

# LOAD DATASET
snoring_dataset = dm.load_ComParE2017(featPath, filetype)
trainset, develset, testset = dm.split_ComParE2017_simple(snoring_dataset)
labels = dm.label_loading(
    os.path.join(root_dir, 'lab', 'ComParE2017_Snore.tsv'))
trainset_l, develset_l, _ = dm.split_ComParE2017_simple(labels)
del snoring_dataset

y = [seq[0] for seq in trainset]

y_train, y_train_lab, _ = dm.label_organize(trainset_l, y)

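# label_split is assumed to partition the training data by snore excitation
# class: V(elum), O(ropharyngeal), T(ongue base), E(piglottis), i.e. the VOTE
# scheme of the ComParE2017 Snore sub-challenge.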
V, O, T, E = dm.label_split(trainset_l, y)

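# Load the per-fold hyperparameters and the best model, presumably pickled
# into scoresPath by an earlier grid-search/tuning run.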
nMixtures = joblib.load(os.path.join(scoresPath, 'nmix2'))
Cs = joblib.load(os.path.join(scoresPath, 'cBestValues2'))        # best C per fold
gammas = joblib.load(os.path.join(scoresPath, 'gBestValues2'))    # best gamma per fold
Best_model = joblib.load(os.path.join(scoresPath, 'best_model'))  # best trained model
fold = 0

print("Fold: " + str(fold));
C = Cs[fold];
gamma = gammas[fold];
BM = Best_model

print "Loading Features"