for m in mixtures:
    print("Mixture: " + str(m))
    sys.stdout.flush()
    mixScores = np.zeros((nFolds * (nFolds - 1), 1))
    fIdx = 0
    for fold in range(0, nFolds):
        cGammaScores = np.zeros(
            (C_range.shape[0],
             gamma_range.shape[0]))  # initialize the score matrix
        cGammaScoresUAR = np.zeros(
            (C_range.shape[0],
             gamma_range.shape[0]))  # initialize the UAR score matrix
        curSupervecPath = os.path.join(supervecPath, "trainset_" + str(fold))
        for sf in range(0, nFolds):
            curSupervecSubPath = os.path.join(curSupervecPath, str(m))
            trainFeatures = utl.readfeatures(curSupervecSubPath, y)
            trainClassLabels = y_train_bin

            devFeatures = utl.readfeatures(curSupervecSubPath, yd)
            devClassLabels = y_devel_bin

            ##EXTEND TRAINSET
            #trainFeatures = np.vstack((trainFeatures,devFeatures[:140,:]))
            #devFeatures = devFeatures[140:]

            cIdx = 0
            for C in C_range:
                gIdx = 0
                for gamma in gamma_range:
                    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
                    scaler.fit(trainFeatures)
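                    # The snippet is truncated at this point; what follows is a
                    # minimal sketch of how the inner (C, gamma) loop could
                    # continue -- the SVC fit, svm.score and the UAR computed via
                    # sklearn's recall_score are assumptions, not code from the
                    # source.
                    svm = SVC(C=C, kernel='rbf', gamma=gamma)
                    svm.fit(scaler.transform(trainFeatures), trainClassLabels)
                    devPred = svm.predict(scaler.transform(devFeatures))
                    # mean accuracy on the development set
                    cGammaScores[cIdx, gIdx] = svm.score(
                        scaler.transform(devFeatures), devClassLabels)
                    # unweighted average recall = mean of the per-class recalls
                    cGammaScoresUAR[cIdx, gIdx] = np.mean(
                        recall_score(devClassLabels, devPred, average=None))
                    gIdx += 1
                cIdx += 1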
for fold in range(0, nFolds):
    print "Binary Predictions"
    print("Fold: " + str(fold))
    C_bin = Cs_bin[fold]
    gamma_bin = gammas_bin[fold]

    # FIRST STAGE: BINARY CLASSIFICATION
    print "First Stage: Binary Classification"
    # Organize label for BIN CLASSIFICATOR
    y_train_bin = dm.data_bin_organize(trainset_l, y)
    y_devel_bin = dm.data_bin_organize(develset_l, yd)

    curSupervecPath_bin = os.path.join(supervecPath, "trainset_" + str(fold),
                                       str(nMixtures_bin))
    #TODO LOAD FEATURES
    trainFeatures = utl.readfeatures(curSupervecPath_bin, y)
    testFeatures = utl.readfeatures(curSupervecPath_bin, yd)
    trainClassLabels = y_train_bin
    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
    scaler.fit(trainFeatures)
    svm = SVC(C=C_bin, kernel='rbf', gamma=gamma_bin)
    svm.fit(scaler.transform(trainFeatures), trainClassLabels)
    predLabels = svm.predict(scaler.transform(testFeatures))

    print "Multiclass Predictions"
    C_class = Cs_class[fold - 1]
    gamma_class = gammas_class[fold - 1]

    curSupervecPath_class = os.path.join(supervecPath, "trainset_" + str(fold),
                                         str(nMixtures_class))
    # TODO LOAD FEATURES
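    # Hypothetical reconstruction of the elided multiclass stage (an assumption,
    # not the original code): load the class-level supervectors, train an RBF
    # SVM, and derive A (accuracy), UAR (unweighted average recall) and CM
    # (confusion matrix). y_devel_lab and the sklearn.metrics helpers
    # accuracy_score / confusion_matrix / recall_score are assumed to be
    # available in the original module.
    trainFeatures_class = utl.readfeatures(curSupervecPath_class, y)
    testFeatures_class = utl.readfeatures(curSupervecPath_class, yd)
    scaler_class = preprocessing.MinMaxScaler(feature_range=(-1, 1))
    scaler_class.fit(trainFeatures_class)
    svm_class = SVC(C=C_class, kernel='rbf', gamma=gamma_class)
    svm_class.fit(scaler_class.transform(trainFeatures_class), y_train_lab)
    y_pred = svm_class.predict(scaler_class.transform(testFeatures_class))
    y_true = y_devel_lab
    A = accuracy_score(y_true, y_pred)
    CM = confusion_matrix(y_true, y_pred)
    UAR = np.mean(recall_score(y_true, y_pred, average=None))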
    print(classification_report(y_true, y_pred, target_names=['V', 'O', 'T', 'E']))
    recall_report = recall_score(y_true, y_pred, labels=['0', '1', '2', '3'], average=None)

    return A, UAR, CM, y_pred, recall_report


for fold in range(0, nFolds):
        print("Fold: " + str(fold));
        C = Cs[fold];
        gamma = gammas[fold];
        BM = Best_model

        print "Final Test SVM Classifier"
        curSupervecPath = os.path.join(supervecPath, "trainset_" + str(fold), str(nMixtures));
        #TODO LOAD FEATURES
        trainFeatures = utl.readfeatures(curSupervecPath, y)
        devFeatures = utl.readfeatures(curSupervecPath, yd)
        testFeatures = utl.readfeatures(curSupervecPath, yt)
        trainClassLabels = y_train_lab

        #EXTEND TRAINSET
        #trainFeatures = np.vstack((trainFeatures,devFeatures[:140,:]))
        #devFeatures = devFeatures[140:]

        scaler = preprocessing.MinMaxScaler(feature_range=(-1,1));
        scaler.fit(trainFeatures);
        svm = SVC(C=C, kernel='rbf', gamma=gamma, class_weight='auto', probability=True)
        svm.fit(scaler.transform(trainFeatures), trainClassLabels);
        predLabels_train = svm.predict(scaler.transform(trainFeatures));
        predLabels_dev = svm.predict(scaler.transform(devFeatures));
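    # A minimal evaluation sketch (an assumption, not part of the source):
    # predict on the held-out test partition and report accuracy and UAR on the
    # development set. y_devel_lab and the sklearn.metrics functions
    # accuracy_score / recall_score are assumed to be available.
    predLabels_test = svm.predict(scaler.transform(testFeatures))  # predictions on the test partition
    dev_acc = accuracy_score(y_devel_lab, predLabels_dev)
    dev_uar = np.mean(recall_score(y_devel_lab, predLabels_dev, average=None))
    print("Dev accuracy: " + str(dev_acc) + "  Dev UAR: " + str(dev_uar))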
Example #4

for m in mixtures:
    print("Mixture: " + str(m))
    sys.stdout.flush()
    mixScores = np.zeros((nFolds * (nFolds - 1), 1))
    fIdx = 0
    for fold in range(0, nFolds):
        cGammaScores = np.zeros(
            (C_range.shape[0],
             gamma_range.shape[0]))  # initialize the score matrix
        print("Fold: " + str(fold))
        sys.stdout.flush()
        curSupervecPath = os.path.join(supervecPath, "trainset_" + str(fold))
        for sf in range(0, nFolds):
            print("Subfold: " + str(sf))
            sys.stdout.flush()
            curSupervecSubPath = os.path.join(curSupervecPath, str(m))
            trainFeatures = utl.readfeatures(curSupervecSubPath, y)
            trainClassLabels = y_train_lab

            # devFeatures = utl.readfeatures(curSupervecSubPath, yd)
            # devClassLabels = y_devel_lab

            # ERROR TEST: use the training data as the dev set
            devFeatures = utl.readfeatures(curSupervecSubPath, y)
            devClassLabels = y_train_lab

            # #EXTEND TRAINSET
            # trainFeatures = np.vstack((trainFeatures,devFeatures[:140,:]))
            # devFeatures = devFeatures[140:]

            cIdx = 0
            for C in C_range:
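
utl.readfeatures is a project-specific helper that is never shown in these
snippets. Purely as an illustration, and assuming each entry of the list it
receives names a saved GMM supervector in the given directory, a hypothetical
minimal version could look like the following (the one-.npy-file-per-item
layout is an assumption):

import os
import numpy as np

def readfeatures_sketch(featPath, items):
    """Hypothetical stand-in for utl.readfeatures: stack one saved
    supervector per item into a (len(items), dim) feature matrix."""
    rows = []
    for name in items:
        # assumed layout: one .npy supervector file per item
        rows.append(np.load(os.path.join(featPath, str(name) + ".npy")))
    return np.vstack(rows)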
Example #5
nMixtures = joblib.load(os.path.join(scoresPath, 'nmix2'))
Cs = joblib.load(os.path.join(scoresPath, 'cBestValues2'))        # best C per fold
gammas = joblib.load(os.path.join(scoresPath, 'gBestValues2'))    # best gamma per fold
Best_model = joblib.load(os.path.join(scoresPath, 'best_model'))
fold = 0

print("Fold: " + str(fold));
C = Cs[fold];
gamma = gammas[fold];
BM = Best_model

print "Loading Features"
curSupervecPath = os.path.join(supervecPath, "trainset_" + str(fold), str(nMixtures));

V_feat = utl.readfeatures(curSupervecPath, V)
O_feat = utl.readfeatures(curSupervecPath, O)
T_feat = utl.readfeatures(curSupervecPath, T)
E_feat = utl.readfeatures(curSupervecPath, E)

X_t = np.concatenate((T_feat, E_feat), axis=0)
X_train = normalize(X_t)

input_shape = X_train.shape[1]
dropout_rate = 0.25
opt = Adam(learning_rate=1e-4)   # generator optimizer
dopt = Adam(learning_rate=1e-3)  # discriminator optimizer

# Build Generative model ...

g_input = Input(shape=(input_shape,))
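
# A minimal sketch of how the generator could be completed (an assumption built
# only on the names already defined above; layer sizes and activations are
# illustrative, and Dense, Dropout, LeakyReLU and Model are assumed to be
# imported from keras):
H = Dense(256)(g_input)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Dense(256)(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
g_output = Dense(input_shape, activation='tanh')(H)  # same dimensionality as the supervectors
generator = Model(g_input, g_output)
generator.compile(loss='binary_crossentropy', optimizer=opt)
generator.summary()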