Example #1
        line_spacing, comment = categorize.determine_line_spacing(
            raw_line_spacing)
        print "Line Spacing: " + comment

        raw_word_spacing = raw_features[4]
        word_spacing, comment = categorize.determine_word_spacing(
            raw_word_spacing)
        print "Word Spacing: " + comment

        raw_pen_pressure = raw_features[5]
        pen_pressure, comment = categorize.determine_pen_pressure(
            raw_pen_pressure)
        print "Pen Pressure: " + comment

        raw_slant_angle = raw_features[6]
        slant_angle, comment = categorize.determine_slant_angle(
            raw_slant_angle)
        print "Slant: " + comment

        print
        print "Emotional Stability: ", clf1.predict(
            [[baseline_angle, slant_angle]])
        print "Mental Energy or Will Power: ", clf2.predict(
            [[letter_size, pen_pressure]])
        print "Modesty: ", clf3.predict([[letter_size, top_margin]])
        print "Personal Harmony and Flexibility: ", clf4.predict(
            [[line_spacing, word_spacing]])
        print "Lack of Discipline: ", clf5.predict([[slant_angle, top_margin]])
        print "Poor Concentration: ", clf6.predict(
            [[letter_size, line_spacing]])
        print "Non Communicativeness: ", clf7.predict(
            [[letter_size, word_spacing]])
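Each feature block in these examples follows the same pattern: take the raw value at a fixed index, pass it to the matching categorize.determine_* function, and report the returned comment. A minimal Python 3 sketch of that pattern as a single loop, assuming the categorize functions and the feature index order (0-6) used throughout the examples:

# Hypothetical helper: categorize all seven handwriting features in one pass.
# Assumes the categorize module and feature index order shown in the examples.
FEATURE_FUNCS = [
    ("Baseline Angle", categorize.determine_baseline_angle),
    ("Top Margin", categorize.determine_top_margin),
    ("Letter Size", categorize.determine_letter_size),
    ("Line Spacing", categorize.determine_line_spacing),
    ("Word Spacing", categorize.determine_word_spacing),
    ("Pen Pressure", categorize.determine_pen_pressure),
    ("Slant", categorize.determine_slant_angle),
]

def categorize_features(raw_features):
    """Return {feature name: (category value, comment)} for one sample."""
    categorized = {}
    for (name, determine), raw in zip(FEATURE_FUNCS, raw_features):
        value, comment = determine(raw)
        categorized[name] = (value, comment)
        print(name + ": " + comment)
    return categorized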
Example #2
def learning():
    score1=0
    score2=0
    score3=0
    score4=0
    score5=0
    score6=0
    score7=0
    score8=0

    if request.method=='POST':
        data1=request.form['userinput1']
        data2=request.form['userinput2']
        data3=request.form['userinput3']
        data4=request.form['userinput4']
        data5=request.form['userinput5']
        response1=int(sc_tf_idf.classify(process_message(data1)))
        response2=int(sc_tf_idf.classify(process_message(data2)))
        response3=int(sc_tf_idf.classify(process_message(data3)))
        response4=int(sc_tf_idf.classify(process_message(data4)))
        response5=int(sc_tf_idf.classify(process_message(data5)))
        score=0.2*(response1+response2+response3+response4+response5)
    

        sex=request.form['sex']
        age=request.form['age']
        p_status=request.form['p_status']
        mjob=request.form['mjob']
        fjob=request.form['fjob']
        reason=request.form['reason']
        stime=request.form['stime']
        fail=request.form['fail']
        famsup=request.form['famsup']
        act=request.form['act']
        net=request.form['net']
        rel=request.form['rel']
        famrel=request.form['famrel']
        free=request.form['free']
        goout=request.form['goout']
        dalc=request.form['dalc']
        walc=request.form['walc']
        health=request.form['health']
        abscences=request.form['abscences']
        cgpa=request.form['cgpa']
        data = [sex, age, p_status, mjob, fjob, reason, stime, fail, famsup, act,
                net, rel, famrel, free, goout, dalc, walc, health, abscences]
        with open('testing.csv','a') as fd:
            for i in data:
                fd.write(i)
                fd.write(',')
            fd.write(cgpa)
            fd.write('\n')

        student_data = pd.read_csv("students.csv", encoding='utf-8')
        test = pd.read_csv("testing.csv", encoding='utf-8')
        n_students = student_data.shape[0]
        n_features = student_data.shape[1] - 1
        n_passed = student_data["risk"].value_counts()[1]
        n_failed = student_data["risk"].value_counts()[0]
        grad_rate = float(n_passed)/n_students*100


        feature_cols = list(student_data.columns[:-1])  # all columns but last are features
        target_col = student_data.columns[-1]  # last column is the target/label

        X_all = student_data[feature_cols]  # feature values for all students
        y_all = student_data[target_col]  # corresponding targets/labels

        test_example = test[feature_cols]

        X_all = dews.preprocess_features(X_all)
        test_example = dews.preprocess_features(test_example)
        test_example = test_example.iloc[-1:]

        num_all = student_data.shape[0]  # same as len(student_data)
        num_train = 316  # about 80% of the data
        num_test = 79

        X_train, X_test, y_train, y_test = train_test_split(
            X_all, y_all, train_size=num_train, test_size=num_test, stratify=y_all)


        f1_scorer = make_scorer(f1_score, pos_label=1)

        parameters = {'max_depth': range(1,15)}
        dt = DecisionTreeClassifier()
        grid_search = GridSearchCV(dt,parameters,scoring=f1_scorer)
        grid_search.fit(X_train,y_train)

        dt_tuned = DecisionTreeClassifier(max_depth=3)
        dt_tuned.fit(X_train,y_train)

        # Subset the Dataset by removing features whose 'importance' is zero, 
        # according to a tuned Decision tree in 1.1 
        sub = np.nonzero(dt_tuned.feature_importances_)[0].tolist()
        subset_cols = list(X_train.columns[sub])
        X_train_subset = X_train[subset_cols]
        X_test_subset = X_test[subset_cols]
        test_example_subset = test_example[subset_cols]

        clf_default = KNeighborsClassifier()

        # Determine the number of nearest neighbors that optimizes accuracy 
        parameters = {'n_neighbors': range(1,30)}
        knn = KNeighborsClassifier()
        knn_tuned = GridSearchCV(knn,parameters,scoring=f1_scorer)
        knn_tuned.fit(X_train_subset,y_train)
        clf_tuned = KNeighborsClassifier(n_neighbors=knn_tuned.best_params_['n_neighbors'])
        y = dews.train_predict("Subset_KNN", X_train_subset, y_train, X_test_subset, y_test, test_example_subset, 300, clf_default, clf_tuned)

        file1 = request.files['file1']
        file2 = request.files['file2']
        file3 = request.files['file3']
        file4 = request.files['file4']
        file5 = request.files['file5']

        if (file1 and allowed_file(file1.filename)
                and file2 and allowed_file(file2.filename)
                and file3 and allowed_file(file3.filename)
                and file4 and allowed_file(file4.filename)
                and file5 and allowed_file(file5.filename)):
            file_names = [secure_filename(file1.filename),
                          secure_filename(file2.filename),
                          secure_filename(file3.filename),
                          secure_filename(file4.filename),
                          secure_filename(file5.filename)]

            for file_name in file_names:
                    
                raw_features = extract.start(file_name)
                
                raw_baseline_angle = raw_features[0]
                baseline_angle, comment = categorize.determine_baseline_angle(raw_baseline_angle)
                #print ("Baseline Angle: "+comment)
                
                raw_top_margin = raw_features[1]
                top_margin, comment = categorize.determine_top_margin(raw_top_margin)
                #print ("Top Margin: "+comment)
                
                raw_letter_size = raw_features[2]
                letter_size, comment = categorize.determine_letter_size(raw_letter_size)
                #print ("Letter Size: "+comment)
                
                raw_line_spacing = raw_features[3]
                line_spacing, comment = categorize.determine_line_spacing(raw_line_spacing)
                #print ("Line Spacing: "+comment)
                
                raw_word_spacing = raw_features[4]
                word_spacing, comment = categorize.determine_word_spacing(raw_word_spacing)
                #print ("Word Spacing: "+comment)
                
                raw_pen_pressure = raw_features[5]
                pen_pressure, comment = categorize.determine_pen_pressure(raw_pen_pressure)
                #print ("Pen Pressure: "+comment)
                
                raw_slant_angle = raw_features[6]
                slant_angle, comment = categorize.determine_slant_angle(raw_slant_angle)
                #print ("Slant: "+comment)
                
                #print
                score1+=0.2*clf1.predict([[baseline_angle, slant_angle]])[0]
                score2+=0.2*clf2.predict([[letter_size, pen_pressure]])[0]
                score3+=0.2*clf3.predict([[letter_size, top_margin]])[0]
                score4+=0.2*clf4.predict([[line_spacing, word_spacing]])[0]
                score5+=0.2*clf5.predict([[slant_angle, top_margin]])[0]
                score6+=0.2*clf6.predict([[letter_size, line_spacing]])[0]
                score7+=0.2*clf7.predict([[letter_size, word_spacing]])[0]
                score8+=0.2*clf8.predict([[line_spacing, word_spacing]])[0]
                score_hand = [str(round(score1, 2)), str(round(score2, 2)),
                              str(round(score3, 2)), str(round(score4, 2)),
                              str(round(score5, 2)), str(round(score6, 2)),
                              str(round(score7, 2)), str(round(score8, 2))]
        max1 = 0
        max2 = 0
        i1 = 0
        i2 = 0
        print(score_hand)
        # keep the two highest trait scores and their indices
        for i in range(8):
            s = float(score_hand[i])
            if s > max1:
                max2, i2 = max1, i1
                max1, i1 = s, i
            elif s > max2:
                max2, i2 = s, i
        array = ['emotional stability', 'mental energy or will power', 'modesty',
                 'personal harmony and flexibility', 'lack of discipline',
                 'poor concentration', 'non communicativeness', 'social isolation']
        final_score=[fuzzy(scoring(score_hand),score,y),array[i1],max1,array[i2],max2]
        return render_template('result.html', result=final_score)
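The testing.csv append above writes each field and comma by hand; a minimal sketch of the same row append with the standard csv module, assuming the data list and cgpa string built in the handler:

import csv

# Hypothetical equivalent of the manual fd.write() loop: append one row
# (all form fields followed by cgpa) to testing.csv.
with open('testing.csv', 'a', newline='') as fd:
    csv.writer(fd).writerow(data + [cgpa])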
Example #3
def predict_all(file_name):
    result = {}
    X_baseline_angle = []
    X_top_margin = []
    X_letter_size = []
    X_line_spacing = []
    X_word_spacing = []
    X_pen_pressure = []
    X_slant_angle = []
    y_t1 = []
    y_t2 = []
    y_t3 = []
    y_t4 = []
    y_t5 = []
    y_t6 = []
    y_t7 = []
    y_t8 = []
    page_ids = []

    if os.path.isfile("label_list"):
        print(("Info: label_list found."))
        # =================================================================
    with open('label_list', "r") as labels:
        for line in labels:
            content = line.split()

            baseline_angle = float(content[0])
            X_baseline_angle.append(baseline_angle)

            top_margin = float(content[1])
            X_top_margin.append(top_margin)

            letter_size = float(content[2])
            X_letter_size.append(letter_size)

            line_spacing = float(content[3])
            X_line_spacing.append(line_spacing)

            word_spacing = float(content[4])
            X_word_spacing.append(word_spacing)

            pen_pressure = float(content[5])
            X_pen_pressure.append(pen_pressure)

            slant_angle = float(content[6])
            X_slant_angle.append(slant_angle)

            trait_1 = float(content[7])
            y_t1.append(trait_1)

            trait_2 = float(content[8])
            y_t2.append(trait_2)

            trait_3 = float(content[9])
            y_t3.append(trait_3)

            trait_4 = float(content[10])
            y_t4.append(trait_4)

            trait_5 = float(content[11])
            y_t5.append(trait_5)

            trait_6 = float(content[12])
            y_t6.append(trait_6)

            trait_7 = float(content[13])
            y_t7.append(trait_7)

            trait_8 = float(content[14])
            y_t8.append(trait_8)

            page_id = content[15]
            page_ids.append(page_id)
    #===============================================================

    # emotional stability
    X_t1 = []
    for a, b in itertools.zip_longest(X_baseline_angle, X_slant_angle):
        X_t1.append([a, b])

    # mental energy or will power
    X_t2 = []
    for a, b in itertools.zip_longest(X_letter_size, X_pen_pressure):
        X_t2.append([a, b])

    # modesty
    X_t3 = []
    for a, b in itertools.zip_longest(X_letter_size, X_top_margin):
        X_t3.append([a, b])

    # personal harmony and flexibility
    X_t4 = []
    for a, b in itertools.zip_longest(X_line_spacing, X_word_spacing):
        X_t4.append([a, b])

    # lack of discipline
    X_t5 = []
    for a, b in itertools.zip_longest(X_slant_angle, X_top_margin):
        X_t5.append([a, b])

    # poor concentration
    X_t6 = []
    for a, b in itertools.zip_longest(X_letter_size, X_line_spacing):
        X_t6.append([a, b])

    # non communicativeness
    X_t7 = []
    for a, b in itertools.zip_longest(X_letter_size, X_word_spacing):
        X_t7.append([a, b])

    # social isolation
    X_t8 = []
    for a, b in itertools.zip_longest(X_line_spacing, X_word_spacing):
        X_t8.append([a, b])

    #print(X_t1)
    #print(type(X_t1))
    #print(len(X_t1))

    X_train, X_test, y_train, y_test = train_test_split(X_t1,
                                                        y_t1,
                                                        test_size=.30,
                                                        random_state=8)
    clf1 = SVC(kernel='rbf')
    clf1.fit(X_train, y_train)
    print("Classifier 1 accuracy: ",
          accuracy_score(clf1.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t2,
                                                        y_t2,
                                                        test_size=.30,
                                                        random_state=16)
    clf2 = SVC(kernel='rbf')
    clf2.fit(X_train, y_train)
    print("Classifier 2 accuracy: ",
          accuracy_score(clf2.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t3,
                                                        y_t3,
                                                        test_size=.30,
                                                        random_state=32)
    clf3 = SVC(kernel='rbf')
    clf3.fit(X_train, y_train)
    print("Classifier 3 accuracy: ",
          accuracy_score(clf3.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t4,
                                                        y_t4,
                                                        test_size=.30,
                                                        random_state=64)
    clf4 = SVC(kernel='rbf')
    clf4.fit(X_train, y_train)
    print("Classifier 4 accuracy: ",
          accuracy_score(clf4.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t5,
                                                        y_t5,
                                                        test_size=.30,
                                                        random_state=42)
    clf5 = SVC(kernel='rbf')
    clf5.fit(X_train, y_train)
    print("Classifier 5 accuracy: ",
          accuracy_score(clf5.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t6,
                                                        y_t6,
                                                        test_size=.30,
                                                        random_state=52)
    clf6 = SVC(kernel='rbf')
    clf6.fit(X_train, y_train)
    print("Classifier 6 accuracy: ",
          accuracy_score(clf6.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t7,
                                                        y_t7,
                                                        test_size=.30,
                                                        random_state=21)
    clf7 = SVC(kernel='rbf')
    clf7.fit(X_train, y_train)
    print("Classifier 7 accuracy: ",
          accuracy_score(clf7.predict(X_test), y_test))

    X_train, X_test, y_train, y_test = train_test_split(X_t8,
                                                        y_t8,
                                                        test_size=.30,
                                                        random_state=73)
    clf8 = SVC(kernel='rbf')
    clf8.fit(X_train, y_train)
    print("Classifier 8 accuracy: ",
          accuracy_score(clf8.predict(X_test), y_test))

    #================================================================================================

    raw_features = extract.start(file_name)

    raw_baseline_angle = raw_features[0]
    baseline_angle, comment = categorize.determine_baseline_angle(
        raw_baseline_angle)
    result["Baseline Angle"] = comment

    raw_top_margin = raw_features[1]
    top_margin, comment = categorize.determine_top_margin(raw_top_margin)
    result["Top Margin"] = comment

    raw_letter_size = raw_features[2]
    letter_size, comment = categorize.determine_letter_size(raw_letter_size)
    result["Letter Size"] = comment

    raw_line_spacing = raw_features[3]
    line_spacing, comment = categorize.determine_line_spacing(raw_line_spacing)
    result["Line Spacing"] = comment

    raw_word_spacing = raw_features[4]
    word_spacing, comment = categorize.determine_word_spacing(raw_word_spacing)
    result["Word Spacing"] = comment

    raw_pen_pressure = raw_features[5]
    pen_pressure, comment = categorize.determine_pen_pressure(raw_pen_pressure)
    result["Pen Pressure"] = comment

    raw_slant_angle = raw_features[6]
    slant_angle, comment = categorize.determine_slant_angle(raw_slant_angle)
    result["Slant"] = comment

    result["Emotional Stability"] = clf1.predict(
        [[baseline_angle, slant_angle]])
    result["Mental Energy or Will Power"] = clf2.predict(
        [[letter_size, pen_pressure]])
    result["Modesty"] = clf3.predict([[letter_size, top_margin]])
    result["Personal Harmony and Flexibility"] = clf4.predict(
        [[line_spacing, word_spacing]])
    result["Lack of Discipline"] = clf5.predict([[slant_angle, top_margin]])
    result["Poor Concentration"] = clf6.predict([[letter_size, line_spacing]])
    result["Non Communicativeness"] = clf7.predict(
        [[letter_size, word_spacing]])
    result["Social Isolation"] = clf8.predict([[line_spacing, word_spacing]])

    return result
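The eight classifier blocks above differ only in the feature pairs, label lists, and random seeds. A minimal sketch that fits the same RBF SVCs in a loop, assuming the X_t1..X_t8 and y_t1..y_t8 lists and the random states used above:

# Hypothetical loop over the eight trait datasets built above.
trait_data = [
    (X_t1, y_t1, 8), (X_t2, y_t2, 16), (X_t3, y_t3, 32), (X_t4, y_t4, 64),
    (X_t5, y_t5, 42), (X_t6, y_t6, 52), (X_t7, y_t7, 21), (X_t8, y_t8, 73),
]
classifiers = []
for idx, (X, y, seed) in enumerate(trait_data, start=1):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=.30, random_state=seed)
    clf = SVC(kernel='rbf')
    clf.fit(X_train, y_train)
    classifiers.append(clf)
    print("Classifier %d accuracy: %.3f"
          % (idx, accuracy_score(clf.predict(X_test), y_test)))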
Example #4
def identifyPersonalityTraits(file_name_orig):

    clf_emotional_stability = load('knowme_EmotionalSt.joblib')
    clf_knowme_MentalE_WlPower = load('knowme_MentalE_WlPower.joblib')
    clf_knowme_Modesty = load('knowme_Modesty.joblib')
    clf_lackOfDiscipline = load('lackOfDiscipline.joblib')
    clf_PoorConcentration = load('PoorConcentration.joblib')
    clf_SocialIsolation = load('SocialIsolation.joblib')

    # file_name_orig ="Michael_HW.png"
    # crop(file_name_orig)
    file_name = resize(file_name_orig)

    raw_features = extract.start(file_name)
    raw_baseline_angle = raw_features[0]
    baseline_angle, comment = categorize.determine_baseline_angle(
        raw_baseline_angle)
    print("Baseline Angle: " + comment)

    raw_top_margin = raw_features[1]
    top_margin, comment = categorize.determine_top_margin(raw_top_margin)
    print("Top Margin: " + comment)

    raw_letter_size = raw_features[2]
    letter_size, comment = categorize.determine_letter_size(raw_letter_size)
    print("Letter Size: " + comment)

    raw_line_spacing = raw_features[3]
    line_spacing, comment = categorize.determine_line_spacing(raw_line_spacing)
    print("Line Spacing: " + comment)

    raw_word_spacing = raw_features[4]
    word_spacing, comment = categorize.determine_word_spacing(raw_word_spacing)
    print("Word Spacing: " + comment)

    raw_pen_pressure = raw_features[5]
    pen_pressure, comment = categorize.determine_pen_pressure(raw_pen_pressure)
    print("Pen Pressure: " + comment)

    raw_slant_angle = raw_features[6]
    slant_angle, comment = categorize.determine_slant_angle(raw_slant_angle)
    print("Slant: " + comment)

    emotional_stability = clf_emotional_stability.predict(
        [[baseline_angle, slant_angle]])
    MentalE_WlPower = clf_knowme_MentalE_WlPower.predict(
        [[letter_size, pen_pressure]])
    Modesty = clf_knowme_Modesty.predict([[letter_size, top_margin]])
    Discipline = clf_lackOfDiscipline.predict([[slant_angle, top_margin]])
    Concentration = clf_PoorConcentration.predict([[letter_size,
                                                    line_spacing]])
    SocialIsolation = clf_SocialIsolation.predict(
        [[line_spacing, word_spacing]])

    if (emotional_stability[0] == 1):
        emotional_stability = "Stable"
    else:
        emotional_stability = "Not Stable"
    if (MentalE_WlPower[0] == 1):
        MentalE_WlPower = "High or Average"
    else:
        MentalE_WlPower = "Low"
    if (Modesty[0] == 1):
        modesty = "Observed"
    else:
        modesty = "Not Observed"
    if (Concentration[0] == 1):
        concentration = "Observed"
    else:
        concentration = "Not Observed"
    if (Discipline[0] == 1):
        discipline = "Observed"
    else:
        discipline = "Not Observed"
    if (SocialIsolation[0] == 1):
        SocialIsolation = "Observed"
    else:
        SocialIsolation = "Not Observed"

    personality_Trait_dict = {
        "Emotional_Stability": emotional_stability,
        "Mental_Power": MentalE_WlPower,
        "Modesty": modesty,
        "Discipline": discipline,
        "Concentration": concentration,
        "Social_Isolation": SocialIsolation
    }

    print(personality_Trait_dict)
    return personality_Trait_dict
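The if/else chain above converts each 0/1 prediction to a display label. A minimal sketch of the same mapping with one helper, assuming the prediction arrays returned by the predict calls above:

# Hypothetical helper: map a binary prediction array to the label strings above.
def as_label(pred, positive="Observed", negative="Not Observed"):
    return positive if pred[0] == 1 else negative

personality_Trait_dict = {
    "Emotional_Stability": as_label(emotional_stability, "Stable", "Not Stable"),
    "Mental_Power": as_label(MentalE_WlPower, "High or Average", "Low"),
    "Modesty": as_label(Modesty),
    "Discipline": as_label(Discipline),
    "Concentration": as_label(Concentration),
    "Social_Isolation": as_label(SocialIsolation),
}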
Example #5
def predict(fname):
    X_baseline_angle = []
    X_top_margin = []
    X_letter_size = []
    X_line_spacing = []
    X_word_spacing = []
    X_pen_pressure = []
    X_slant_angle = []
    y_t1 = []
    y_t2 = []
    y_t3 = []
    y_t4 = []
    y_t5 = []
    y_t6 = []
    y_t7 = []
    y_t8 = []
    page_ids = []

    if os.path.isfile("prediction\label_list"):
        print("Info: label_list found.")
        #=================================================================
        with open("prediction\label_list", "r") as labels:
            for line in labels:
                content = line.split()

                baseline_angle = float(content[0])
                X_baseline_angle.append(baseline_angle)

                top_margin = float(content[1])
                X_top_margin.append(top_margin)

                letter_size = float(content[2])
                X_letter_size.append(letter_size)

                line_spacing = float(content[3])
                X_line_spacing.append(line_spacing)

                word_spacing = float(content[4])
                X_word_spacing.append(word_spacing)

                pen_pressure = float(content[5])
                X_pen_pressure.append(pen_pressure)

                slant_angle = float(content[6])
                X_slant_angle.append(slant_angle)

                trait_1 = float(content[7])
                y_t1.append(trait_1)

                trait_2 = float(content[8])
                y_t2.append(trait_2)

                trait_3 = float(content[9])
                y_t3.append(trait_3)

                trait_4 = float(content[10])
                y_t4.append(trait_4)

                trait_5 = float(content[11])
                y_t5.append(trait_5)

                trait_6 = float(content[12])
                y_t6.append(trait_6)

                trait_7 = float(content[13])
                y_t7.append(trait_7)

                trait_8 = float(content[14])
                y_t8.append(trait_8)

                page_id = content[15]
                page_ids.append(page_id)
        #===============================================================

        # emotional stability
        X_t1 = []
        for a, b in itertools.izip(X_baseline_angle, X_slant_angle):
            X_t1.append([a, b])

        # mental energy or will power
        X_t2 = []
        for a, b in itertools.izip(X_letter_size, X_pen_pressure):
            X_t2.append([a, b])

        # modesty
        X_t3 = []
        for a, b in itertools.izip(X_letter_size, X_top_margin):
            X_t3.append([a, b])

        # personal harmony and flexibility
        X_t4 = []
        for a, b in itertools.izip(X_line_spacing, X_word_spacing):
            X_t4.append([a, b])

        # lack of discipline
        X_t5 = []
        for a, b in itertools.izip(X_slant_angle, X_top_margin):
            X_t5.append([a, b])

        # poor concentration
        X_t6 = []
        for a, b in itertools.izip(X_letter_size, X_line_spacing):
            X_t6.append([a, b])

        # non communicativeness
        X_t7 = []
        for a, b in itertools.izip(X_letter_size, X_word_spacing):
            X_t7.append([a, b])

        # social isolation
        X_t8 = []
        for a, b in itertools.izip(X_line_spacing, X_word_spacing):
            X_t8.append([a, b])

        #print X_t1
        #print type(X_t1)
        #print len(X_t1)

        X_train, X_test, y_train, y_test = train_test_split(X_t1,
                                                            y_t1,
                                                            test_size=.30,
                                                            random_state=8)
        clf1 = SVC(kernel='rbf')
        clf1.fit(X_train, y_train)
        print "Classifier 1 accuracy: ", accuracy_score(
            clf1.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t2,
                                                            y_t2,
                                                            test_size=.30,
                                                            random_state=16)
        clf2 = SVC(kernel='rbf')
        clf2.fit(X_train, y_train)
        print "Classifier 2 accuracy: ", accuracy_score(
            clf2.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t3,
                                                            y_t3,
                                                            test_size=.30,
                                                            random_state=32)
        clf3 = SVC(kernel='rbf')
        clf3.fit(X_train, y_train)
        print "Classifier 3 accuracy: ", accuracy_score(
            clf3.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t4,
                                                            y_t4,
                                                            test_size=.30,
                                                            random_state=64)
        clf4 = SVC(kernel='rbf')
        clf4.fit(X_train, y_train)
        print "Classifier 4 accuracy: ", accuracy_score(
            clf4.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t5,
                                                            y_t5,
                                                            test_size=.30,
                                                            random_state=42)
        clf5 = SVC(kernel='rbf')
        clf5.fit(X_train, y_train)
        print "Classifier 5 accuracy: ", accuracy_score(
            clf5.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t6,
                                                            y_t6,
                                                            test_size=.30,
                                                            random_state=52)
        clf6 = SVC(kernel='rbf')
        clf6.fit(X_train, y_train)
        print "Classifier 6 accuracy: ", accuracy_score(
            clf6.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t7,
                                                            y_t7,
                                                            test_size=.30,
                                                            random_state=21)
        clf7 = SVC(kernel='rbf')
        clf7.fit(X_train, y_train)
        print "Classifier 7 accuracy: ", accuracy_score(
            clf7.predict(X_test), y_test)

        X_train, X_test, y_train, y_test = train_test_split(X_t8,
                                                            y_t8,
                                                            test_size=.30,
                                                            random_state=73)
        clf8 = SVC(kernel='rbf')
        clf8.fit(X_train, y_train)
        print "Classifier 8 accuracy: ", accuracy_score(
            clf8.predict(X_test), y_test)

        #================================================================================================

        while True:
            #file_name = raw_input("Enter file name to predict or z to exit: ")
            #if file_name == 'z':
            #	break

            raw_features = extract.start(fname)

            comm = {}
            raw_baseline_angle = raw_features[0]
            baseline_angle, comment = categorize.determine_baseline_angle(
                raw_baseline_angle)
            print "Baseline Angle: " + comment
            comm["Baseline_Angle"] = comment

            raw_top_margin = raw_features[1]
            top_margin, comment = categorize.determine_top_margin(
                raw_top_margin)
            print "Top Margin: " + comment
            comm["Top_margin"] = comment

            raw_letter_size = raw_features[2]
            letter_size, comment = categorize.determine_letter_size(
                raw_letter_size)
            print "Letter Size: " + comment
            comm["Letter_Size"] = comment

            raw_line_spacing = raw_features[3]
            line_spacing, comment = categorize.determine_line_spacing(
                raw_line_spacing)
            print "Line Spacing: " + comment
            comm["Line_Spacing"] = comment

            raw_word_spacing = raw_features[4]
            word_spacing, comment = categorize.determine_word_spacing(
                raw_word_spacing)
            print "Word Spacing: " + comment
            comm["Word_Spacing"] = comment

            raw_pen_pressure = raw_features[5]
            pen_pressure, comment = categorize.determine_pen_pressure(
                raw_pen_pressure)
            print "Pen Pressure: " + comment
            comm["Pen_Pressure"] = comment

            raw_slant_angle = raw_features[6]
            slant_angle, comment = categorize.determine_slant_angle(
                raw_slant_angle)
            print "Slant: " + comment
            comm["Slant"] = comment

            Emotional_stability = clf1.predict([[baseline_angle,
                                                 slant_angle]])[0]
            comm["Emotional_Stability"] = Emotional_stability

            Mental_energy = clf2.predict([[letter_size, pen_pressure]])[0]
            comm["Mental_Energy_or_Will_power"] = Mental_energy

            Modesty = clf3.predict([[letter_size, top_margin]])[0]
            comm["Modesty"] = Modesty

            Harmony = clf4.predict([[line_spacing, word_spacing]])[0]
            comm["Personal_Harmony_and_Flexibility"] = Harmony

            lack_discipline = clf5.predict([[slant_angle, top_margin]])[0]
            comm["Lack_of_Discipline"] = lack_discipline

            poor_concentration = clf6.predict([[letter_size, line_spacing]])[0]
            comm["Poor_Concentration"] = poor_concentration

            Non_communicativeness = clf7.predict([[letter_size,
                                                   word_spacing]])[0]
            comm["Non_Communicativeness"] = Non_communicativeness

            Social_isolation = clf8.predict([[line_spacing, word_spacing]])[0]
            comm["Social_Isolation"] = Social_isolation

            print
            print "Emotional Stability: ", Emotional_stability
            print "Mental Energy or Will Power: ", Mental_energy
            print "Modesty: ", Modesty
            print "Personal Harmony and Flexibility: ", Harmony
            print "Lack of Discipline: ", lack_discipline
            print "Poor Concentration: ", poor_concentration
            print "Non Communicativeness: ", Non_communicativeness
            print "Social Isolation: ", Social_isolation
            print "---------------------------------------------------"
            print
            break
        #=================================================================================================

    else:
        print("Error: label_list file not found.")
        comm = {}
    return comm
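Examples #3 and #5 parse each label_list row one field at a time; a minimal sketch of the same parsing as a single helper, assuming the column order used above (seven features, eight trait labels, then a page id):

# Hypothetical parser for one whitespace-separated label_list row.
def parse_label_row(line):
    fields = line.split()
    features = [float(v) for v in fields[:7]]   # baseline angle ... slant angle
    traits = [float(v) for v in fields[7:15]]   # trait_1 ... trait_8
    page_id = fields[15]
    return features, traits, page_id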