Code Example #1
File: main.py  Project: rpaudel42/Glassdoor
def main():

    preprocess = Preprocess()
    preprocess.check_data_distribution()

    print("\n\n*********** ANALYSIS PART I *******************")
    partI_classifier = Classifiers(1)
    partI_classifier.draw_auc_curve(1)
Code Example #2
def sentiment_analysis(filename):
    try:
        with open(filename) as f:
            data = f.read()
        words = [word for word in tokenize(data)]
        classifiers = Classifiers()
        classifiers.get_trained()
        naive_best_words = classifiers.naive_best_words.predict(words)
        naive_bag_of_words = classifiers.naive_bag_of_words.predict(words)
        svm = classifiers.svm.predict(words)
        result = {
            'naive_best_words': dict(zip(words, naive_best_words)),
            'naive_bag_of_words': dict(zip(words, naive_bag_of_words)),
            'svm': dict(zip(words, svm))
        }
        return jsonify(result)
    except FileNotFoundError:
        return jsonify({'error': 'File not found'}), 400
Code Example #3
def sentiment_analysis_post():
    data = request.get_json().get('data')
    words = [sent for sent in tokenize(data)]
    classifiers = Classifiers()
    classifiers.get_trained()
    naive_best_words = classifiers.naive_best_words.predict_prob(words)
    naive_bag_of_words = classifiers.naive_bag_of_words.predict_prob(words)
    svm = classifiers.svm.predict(words)
    result = {
        'original_text': data,
        'classifiers': {
            'naive_best_words': naive_best_words,
            'naive_bag_of_words': naive_bag_of_words,
            'svm': svm
        }
    }
    return jsonify(result)
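Code Examples #2 and #3 are Flask view functions (they rely on `jsonify` and `request`) but the snippets omit the imports and route registration. Below is a minimal sketch of how they could be wired into an app, assuming the same module also provides `tokenize` and `Classifiers`; the URL paths and app setup are illustrative assumptions, not taken from the source project.

from flask import Flask, jsonify, request

app = Flask(__name__)

# Hypothetical routes; the actual URL paths are not shown in the snippets above.
app.add_url_rule('/sentiment/<filename>', view_func=sentiment_analysis, methods=['GET'])
app.add_url_rule('/sentiment', view_func=sentiment_analysis_post, methods=['POST'])

if __name__ == '__main__':
    app.run(debug=True)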
Code Example #4
def main():
    debug = 0
    sample_size = 8192
    
    #database = MFPT(debug=debug)
    database = Paderborn(debug=debug)

    database_acq = database.load()
    #print(database_acq)

    database_exp = Experimenter(database_acq, sample_size)
    database_exp.perform(Classifiers(), Scoring())
Code Example #5
from data_manager import DataManager
from extractors import Extractors
from classifiers import Classifiers

data_manager = DataManager()
extractor = Extractors()
classifiers = Classifiers()

X, y, encoder = data_manager.loadData()

results = []
encoded_x1 = extractor.glcm(X)
classifiers.classify(encoded_x1, encoder, y, title='GLCM - ')

encoded_x2 = extractor.lbp(X)
classifiers.classify(encoded_x2, encoder, y, title='LBP - ')

encoded_x3 = extractor.huMoments(X)
classifiers.classify(encoded_x3, encoder, y, title='HuMoments - ')

for index, result in enumerate(encoded_x1):
    # Concatenate each sample's GLCM, LBP, and Hu-moment features
    # (copy the row first so the original feature arrays are not mutated)
    new_result = list(result)
    new_result.extend(encoded_x2[index])
    new_result.extend(encoded_x3[index])

    results.append(new_result)

classifiers.classify(results, encoder, y, title='Mix - ')
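The loop above builds the mixed feature matrix by concatenating each sample's GLCM, LBP, and Hu-moment vectors. If the extractors return 2-D arrays with one row per sample, the same result can be obtained with `numpy.hstack`; this is an equivalent sketch under that assumption, not the project's own code.

import numpy as np

# Column-stack the three per-sample feature blocks into one "Mix" matrix.
mixed = np.hstack([np.asarray(encoded_x1),
                   np.asarray(encoded_x2),
                   np.asarray(encoded_x3)])
classifiers.classify(mixed, encoder, y, title='Mix - ')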
Code Example #6
    Test_FeatureMatrix = pd.concat([
        Features.Deviation(inputTest, 'N/A'),
        Features.meanRange(inputTest, 'N/A')[['MeanRange']],
        Features.Range(inputTest, 'N/A')[['HighRange', 'LowRange']],
        Features.FFT(inputTest, 'N/A')[['varFFT', 'sdFFT', 'meanFFT']],
        Features.Quantile(inputTest, 'N/A')['Quantile'],
    ], axis=1)

    if int(input('Pass From PCA? 1: YES, 0: NO:\t')) == 1:
        columns = TopFeatures(Test_FeatureMatrix,
                              len(Test_FeatureMatrix.columns) - 1)
    else:
        columns = list(Test_FeatureMatrix.columns)
        columns.remove('Class')

    Test_DF = Test_FeatureMatrix[columns]

    while True:
        name = input('Enter Model Name: SVC, KNN, LOG, RFC, GNB:\t').upper()
        if name not in ['SVC', 'KNN', 'LOG', 'RFC', 'GNB']:
            break

        model = Classifiers.load(name)
        pred = model.predict(np.array(Test_DF))
        actual = np.array(outputTest['Class'])

        crosstab_stats(actual, pred)
Code Example #7
File: app.py  Project: sandeepk17/gestures
    threshold = 60  # binary threshold
    blurValue = 41  # GaussianBlur parameter
    bgSubThreshold = 50
    LEARNING_RATE = 0

    # Variables
    isBgCaptured = 0  # whether the background has been captured
    triggerSwitch = False  # allow the keyboard simulator to work
    bgModel = None

    camera = cv2.VideoCapture(0)
    camera.set(10, 200)
    cv2.namedWindow('trackbar')
    cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)

    classifiers = Classifiers()

    while camera.isOpened():
        ret, frame = camera.read()
        threshold = cv2.getTrackbarPos('trh1', 'trackbar')
        frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
        frame = cv2.flip(frame, 1)  # flip the frame horizontally
        # TODO: actually get the ROI and don't do this computation twice
        cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                      (frame.shape[1], int(cap_region_y_end * frame.shape[0])),
                      (255, 0, 0), 2)
        cv2.imshow('original', frame)

        #  Main Operation
        if isBgCaptured == 1:
            # Remove Background + # Clip the ROI
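            # (Snippet truncated here. A hedged sketch of the usual OpenCV steps,
            #  assuming bgModel was created earlier as
            #  cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold) when the
            #  background was captured; not confirmed by the source project.)
            fgmask = bgModel.apply(frame, learningRate=LEARNING_RATE)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            img = cv2.bitwise_and(frame, frame, mask=fgmask)
            # Clip the ROI to the capture rectangle drawn above
            img = img[0:int(cap_region_y_end * frame.shape[0]),
                      int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]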
Code Example #8
File: main.py  Project: omid-jf/CS519_Project_8
    x_std = sc_x.fit_transform(x)

    # Splitting data
    x_std_tr, x_std_ts, y_tr, y_ts = train_test_split(x_std,
                                                      y,
                                                      test_size=0.3,
                                                      random_state=1)

    # Running classifiers
    classifier = Classifiers(criterion="gini",
                             max_depth=None,
                             n_estimators=25,
                             max_samples=1.0,
                             max_features=1.0,
                             bootstrap=True,
                             bootstrap_features=False,
                             n_jobs=1,
                             learning_rate=0.1,
                             seed=1,
                             x_tr=x_std_tr,
                             y_tr=y_tr,
                             x_ts=x_std_ts)

    for name in ["decisiontree", "randforest", "bagging", "adaboost"]:
        print("\n\n" + name)
        y_tr_pred, y_ts_pred = classifier.call("run_" + name)

        train_accuracy = accuracy_score(y_tr, y_tr_pred)
        test_accuracy = accuracy_score(y_ts, y_ts_pred)

        print("%s - train accuracy: %.3f - test accuracy: %.3f" %
              (name, train_accuracy, test_accuracy))
Code Example #9
from classifiers import Classifiers
import sys

if __name__ == "__main__":

    dataset_index = [1, 2]
    for dataset in dataset_index:
        classifiers = Classifiers(dataset)
        clfs = {
            'GNB': classifiers.GNB,
            'Base_DT': classifiers.Base_DT,
            'Best_DT': classifiers.Best_DT,
            'PER': classifiers.PER,
            'Base_MLP': classifiers.Base_MLP,
            'Best_MLP': classifiers.Best_MLP,
        }
        try:
            if len(sys.argv) == 1:  # No extra argument passed; run all classifiers
                for key, clf in clfs.items():
                    print(f"Running: {key} for dataset {dataset}")
                    clf()
            else:
                print(f"Running: {sys.argv[1]} for dataset {dataset}")
                clfs[sys.argv[1]]()
        except KeyError:
            print(f"Supported parameter list:\n{list(clfs.keys())}")
            print("Example: python main.py GNB")
            sys.exit()
Code Example #10
    if int(input('Pass From PCA? 1: YES, 0: NO:\t')) == 1:
        columns = TopFeatures(FeatureMatrix, len(FeatureMatrix.columns) - 1)
    else:
        columns = list(FeatureMatrix.columns)
        columns.remove('Class')

    # TRAINING SET
    Input = np.array(FeatureMatrix[columns])
    Output = np.array(FeatureMatrix['Class'])
    inputTrain, inputTest, outputTrain, outputTest = train_test_split(
        Input, Output, test_size=0.3)

    # cv = KFold(n_splits=10, random_state=42, shuffle=False)

    # SVM
    svc = Classifiers.SVC(inputTrain, outputTrain)
    svc_scores = cross_val_score(svc, inputTrain, outputTrain, cv=10)

    # linear_model
    log = Classifiers.LOG(inputTrain, outputTrain)
    log_scores = cross_val_score(log, inputTrain, outputTrain, cv=10)

    # neighbors
    knn = Classifiers.KNN(inputTrain, outputTrain)
    knn_scores = cross_val_score(knn, inputTrain, outputTrain, cv=10)

    # RandomForestClassifier
    rfc = Classifiers.RFC(inputTrain, outputTrain)
    rfc_scores = cross_val_score(rfc, inputTrain, outputTrain, cv=10)

    # GaussianNB
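    # (The snippet ends here. A hedged guess at the GaussianNB step, assuming
    #  Classifiers.GNB mirrors the wrapper pattern used above; not confirmed
    #  by the source project.)
    gnb = Classifiers.GNB(inputTrain, outputTrain)
    gnb_scores = cross_val_score(gnb, inputTrain, outputTrain, cv=10)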