Code example #1
0
# Train RBF SVMs over a range of decimation factors (M) and record, per
# training round, the number of support vectors and the test-set results.
# basemask selects the 24 attribute columns (0-based indices 0..23);
# column 24 of the feature file holds the class label.
basemask = np.arange(24)
svmVectors = []

for M in range(MIN_DECIMATION, MAX_DECIMATION + 1):
    oDataSet = DataSet()
    # Both attribute and label columns come from the same feature file, so
    # build the path once instead of formatting it twice.
    feature_file = PATH_TO_SAVE_FEATURES + "FEATURES_M{}_CM8b_TH199.txt".format(M)
    base = np.loadtxt(feature_file, usecols=basemask, delimiter=",")
    classes = np.loadtxt(feature_file, dtype=object, usecols=24, delimiter=",")
    # Append each row of attributes plus its label to the data set.
    for x, y in enumerate(base):
        oDataSet.add_sample_of_attribute(np.array(list(np.float32(y)) + [classes[x]]))
    oDataSet.attributes = oDataSet.attributes.astype(float)
    oDataSet.normalize_data_set()
    for j in range(NUMBER_OF_ROUNDS):
        print(j)
        oData = Data(4, 50, samples=150)
        oData.random_training_test_per_class()
        svm = ml.SVM_create()
        svm.setKernel(ml.SVM_RBF)
        # Recorded on the Data object for bookkeeping only; trainAuto below
        # receives its cross-validation setting directly via kFold.
        oData.params = dict(kernel_type=ml.SVM_RBF, svm_type=ml.SVM_C_SVC, gamma=2.0, nu=0.0, p=0.0, coef0=0,
                            k_fold=10)
        # OpenCV 3+ API: trainAuto cross-validates hyper-parameters
        # with 10-fold CV over the training split.
        svm.trainAuto(np.float32(oDataSet.attributes[oData.Training_indexes]), ml.ROW_SAMPLE,
                      np.int32(oDataSet.labels[oData.Training_indexes]), kFold=10)
        svmVectors.append(svm.getSupportVectors().shape[0])
        # Classify each test sample individually; predict returns (retval, responses).
        results = []
        for i in (oDataSet.attributes[oData.Testing_indexes]):
            res, cls = svm.predict(np.float32([i]))
            results.append(cls[0])
        oData.set_results_from_classifier(results, oDataSet.labels[oData.Testing_indexes])
Code example #2
0
File: BEST_MODEL.py  Project: lukkascost/py_Crosswalk
 # NOTE(review): fragment — the enclosing loop and the names M, TH, ROUNDS,
 # basemask, oDataSet, PATH_TO_SAVE_FEATURES are defined outside this view.
 # Load the attribute columns selected by basemask from the feature file.
 base = np.loadtxt(PATH_TO_SAVE_FEATURES +
                   "FEATURES_M{}_CM8b_TH{}.txt".format(M, TH),
                   usecols=basemask,
                   delimiter=",")
 # Load the class-label column (column 24) from the same file as strings.
 classes = np.loadtxt(PATH_TO_SAVE_FEATURES +
                      "FEATURES_M{}_CM8b_TH{}.txt".format(M, TH),
                      dtype=object,
                      usecols=24,
                      delimiter=",")
 # Append each row of attributes plus its label to the data set.
 for x, y in enumerate(base):
     oDataSet.add_sample_of_attribute(
         np.array(list(np.float32(y)) + [classes[x]]))
 oDataSet.attributes = oDataSet.attributes.astype(float)
 oDataSet.normalize_data_set()
 # Train one SVM per round on a fresh random train/test split.
 for j in range(ROUNDS):
     oData = Data(4, 13, samples=50)
     oData.random_training_test_per_class()
     # OpenCV 2.x API: cv2.SVM with a params dict passed to train_auto.
     svm = cv2.SVM()
     oData.params = dict(kernel_type=cv2.SVM_RBF,
                         svm_type=cv2.SVM_C_SVC,
                         gamma=2.0,
                         nu=0.0,
                         p=0.0,
                         coef0=0,
                         k_fold=2)
     # train_auto cross-validates hyper-parameters (k_fold taken from params).
     svm.train_auto(np.float32(oDataSet.attributes[oData.Training_indexes]),
                    np.float32(oDataSet.labels[oData.Training_indexes]),
                    None,
                    None,
                    params=oData.params)
     # Keep the trained model for this round.
     oData.insert_model(svm)
Code example #3
0
                                 # NOTE(review): tail of a NORMALIZATION_MATRIX
                                 # literal whose opening rows are outside this view.
                                 [2.88141800e+01, 7.24622100e-02],
                                 [9.68741850e-02, -8.40647000e-02],
                                 [4.80887420e+04, 5.70339160e+00]])
# Select the 0-based attribute columns {0, 1, 4, 8, 14, 15, 16, 20, 21, 22}.
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
basemask = basemask - 1

# Evaluate a previously trained SVM on the whole data set
# (OpenCV 2.x API; Python 2 print syntax).
svm = cv2.SVM()
oDataSet = DataSet()
# Attribute columns come from basemask; column 24 holds the class labels.
base = np.loadtxt(PATH_TO_SAVE_FEATURES +
                  "FEATURES_M{}_CM8b_TH198.txt".format(M),
                  usecols=basemask,
                  delimiter=",")
classes = np.loadtxt(PATH_TO_SAVE_FEATURES +
                     "FEATURES_M{}_CM8b_TH198.txt".format(M),
                     dtype=object,
                     usecols=24,
                     delimiter=",")
# Append each row of attributes plus its label to the data set.
for x, y in enumerate(base):
    oDataSet.add_sample_of_attribute(
        np.array(list(np.float32(y)) + [classes[x]]))
oDataSet.attributes = oDataSet.attributes.astype(float)
# Rescale with the fixed matrix: (x - col1) / (col0 - col1); presumably
# column 0 is the per-feature max and column 1 the min — TODO confirm
# against where NORMALIZATION_MATRIX is built.
oDataSet.attributes = (oDataSet.attributes - NORMALIZATION_MATRIX[:, 1].T) / (
    NORMALIZATION_MATRIX[:, 0] - NORMALIZATION_MATRIX[:, 1])
oData = Data(4, 13, samples=50)
# Load the serialized model and classify every sample in one batch call.
svm.load("MODEL_M1_CM8_TH198_ATT10_ROUND_24.txt")
results = svm.predict_all(np.float32(oDataSet.attributes))
oData.set_results_from_classifier(results, oDataSet.labels)
oData.insert_model(svm)
print oData.confusion_matrix
print oData
Code example #4
0
     # NOTE(review): fragment — the start of this np.loadtxt call (and the
     # names DECIMATION, n_bits, basemask, oDataSet) lie outside this view.
     usecols=basemask,
     delimiter=",")
 # Load the class-label column (column 24) from the same file as strings.
 classes = np.loadtxt(
     PATH_TO_SAVE_FEATURES +
     "FEATURES_M{}_CM{}b_TH198.txt".format(DECIMATION, n_bits),
     dtype=object,
     usecols=24,
     delimiter=",")
 # Append each row of attributes plus its label to the data set.
 for x, y in enumerate(base):
     oDataSet.add_sample_of_attribute(
         np.array(list(np.float32(y)) + [classes[x]]))
 oDataSet.attributes = oDataSet.attributes.astype(float)
 oDataSet.normalize_data_set()
 # Train one SVM per round on a fresh random train/test split
 # (OpenCV 2.x API; Python 2 print syntax).
 for j in range(NUMBER_OF_ROUNDS):
     print j
     oData = Data(4, 13, samples=50)
     oData.random_training_test_per_class()
     svm = cv2.SVM()
     oData.params = dict(kernel_type=cv2.SVM_RBF,
                         svm_type=cv2.SVM_C_SVC,
                         gamma=2.0,
                         nu=0.0,
                         p=0.0,
                         coef0=0,
                         k_fold=2)
     # train_auto cross-validates hyper-parameters (k_fold taken from params).
     svm.train_auto(np.float32(oDataSet.attributes[oData.Training_indexes]),
                    np.float32(oDataSet.labels[oData.Training_indexes]),
                    None,
                    None,
                    params=oData.params)
     # Keep the trained model for this round.
     oData.insert_model(svm)