Code example #1
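# Builds a DataSet from all 24 GLCM attributes (experiment 7), reading one
# FEATURES_M{M}_CM8b_TH199.txt file per decimation factor M = 50..100.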
import cv2.ml as ml
import numpy as np

from MachineLearn.Classes.data import Data
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.experiment import Experiment

SAMPLES_PER_CLASS = 150
PATH_TO_SAVE_FEATURES = '../../GLCM_FILES/EXP_07/'
NUMBER_OF_ROUNDS = 50
MIN_DECIMATION = 50
MAX_DECIMATION = 100
EXPERIMENT = 7
NBITS = 8
NATT = 24

oExp = Experiment()
# basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
# basemask = np.array([12, 20, 22])

basemask = np.array(range(1, 25))
svmVectors = []
basemask = basemask - 1

for M in range(MIN_DECIMATION, MAX_DECIMATION + 1):
    oDataSet = DataSet()
    base = np.loadtxt(PATH_TO_SAVE_FEATURES +
                      "FEATURES_M{}_CM8b_TH199.txt".format(M),
                      usecols=basemask,
                      delimiter=",")
    classes = np.loadtxt(PATH_TO_SAVE_FEATURES +
                         "FEATURES_M{}_CM8b_TH199.txt".format(M),
                         dtype=object,
                         usecols=24,
                         delimiter=",")
    for x, y in enumerate(base):
        oDataSet.add_sample_of_attribute(np.array(list(np.float32(y)) + [classes[x]]))
    oDataSet.attributes = oDataSet.attributes.astype(float)
Code example #2
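# Experiment setup for four classes (82/93/140/60 samples per class); the
# commented-out block below shows how the per-part labels (c1_p1, c3_p2, ...)
# were merged into the four class labels c1..c4.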
from MachineLearn.Classes.experiment import Experiment
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.data import Data
import numpy as np
import cv2

oExp = Experiment()
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23, 25])
svmVectors = []
qtd_per_class = np.array([82, 93, 140, 60])

###################################################################################################################################
#basemask = basemask-1
#for i in range(1):
#oDataSet = DataSet()
#base = np.loadtxt("GLCM_FILES/M1_CM8b_RE_ELIAS.txt", usecols=(x for x in range(24)), delimiter=",")
#classes  = np.loadtxt("GLCM_FILES/M1_CM8b_RE_ELIAS.txt",dtype=object, usecols=(24), delimiter=",")
#classes = np.matrix(classes)
#base = np.array(np.hstack((base,classes.T)))
#base[base == "c1_p1"] = "c1"
#base[base == "c2_p1"] = "c2"
#base[base == "c3_p1"] = "c3"
#base[base == "c3_p2"] = "c3"
#base[base == "c3_p3"] = "c3"
#base[base == "c3_p4"] = "c3"
#base[base == "c4_p1"] = "c4"
#base[base == "c4_p2"] = "c4"
#base[base == "c4_p3"] = "c4"
#base[base == "c4_p4"] = "c4"

#unique, counts = np.unique(base[:,-1], return_counts=True)
Code example #3
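# Reloads a saved Experiment (decimation 1..1, 8-bit co-occurrence matrix,
# threshold 199) and prints its summary table; EXPERIMENT_NUMBER is assumed
# to be defined earlier in the original script.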
from MachineLearn.Classes.experiment import Experiment

ATT_NUMBER = 24
DECIMATION = 17
DECIMATION_MIN = 1
DECIMATION_MAX = 1
NUMBER_OF_ROUNDS = 50
CM_BIT_MIN = 8
CM_BIT_MAX = 8
CM_BIT = 8
TH_MIN = 199
TH_MAX = 199

R = 17
ROUND = 0
SHOW = True
oExp = Experiment.load(
    "OBJECTS/EXP_{:02d}/ACC_M{}-{}_{}_CM{}-{}b_TH{}-{}_ATT{}.gzip".format(
        EXPERIMENT_NUMBER, DECIMATION_MIN, DECIMATION_MAX, NUMBER_OF_ROUNDS,
        CM_BIT_MIN, CM_BIT_MAX, TH_MIN, TH_MAX, ATT_NUMBER))
# oExp2 = Experiment.load(
#     "OBJECTS/EXP_{:02d}/ACC_M{}-{}_{}_CM{}-{}b_TH{}-{}_ATT{}.gzip".format(EXPERIMENT_NUMBER, 50,
#                                                                          100,
#                                                                          NUMBER_OF_ROUNDS, CM_BIT_MIN, CM_BIT_MAX,
#                                                                          TH_MIN, TH_MAX,
#                                                                          ATT_NUMBER))
# oExp = Experiment()
# oExp.experimentResults = oExp1.experimentResults[:-1] + oExp2.experimentResults
# oExp.experimentDescription = oExp1.experimentDescription[:-1] + oExp2.experimentDescription
# oExp.length = oExp1.length + oExp2.length -1
print(oExp.show_in_table())

if SHOW:
    for j, i in enumerate(oExp.experimentResults):
Code example #4
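# Counts the support vectors of every trained SVM stored in two saved
# experiments (bit depths 2..7 at M=17, plus one result from the 8-bit
# M=1..100 run), using the OpenCV 2.4 cv2.SVM API; a cv2.ml sketch for newer
# OpenCV versions follows this snippet.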
import os

from MachineLearn.Classes.experiment import Experiment
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.data import Data
import numpy as np
import cv2

M = 17
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
TH = 199
supportVectors = np.zeros((7, 50))

oExp = Experiment.load("../OBJECTS/EXP_06/ACC_M17-17_50_CM2-7b_TH199-199_ATT10.gzip")
for k in range(6):
    oDataSet = oExp.experimentResults[k]
    for i, j in enumerate(oDataSet.dataSet):
        j.save_model("tmp.txt")
        svm = cv2.SVM()
        svm.load("tmp.txt")
        supportVectors[k, i] = svm.get_support_vector_count()

oExp = Experiment.load("../OBJECTS/EXP_06/ACC_M1-100_50_CM8-8b_TH199-199_ATT10.gzip")
oDataSet = oExp.experimentResults[16]
for i, j in enumerate(oDataSet.dataSet):
    j.save_model("tmp.txt")
    svm = cv2.SVM()
    svm.load("tmp.txt")
    supportVectors[6, i] = svm.get_support_vector_count()
os.remove("tmp.txt")
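Note: cv2.SVM and its load / get_support_vector_count calls belong to the OpenCV 2.4 bindings and were removed in OpenCV 3. A rough equivalent of the counting step for OpenCV 3/4, assuming the model file was written by a 3/4-era SVM (the 2.4 storage format is not loadable there), could look like this sketch:

import cv2

def count_support_vectors(model_path):
    """Load a trained SVM through cv2.ml and return its number of support vectors."""
    svm = cv2.ml.SVM_load(model_path)          # StatModel loader, available since OpenCV 3.1
    return svm.getSupportVectors().shape[0]    # one row per (possibly compressed) support vector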
Code example #5
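# Threshold sweep setup (TH = 190..200, experiment 4); the commented-out loop
# below still uses the project's older Python 2 API names (addSampleOfAtt,
# atributes, normalizeDataSet).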
import cv2

import numpy as np

from MachineLearn.Classes.data import Data
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.experiment import Experiment

SAMPLES_PER_CLASS = 50
PATH_TO_SAVE_FEATURES = 'GLCM_FILES/EXP_04/'
NUMBER_OF_ROUNDS = 50
MIN_THRESHOLD = 190
MAX_THRESHOLD = 200

oExp = Experiment()
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
svmVectors = []
basemask = basemask - 1
#
#
# for TH in range(MIN_THRESHOLD, MAX_THRESHOLD + 1):
#     oDataSet = DataSet()
#     base = np.loadtxt(PATH_TO_SAVE_FEATURES + "FEATURES_M1_CM8b_TH{}.txt".format(TH), usecols=basemask, delimiter=",")
#     classes = np.loadtxt(PATH_TO_SAVE_FEATURES + "FEATURES_M1_CM8b_TH{}.txt".format(TH), dtype=object, usecols=24, delimiter=",")
#     for x, y in enumerate(base):
#         oDataSet.addSampleOfAtt(np.array(list(np.float32(y)) + [classes[x]]))
#     oDataSet.atributes = oDataSet.atributes.astype(float)
#     oDataSet.normalizeDataSet()
#     for j in range(NUMBER_OF_ROUNDS):
#         print j
#         oData = Data(4, 13, samples=50)
Code example #6
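# Bit-depth sweep over 2..8-bit co-occurrence matrices at decimation M=14,
# loading one FEATURES_M14_CM{n}b_TH198.txt file per bit depth.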
import cv2

import numpy as np

from MachineLearn.Classes.data import Data
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.experiment import Experiment

SAMPLES_PER_CLASS = 50
PATH_TO_SAVE_FEATURES = '../../GLCM_FILES/EXP_04/'
NUMBER_OF_ROUNDS = 50
MIN_BITS = 2
MAX_BITS = 8
DECIMATION = 14

oExp = Experiment()
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
svmVectors = []
basemask = basemask - 1

for n_bits in range(MIN_BITS, MAX_BITS + 1):
    oDataSet = DataSet()
    base = np.loadtxt(
        PATH_TO_SAVE_FEATURES +
        "FEATURES_M{}_CM{}b_TH198.txt".format(DECIMATION, n_bits),
        usecols=basemask,
        delimiter=",")
    classes = np.loadtxt(
        PATH_TO_SAVE_FEATURES +
        "FEATURES_M{}_CM{}b_TH198.txt".format(DECIMATION, n_bits),
        dtype=object,
Code example #7
File: M_SVM_GLCM.py  Project: lukkascost/py_Crosswalk
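# Decimation sweep M = 1..100 over the full set of 24 GLCM attributes
# (experiment 4, 8-bit co-occurrence matrices, threshold 198).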
import cv2

import numpy as np

from MachineLearn.Classes.data import Data
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.experiment import Experiment

SAMPLES_PER_CLASS = 50
PATH_TO_SAVE_FEATURES = 'GLCM_FILES/EXP_04/'
NUMBER_OF_ROUNDS = 50
MIN_DECIMATION = 1
MAX_DECIMATION = 100

oExp = Experiment()
basemask = np.array(range(1, 25))
svmVectors = []
basemask = basemask - 1

for M in range(MIN_DECIMATION, MAX_DECIMATION + 1):
    oDataSet = DataSet()
    base = np.loadtxt(PATH_TO_SAVE_FEATURES +
                      "FEATURES_M{}_CM8b_TH198.txt".format(M),
                      usecols=basemask,
                      delimiter=",")
    classes = np.loadtxt(PATH_TO_SAVE_FEATURES +
                         "FEATURES_M{}_CM8b_TH198.txt".format(M),
                         dtype=object,
                         usecols=24,
                         delimiter=",")
    for x, y in enumerate(base):
Code example #8
File: BEST_MODEL.py  Project: lukkascost/py_Crosswalk
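# Best-model search: 10000 rounds on the M=1, TH=198 feature file with the
# 10-attribute basemask, keeping accuracy and support-vector counters for the
# search.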
from MachineLearn.Classes.experiment import Experiment
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.data import Data
import numpy as np
import cv2

oExp = Experiment()
M = 1
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
accuracy = 0
nVectors = 10000
PATH_TO_SAVE_FEATURES = '../../GLCM_FILES/EXP_04/'
TH = 198
ROUNDS = 10000
basemask = basemask - 1
best = 0

for i in range(1):
    oDataSet = DataSet()
    base = np.loadtxt(PATH_TO_SAVE_FEATURES +
                      "FEATURES_M{}_CM8b_TH{}.txt".format(M, TH),
                      usecols=basemask,
                      delimiter=",")
    classes = np.loadtxt(PATH_TO_SAVE_FEATURES +
                         "FEATURES_M{}_CM8b_TH{}.txt".format(M, TH),
                         dtype=object,
                         usecols=24,
                         delimiter=",")
    for x, y in enumerate(base):
        oDataSet.add_sample_of_attribute(
            np.array(list(np.float32(y)) + [classes[x]]))
Code example #9
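# Threshold sweep TH = 181..194 for experiment 6, using the 10-attribute
# basemask on the FEATURES_M1_CM8b_TH{TH}.txt files.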
import numpy as np

from MachineLearn.Classes.data import Data
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.experiment import Experiment

SAMPLES_PER_CLASS = 150
PATH_TO_SAVE_FEATURES = '../../GLCM_FILES/EXP_06/'
NUMBER_OF_ROUNDS = 50
MIN_THRESHOLD = 181
MAX_THRESHOLD = 194
TH_STEP = 1
EXPERIMENT = 6

oExp = Experiment()
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
svmVectors = []
basemask = basemask - 1

for TH in range(MIN_THRESHOLD, MAX_THRESHOLD + 1, TH_STEP):
    oDataSet = DataSet()
    base = np.loadtxt(PATH_TO_SAVE_FEATURES +
                      "FEATURES_M1_CM8b_TH{}.txt".format(TH),
                      usecols=basemask,
                      delimiter=",")
    classes = np.loadtxt(PATH_TO_SAVE_FEATURES +
                         "FEATURES_M1_CM8b_TH{}.txt".format(TH),
                         dtype=object,
                         usecols=24,
                         delimiter=",")
Code example #10
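# Summarizes a saved experiment (M = 1..100, 8-bit, TH=199): for each
# decimation factor it derives three values from get_general_metrics() and
# scales them to percent.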
import numpy as np

from MachineLearn.Classes.experiment import Experiment

EXPERIMENT_NUMBER = 6
ATT_NUMBER = 10
MIN_DECIMATION = 1
MAX_DECIMATION = 100
NUMBER_OF_ROUNDS = 50
CM_BITS = 8
TH = 199

oExp = Experiment.load(
    "../OBJECTS/EXP_{:02d}/ACC_M{}-{}_{}_CM{}-{}b_TH{}-{}_ATT{}.gzip".format(EXPERIMENT_NUMBER, MIN_DECIMATION,
                                                                             MAX_DECIMATION,
                                                                             NUMBER_OF_ROUNDS, CM_BITS, CM_BITS, TH, TH,
                                                                             ATT_NUMBER))
results = np.zeros((MAX_DECIMATION - MIN_DECIMATION + 1, 3))
for i, oDataSet in enumerate(oExp.experimentResults):
    metrics = oDataSet.get_general_metrics()
    results[i, 0] = metrics[0][0, -1] - (metrics[1][0, -1]*.75)
    results[i, 1] = metrics[0][0, -1]
    results[i, 2] = metrics[0][0, -1] + metrics[1][0, -1]

results = results * 100

possibles_a = []
possibles_b = []
possibles_c = []
avg_minus = np.min(results[:3, 0])
Code example #11
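# Reloads the best-model experiment (M=1, TH=198, 10000 rounds) and, with the
# OpenCV 2.4 cv2.SVM API, re-runs prediction on each model's test and training
# splits, accumulating the outcomes in a 4x4x2 results array.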
from MachineLearn.Classes.experiment import Experiment
from MachineLearn.Classes.data_set import DataSet
from MachineLearn.Classes.data import Data
import numpy as np
import cv2

M = 1
basemask = np.array([1, 2, 5, 9, 15, 16, 17, 21, 22, 23])
TH = 198
ROUNDS = 10000

oExp = Experiment.load(
    "../OBJECTS/EXP_04/BEST_MODEL_{}_CROSSWALK_M{}_TH{}.txt".format(
        ROUNDS, M, TH))
oDataSet = oExp.experimentResults[0]

bestIndexies = []
bestP1 = 0
bestP2 = 0
nvector = 1000000
i = 0
results = np.zeros((4, 4, 2))
for i, j in enumerate(oDataSet.dataSet):
    j.save_model("tmp.txt")
    svm = cv2.SVM()
    svm.load("tmp.txt")
    results1 = svm.predict_all(
        np.float32(oDataSet.attributes[j.Testing_indexes]))
    results2 = svm.predict_all(
        np.float32(oDataSet.attributes[j.Training_indexes]))
    acc = j.get_metrics()[0][-1]