def startTraining(self):
        """Train an SVM on the extracted feature vectors and persist the model.

        Builds the label vector (+1 = object, -1 = non-object), stacks the
        feature vectors into a training matrix, fits a linear SVM, prints its
        weights / hyperplane plot / confusion matrix, and saves the model to
        ``folderModel`` via joblib.
        """
        # Label vectors: 1 for the object class, -1 for the non-object class.
        kelasObjek = np.ones(len(
            self.vektorObjek))  # class 1 marks "object"
        kelasNonObjek = np.ones(
            len(self.vektorNonObjek
                )) * -1  # class -1 marks "not an object"

        # Horizontally stack both label vectors into the training labels.
        trainDataY = np.hstack((kelasObjek, kelasNonObjek))
        # print trainDataY,np.size(trainDataY)

        dataVektor = []
        # Append each object feature vector, vertically stacking into trainDataX.
        # NOTE(review): np.vstack is re-run on every iteration (O(n^2) overall);
        # stacking once after both loops would suffice — confirm and hoist.
        for fiturVektor in self.vektorObjek:
            dataVektor.append(fiturVektor)
            trainDataX = np.vstack(
                dataVektor)  # stack into an (n_samples x n_features) matrix

        # Continue with the non-object feature vectors, restacking trainDataX.
        for fiturVektor in self.vektorNonObjek:
            dataVektor.append(fiturVektor)
            trainDataX = np.vstack(dataVektor)
        # print np.size(trainDataX)
        # Optionally persist the raw training matrix as CSV.
        isSave = raw_input("Simpan data ke dataset? (y/n): ")
        if isSave == "y":
            np.savetxt("dataX.csv", trainDataX, delimiter=",")

        # trainDataX: (num_training_samples x num_features_per_image) matrix,
        # trainDataY: matching class labels — now train the SVM on them.
        print "Training vektor dengan SVM..."
        clf = SVM(kernel="linear", galat=1e-2, C=0.4)
        clf.fit(trainDataX, trainDataY)

        print "Training sukses."
        print "Spesifikasi model SVM yang telah dilatih: "
        print "-- Nilai bobot:"
        print clf.w
        # Plot the training data split by class to visualise the hyperplane.
        print "-- Plot Hyperplane:"
        clf.plot_margin(trainDataX[trainDataY == 1],
                        trainDataX[trainDataY == -1], clf)

        print "-- Confusion Matrix:"
        ConfusionMatrix(model=clf).printMatrix()

        # Create the folder for the trained SVM model if it does not exist.
        if not os.path.isdir(os.path.split(folderModel)[0]):
            os.makedirs(os.path.split(folderModel)[0])

        # Persist the trained model to the (possibly freshly created) folder.
        joblib.dump(clf, folderModel)
        print "Model classifier saved to {}".format(folderModel)
Beispiel #2
0
def classifier(method, data, show_fitting):
    """Build and return the classifier instance selected by *method*.

    Each backend's module is imported lazily, so only the chosen
    implementation (and its dependencies) is loaded.  An unrecognised
    *method* yields the error string (not an exception), preserving the
    existing contract.
    """
    def _bayes():
        from classifier.naiveBayes import NaiveBayes
        return NaiveBayes(data, show_fitting)

    def _svm():
        from classifier.svm import SVM
        return SVM(data, show_fitting)

    def _knear():
        from classifier.kNeighbors import KNeighbors
        return KNeighbors(data, show_fitting)

    def _tree():
        from classifier.decisionTree import DecisionTree
        return DecisionTree(data, show_fitting)

    def _neural():
        from classifier.neuralNetwork import NeuralNetwork
        return NeuralNetwork(data, show_fitting)

    def _baseline():
        from classifier.baseline import Baseline
        return Baseline(data, show_fitting)

    factories = {
        'bayes': _bayes,
        'svm': _svm,
        'knear': _knear,
        'tree': _tree,
        'neural': _neural,
        'baseline': _baseline,
    }
    factory = factories.get(method)
    if factory is None:
        return 'Not a valid classification method!'
    return factory()
Beispiel #3
0
# Experiment parameters taken from the parsed command-line arguments.
repetition_num = args.repeat
problem_num = args.problem
skip_num = args.skip
# pattern_num is defined elsewhere in this script; total trials per block.
block_num = pattern_num * repetition_num
success_count = 0
say_count = 0

# Choose the ERP data source: live UDP stream, Kodama-format .mat folder,
# or a plain .mat recording.
if args.online:
    receiver = UDP(args.subject, args.session, "predict", average=args.average, logname=args.log)
elif args.kodama:
    receiver = LoadmatKodama(args.subject, args.session, "predict", folder=args.kodama, repetition_num=repetition_num)
else:
    receiver = Loadmat(args.subject, args.session, "predict", average=args.average, filename=args.filename, matfile=args.matfile)

# Select the classification backend by name.
if args.method == "rbf":
    classifier = SVM(name=args.modelname, decimate=args.decimate)
elif args.method == "libsvm":
    classifier = LibSVM(name=args.modelname)
elif args.method == "linear" or args.method == "l":
    classifier = LinearSVM(name=args.modelname, decimate=args.decimate)
elif args.method == "swlinearsvm":
    classifier = StepwiseLinearSVM(name=args.modelname, decimate=args.decimate)
elif args.method == "lda":
    classifier = LDA(name=args.modelname)
elif args.method == "swlda":
    classifier = SWLDA(name=args.modelname, decimate=args.decimate)
# NOTE(review): an unrecognised --method leaves `classifier` unbound and the
# load() below raises NameError — consider failing with an explicit message.

# Restore the previously trained model and open a UDP socket for output.
classifier.load()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

while True:
Beispiel #4
0
# Load dataset based on database name
logger.info("Downloading {} Keras dataset".format(options.db))
module = importlib.import_module('keras.datasets.' + options.db)
(X_train, y_train), (X_test, y_test) = module.load_data()
# NOTE(review): `and` binds tighter than `or`, so this condition groups as
# (cifar10 AND tensorflow AND CNN) OR (features == "BOW") — the BOW case
# transposes regardless of db/backend/method. Confirm the grouping is intended.
if options.db == "cifar10" and (backend() == "tensorflow" and options.method == "CNN") or options.features == "BOW":
    X_train = np.transpose(X_train, [0, 2, 3, 1])
    X_test = np.transpose(X_test, [0, 2, 3, 1])
# For MNIST + CNN: append a channel axis to the (n, 28, 28) images, then
# move it to the front (channels-first layout).
if options.db == "mnist" and options.method == "CNN":
    X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
    X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
    X_train = np.transpose(X_train, [0, 3, 1, 2])
    X_test = np.transpose(X_test, [0, 3, 1, 2])

# Create classifier
if options.method == "SVM":
    clf = SVM()
elif options.method == "RandomForest":
    clf = RandomForest()
elif options.method == "MLP":
    # Flatten the per-sample shape into a single input dimension for the MLP.
    clf = MLPNet(input_shape=[reduce(lambda x, y: x * y, X_train.shape[1:], 1)], output_size=10)
elif options.method == "CNN":
    clf = CNN(input_shape=X_train.shape[1:], output_size=10)
else:
    assert False, "Unavailable model"

# Shrink training set size if needed — keep only the requested fraction
# of the train and test sets.
X_train = X_train[0:round(X_train.shape[0] * options.train_set_prop)]
y_train = y_train[0:round(y_train.shape[0] * options.train_set_prop)]
X_test = X_test[0:round(X_test.shape[0] * options.test_set_prop)]
y_test = y_test[0:round(y_test.shape[0] * options.test_set_prop)]
Beispiel #5
0
def classifier_train(classifier, kernel,  x_k, y_k):
    """Fit a fresh SVM with the given kernel on (x_k, y_k) and return it.

    NOTE(review): the incoming *classifier* argument is discarded and
    replaced by a new SVM(kernel, C=4, gamma=4) instance — confirm callers
    expect their classifier to be ignored.
    """
    model = SVM(kernel, C=4, gamma=4)
    model.fit(x_k, y_k)
    return model
Beispiel #6
0
# NOTE(review): this `elif` continues an `if args.online:` branch that lies
# outside this excerpt.
elif args.kodama:
    # Kodama-format .mat files read from a folder.
    receiver = LoadmatKodama(args.subject,
                             args.session,
                             "predict",
                             folder=args.kodama,
                             repetition_num=repetition_num)
else:
    # Plain .mat recording.
    receiver = Loadmat(args.subject,
                       args.session,
                       "predict",
                       average=args.average,
                       filename=args.filename,
                       matfile=args.matfile)

# Select the classification backend by name.
if args.method == "rbf":
    classifier = SVM(name=args.modelname, decimate=args.decimate)
elif args.method == "libsvm":
    classifier = LibSVM(name=args.modelname)
elif args.method == "linear" or args.method == "l":
    classifier = LinearSVM(name=args.modelname, decimate=args.decimate)
elif args.method == "swlinearsvm":
    classifier = StepwiseLinearSVM(name=args.modelname, decimate=args.decimate)
elif args.method == "lda":
    classifier = LDA(name=args.modelname)
elif args.method == "swlda":
    classifier = SWLDA(name=args.modelname, decimate=args.decimate)

# Restore the previously trained model.
classifier.load()

while 1:
    for _ in range(problem_num):
def main():
    """Interactively select a dataset, then benchmark supervised classifiers
    (ANN, SVM, logistic regression) and a semi-supervised one (S3VM) across
    increasing fractions of labelled training data.
    """
    # Keep prompting until a valid dataset number (1-3) is entered.
    while True:
        print "Select dataset by entering the number: "
        print "1. White wine"
        print "2. WaveForm"
        print "3. Wall-Following robot navigation"
        dataset_number = raw_input()
        try:
            dataset_number = int(dataset_number)
            if dataset_number > 0 and dataset_number < 4:
                break
            print "number out of range"
        except ValueError:
            print "not a valid number"

    features, labels = load_dataset(
        dataset_number
    )  # dataset is a dictionary with features and labels as keys

    # Split the data into training (70%) and testing (30%) sets.
    features_train, features_test, labels_train, labels_test = train_test_split(
        features, labels, train_size=.70, random_state=42)

    # Seed so any randomisation further down in the code is reproducible.
    random.seed(42)

    # Loop over increasing ratios of labelled data.
    for labelled_data_percent in [.10, .20, .30, .40, .50, .60, .70, .80, .90]:
        print 'For ' + str(labelled_data_percent * 100) + '% of labelled data'
        # Split the training set into labelled and unlabelled parts.
        features_train_labeled, features_train_unlabelled, labels_train_labeled, labels_train_unlabelled = train_test_split(
            features_train,
            labels_train,
            train_size=labelled_data_percent,
            random_state=42)

        # ---- Supervised algorithms ----

        # Artificial Neural Network: trained on the labelled subset only,
        # evaluated on the held-out test set.
        print 'ANN'
        AnnSingleLayer(features_train_labeled, features_test,
                       labels_train_labeled, labels_test)
        print '\n\n'

        # Support Vector Machine.
        print 'SVM'
        SVM(features_train_labeled, features_test, labels_train_labeled,
            labels_test)
        print '\n\n'

        # Logistic Regression.
        print 'Logistic Regression'
        LogisticRegressionClassifier(features_train_labeled, features_test,
                                     labels_train_labeled, labels_test)
        print '\n\n'

        # ---- Semi-supervised algorithms ----

        # Semi-Supervised SVM (S3VM): receives the full train/test split and
        # re-splits internally, because the implementation used requires every
        # class to be represented in the labelled portion.
        print 'S3VM'
        qns3vm_main(features_train, features_test, labels_train, labels_test,
                    labelled_data_percent)
        print '\n\n'
Beispiel #8
0
# Fetch the ERP epochs from the configured receiver.
erps = receiver.fetch()

if args.moving_average:
    # NOTE(review): moving_average is called WITHOUT `erps` and its result
    # replaces them — this looks like it should be
    # convert.erp.moving_average(erps, args.moving_average); confirm.
    erps = convert.erp.moving_average(args.moving_average)

# Rebalance classes by undersampling (skipped for SWLDA).
if args.undersampling and args.method != "swlda":
    erps = convert.erp.undersampling(erps,
                                     block_num,
                                     method=args.undersampling_method,
                                     far=args.undersampling_far)

# Flatten the per-class epoch lists into one sample list with parallel
# integer labels (label i repeated once per epoch in class i).
labels = sum([list(np.repeat(i, len(erps[i]))) for i in range(len(erps))], [])
erps = sum(erps, [])

# Select the classification backend by name.
if args.method == "rbf":
    classifier = SVM(name=args.modelname, decimate=args.decimate)
elif args.method == "linear" or args.method == "l":
    classifier = LinearSVM(name=args.modelname, decimate=args.decimate)
elif args.method == "swlinearsvm":
    classifier = StepwiseLinearSVM(name=args.modelname, decimate=args.decimate)
elif args.method == "libsvm":
    classifier = LibSVM(name=args.modelname)
elif args.method == "lda":
    classifier = LDA(name=args.modelname)
elif args.method == "swlda":
    classifier = SWLDA(name=args.modelname, decimate=args.decimate)

# Train on the fetched data and persist the receiver's state.
classifier.train(labels, erps)
receiver.save()