Exemple #1
0
 def port(self, classname=None, classmap=None, **kwargs):
     """
     Export this object to plain C++ source code.

     :param classname: str name to give the generated C++ class
     :param classmap: dict mapping {class_idx: class_name}
     :param kwargs: forwarded verbatim to the module-level port() exporter
     :return: the generated C++ code (as returned by port())
     """
     # Thin delegation to the module-level exporter.
     exported = port(self, classname=classname, classmap=classmap, **kwargs)
     return exported
Exemple #2
0
    def port(self, **kwargs):
        """
        Export the cascading classifier to C++ source code.

        Both stages (simplex and complex) are ported separately and spliced
        into the CascadingClassifier template.

        :return: str C++ code rendered from the template
        """
        # Derive the generated class names from the requested one.
        classname = kwargs.get('classname', 'CascadingClassifier')
        simplex_classname = '%s_SimplexClassifier' % classname
        complex_classname = '%s_ComplexClassifier' % classname

        # Build the template context, porting each stage with its own name.
        context = {
            'classname': classname,
            'classmap': self.classmap,
            'simplex_classname': simplex_classname,
            'complex_classname': complex_classname,
            'simplex_clf': port(self.simplex_clf, classname=simplex_classname),
            'complex_clf': port(self.complex_clf, classname=complex_classname),
            'depth': self.depth,
        }
        return jinja('cascading/CascadingClassifier.jinja', context)
Exemple #3
0
    def set_project(self, project):
        """
        Export the wrapped classifier into *project* as an Arduino sketch header.

        :param project: target project; the rendered code is added as ML.h
        """
        # Port the classifier to C++, keyed by the dataset's classmap.
        exported = port(self.clf,
                        classname='Classifier',
                        classmap=self.dataset.classmap,
                        pretty=True)
        self.config.update({
            'ported_clf': exported,
            'num_features': len(self.dataset.df.columns),
        })

        # Render the sketch template and register it with the project.
        rendered = jinja('third_party/snoopy/snoopy.jinja', self.config)
        project.files.add('ML.h', contents=rendered, exists_ok=True)
Exemple #4
0
    def benchmark(self, clf, x=None, n_features=1):
        """
        Run the resources benchmark for a given classifier.

        :param clf: classifier to port and benchmark
        :param x: optional sample input; synthesized randomly when None
        :param n_features: size of the synthesized sample (used only when x is None)
        :return: result of self._benchmark_current on the temporary project
        """
        if x is None:
            # No sample supplied: draw a random feature vector.
            x = np.random.random(n_features)

        with self.project.tmp_project() as tmp:
            ino_source = jinja('metrics/Resources.jinja', {'x': x})
            ported_clf = port(clf, classname='Classifier')
            tmp.files.add('%s.ino' % tmp.name, contents=ino_source, exists_ok=True)
            tmp.files.add('Classifier.h', contents=ported_clf, exists_ok=True)
            return self._benchmark_current(tmp)
Exemple #5
0
    def benchmark(self, clf, X_test=None, y_test=None, n_features=1, n_samples=20, repeat=1, upload_options=None):
        """
        Benchmark on-line inference time for a classifier.

        :param clf: classifier to port and run on the board
        :param X_test: optional test samples; randomly generated when missing
        :param y_test: optional test labels; randomly generated when missing
        :param n_features: number of features per generated sample (only used when X_test is None)
        :param n_samples: number of generated samples (only used when X_test is None)
        :param repeat: how many times the sketch repeats each inference
        :param upload_options: dict of options forwarded to tmp.upload()
        :return: dict with 'inference_time' (micros) and 'online_accuracy'
        :raises BadBoardResponseError: when the serial output cannot be parsed
        """
        # BUG FIX: a mutable default argument ({}) is shared across all calls;
        # use None as the sentinel and create a fresh dict per call.
        if upload_options is None:
            upload_options = {}

        if X_test is None or y_test is None:
            assert n_features > 0, 'n_features MUST be positive when X_test is not set'
            assert n_samples > 0, 'n_samples MUST be positive when X_test is not set'
            X_test = np.random.random((n_samples, n_features))
            # np.random.random_integers was deprecated and removed from NumPy;
            # randint's upper bound is exclusive, so 2 draws labels from {0, 1}.
            y_test = np.random.randint(0, 2, n_samples)

        with self.project.tmp_project() as tmp:
            # upload benchmarking sketch
            sketch = jinja('metrics/Runtime.jinja', {
                'X_test': X_test,
                'y_test': y_test,
                'repeat': repeat
            })
            ported = port(clf, classname='Classifier')

            tmp.files.add(tmp.ino_name, contents=sketch, exists_ok=True)
            tmp.files.add('Classifier.h', contents=ported, exists_ok=True)
            tmp.upload(**upload_options)

            # parse serial output
            # since we can miss the first response, try a few times
            for _ in range(3):
                response = tmp.serial.read_until('======', timeout=8)
                match = re.search(r'inference time = ([0-9.]+) micros[\s\S]+?Score = ([0-9.]+)', response)

                if match is not None:
                    return {
                        'inference_time': float(match.group(1)),
                        'online_accuracy': float(match.group(2))
                    }

        self.project.logger.error('Failed to parse response: %s' % response)
        raise BadBoardResponseError('Unexpected response during runtime inference time benchmark')
Exemple #6
0
from sklearn.svm import SVC
import numpy as np
from glob import glob
from os.path import basename
from micromlgen import port


def load_features(folder):
    """
    Load feature CSV files from *folder* and build a labeled dataset.

    Each `<class_name>.csv` file becomes one class: its rows are the samples,
    and a trailing column holding the class index is appended to each row.

    :param folder: directory containing one CSV file per class
    :return: (dataset, classmap) where dataset is a 2-D numpy array whose last
             column is the class index (None when no CSV is found) and
             classmap maps {class_idx: class_name}
    """
    dataset = None
    classmap = {}
    # Sort for a deterministic class ordering: glob() returns files in an
    # arbitrary, OS-dependent order, which made the classmap irreproducible.
    for class_idx, filename in enumerate(sorted(glob('%s/*.csv' % folder))):
        class_name = basename(filename)[:-4]
        classmap[class_idx] = class_name
        # ndmin=2 keeps single-row files 2-D; loadtxt would otherwise collapse
        # them to 1-D and break len()/hstack below.
        samples = np.loadtxt(filename, dtype=float, delimiter=',', ndmin=2)
        labels = np.ones((len(samples), 1)) * class_idx
        samples = np.hstack((samples, labels))
        dataset = samples if dataset is None else np.vstack((dataset, samples))
    return dataset, classmap


# NOTE(review): hard-coded absolute Windows path — consider a CLI argument or
# config value so the script runs on other machines.
features, classmap = load_features(
    r'C:\Users\chait\Desktop\My Folder\Electronics Projects\Object Detection Using ESP32 CAM\samples_csv'
)
# The last column of the dataset holds the class index (see load_features).
X, y = features[:, :-1], features[:, -1]
# Train an RBF-kernel SVM and export it to plain C with micromlgen.
classifier = SVC(kernel='rbf', gamma=0.001).fit(X, y)
c_code = port(classifier)
print(c_code)
    # NOTE(review): fragment — the loop header that defines file_contents,
    # label_idx, dataset, dataset_labels and label_map is not visible in this
    # chunk; verify against the full script.
    # Create an array of labels containing one row for each row in the main dataset
    # with the value for each row the label index
    labels = np.full((len(file_contents), 1), label_idx)
    dataset_labels = labels if dataset_labels is None else np.vstack(
        (dataset_labels, labels))

    # Increment the label index for the next file
    label_idx = label_idx + 1

# Split the data into a training and testing set to test the accuracy of the model
# If you are happy with the accuracy of the model, you can remove this split
dataset_train, dataset_test, label_train, label_test = train_test_split(
    dataset, dataset_labels.ravel(), test_size=0.2)

# Build the support vector classification for our data and train the model
svc = SVC(kernel='poly', degree=2, gamma=0.1, C=100)
svc.fit(dataset_train, label_train)

# Test the accuracy of the model
print('Accuracy:', svc.score(dataset_test, label_test))
print()

# Convert the model to C code and write to the classifier.h file
c_code = port(svc, classmap=label_map)
with open('classifier.h', 'w') as f:
    f.write(c_code)
    # (redundant: the with-block already closes the file on exit)
    f.close()

print('Classifier written to classifier.h.')
Exemple #8
0
import numpy as np
from sklearn import svm
from micromlgen import port, port_testset


# Evaluation grid — unused below; presumably left over from a plotting example.
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data: two Gaussian blobs centered at (+2,+2) and (-2,-2).
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))

# fit the model
clf = svm.OneClassSVM(kernel="rbf", nu=0.5, gamma=0.1)
clf.fit(X_train)
# Export the one-class SVM to C, plus a C test set where predict()'s
# +1 (inlier) / -1 (outlier) output is mapped to 1 / 0.
print(port(clf))
print(port_testset(X_test, np.where(clf.predict(X_test) > 0, 1, 0)))
print(clf.predict(X_test))
Exemple #9
0
from sklearn.svm import SVC
from micromlgen import port
from sklearn.model_selection import train_test_split


def load_features(folder):
    """
    Build a labeled dataset from the CSV files found in *folder*.

    Every `<name>.csv` file is treated as one class: its rows become samples
    and a trailing column with the class index is appended to each row.

    :param folder: directory scanned for CSV files
    :return: (dataset, classmap) — dataset is a 2-D array (None when the
             folder holds no CSV); classmap maps {class_idx: class_name}
    """
    classmap = {}
    chunks = []
    for idx, path in enumerate(glob('%s/*.csv' % folder)):
        # Class name is the file name without the '.csv' extension.
        classmap[idx] = basename(path)[:-4]
        rows = np.loadtxt(path, dtype=float, delimiter=',')
        label_column = np.ones((len(rows), 1)) * idx
        chunks.append(np.hstack((rows, label_column)))
    dataset = np.vstack(chunks) if chunks else None
    return dataset, classmap


# NOTE(review): this snippet uses np/glob/basename without visible imports —
# confirm they are imported in the full file.
# Fix the RNG seed so the train/test split is reproducible.
np.random.seed(0)
dataset, classmap = load_features('data')
# Inputs are all columns but the last; the last column is the class index.
X, y = dataset[:, :-1], dataset[:, -1]
# this line is for testing your accuracy only: once you're satisfied with the results, set test_size to 1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

clf = SVC(kernel='poly', degree=2, gamma=0.1, C=100)
clf.fit(X_train, y_train)

print('Accuracy', clf.score(X_test, y_test))
print('Exported classifier to plain C')
print(port(clf, classmap=classmap))
from micromlgen import port
from sklearn.svm import SVC
from sklearn.datasets import load_iris
import numpy as np

# Train a linear SVM on the full iris dataset and export it to plain C.
iris = load_iris()
X = iris.data
y = iris.target
clf = SVC(kernel='linear').fit(X, y)
x = port(clf)
print(x)
# Use a context manager so the file is closed even if write() raises.
with open("sample.h", "w") as text_file:
    n = text_file.write(x)
    # NOTE(review): fragment — the enclosing `def` line is not visible in this
    # chunk; this reads as the body of a load_features(files)-style function.
    dataset = None
    classmap = {}
    for class_idx, filename in enumerate(files):
        class_name = basename(filename)[:-4]
        classmap[class_idx] = class_name
        # genfromtxt with filling_values=0.0 tolerates missing CSV cells.
        samples = np.genfromtxt(filename, delimiter=',', filling_values=0.0)
        labels = np.ones((len(samples), 1)) * class_idx
        samples = np.hstack((samples, labels))
        dataset = samples if dataset is None else np.vstack((dataset, samples))

    return dataset, classmap


def get_classifier(features):
    """
    Train a random forest on *features*.

    The last column of *features* is taken as the label; every other column
    is a sample input.

    :return: a fitted RandomForestClassifier (20 trees, max depth 10)
    """
    samples = features[:, :-1]
    targets = features[:, -1]
    forest = RandomForestClassifier(20, max_depth=10)
    return forest.fit(samples, targets)


if __name__ == '__main__':
    # NOTE(review): fileNames is not defined in this chunk — presumably a
    # module-level list of CSV paths; confirm against the full file.
    features, classmap = load_features(fileNames)
    classifier = get_classifier(features)
    # Convert the trained model to C code with the class-index -> name map.
    c_code = port(classifier, classmap=classmap)

    print("Writing to a file")

    modelFile = open("model.h", "w")
    modelFile.write(c_code)
    modelFile.close()

    print("Model file created")
Exemple #12
0
 def port(self, **kwargs):
     """
     Export the wrapped classifier to C++ source code.

     TensorFlow models know how to port themselves; anything else is handed
     to micromlgen.

     :return: C++ code for self.clf
     """
     if self.is_tf():
         return self.clf.port(**kwargs)
     return micromlgen.port(self.clf, **kwargs)
Exemple #13
0
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
from micromlgen import port

if __name__ == '__main__':
    # Fit a 2-component PCA on the iris features (no whitening).
    X = load_iris().data
    pca = PCA(n_components=2, whiten=False).fit(X)

    # micromlgen can export PCA transforms as well as classifiers.
    print(port(pca))
Exemple #14
0
    def start_classi(self):
        """
        Train (or load) the classifier selected in the UI and evaluate it.

        Reads the test/train/validation CSV paths from the UI, splits each
        matrix into inputs and labels, then either loads a saved joblib model
        or trains the chosen classifier (SVM / decision tree / MLP / naive
        Bayes), optionally exporting it for microcontrollers. Finally the
        model predicts on the test set and is scored on the validation set.
        """
        path_test = self.ui.ad_test.text()
        path_train = self.ui.ad_train.text()
        path_val = self.ui.ad_val.text()

        # Load the three matrices (rows = samples, last column = label).
        df0 = np.loadtxt(path_test, delimiter=",")
        df1 = np.loadtxt(path_train, delimiter=",")
        df2 = np.loadtxt(path_val, delimiter=",")
        # Number of input columns per matrix (the last column is the label).
        size0 = df0.shape
        input0 = size0[1] - 1
        size1 = df1.shape
        input1 = size1[1] - 1
        size2 = df2.shape
        input2 = size2[1] - 1

        # Split each matrix into inputs (X) and labels (Y).
        # BUG FIX: the test split must come from df0 (the test matrix);
        # it previously sliced df1 and so evaluated on training data.
        X0 = df0[:, 0:input0]
        Y0 = df0[:, input0]
        X1 = df1[:, 0:input1]
        Y1 = df1[:, input1]
        X2 = df2[:, 0:input2]
        Y2 = df2[:, input2]

        if self.ui.check_train.isChecked():
            # Load a previously trained model from a joblib file.
            file = QFileDialog.getOpenFileName(
                self,
                "Modell einlesen",
                "",
                "All Files (*);;Joblib File (*.joblib)",
            )
            model = load(file[0])
        else:

            if path_train == "" or path_val == "" or path_test == "":
                self.ui.msg_text.setText("path not complett")
            elif self.ui.check_svm.isChecked():

                # Linear SVM, one-vs-rest decision function.
                model = svm.SVC(kernel='linear',
                                decision_function_shape='ovr',
                                C=0.01,
                                gamma=0.001)
                model.fit(X1, Y1)

                if self.ui.che_mue.isChecked():
                    # Export the SVM as a C header for microcontrollers.
                    porter = port(model)
                    file_svm = QFileDialog.getSaveFileName(
                        self, "Save Objects", "",
                        "All Files (*);;Lib File (*.h)")

                    f = open(file_svm[0], 'w+')
                    f.write(porter)
                    f.close()

            elif self.ui.check_neighbor.isChecked():

                # Decision tree (exported via emlearn instead of micromlgen).
                model = DecisionTreeClassifier(random_state=0)
                model.fit(X1, Y1)

                if self.ui.che_mue.isChecked():
                    cmodel = emlearn.convert(model)
                    file_svm = QFileDialog.getSaveFileName(
                        self, "Save Objects", "",
                        "All Files (*);;Lib File (*.h)")
                    cmodel.save(file=file_svm[0])

            elif self.ui.check_neuro.isChecked():
                # Small multi-layer perceptron.
                model = MLPClassifier(solver='lbfgs',
                                      alpha=1e-5,
                                      hidden_layer_sizes=(10, 2),
                                      random_state=1)
                model.fit(X1, Y1)

                if self.ui.che_mue.isChecked():
                    cmodel = emlearn.convert(model)
                    file_svm = QFileDialog.getSaveFileName(
                        self, "Save Objects", "",
                        "All Files (*);;Lib File (*.h)")
                    cmodel.save(file=file_svm[0])

            elif self.ui.check_bayes.isChecked():

                # Gaussian naive Bayes.
                model = GaussianNB()
                model.fit(X1, Y1)

                if self.ui.che_mue.isChecked():
                    cmodel = emlearn.convert(model)
                    file_svm = QFileDialog.getSaveFileName(
                        self, "Save Objects", "",
                        "All Files (*);;Lib File (*.h)")
                    cmodel.save(file=file_svm[0])

            else:
                self.ui.msg_text.setText("no choice of classifier")

        # NOTE(review): when no classifier branch sets `model` (bad paths or
        # no choice), the code below raises — confirm intended error handling.
        if self.ui.check_train_3.isChecked():
            # Persist the model with joblib at a user-chosen location.
            path_classi, _ = QFileDialog.getSaveFileName(
                self, "QFileDialog.getSaveFileName()", "",
                "All Files (*);;joblib Files (*.joblib)")
            dump(model, path_classi)

        predicted = model.predict(X0)

        # NOTE(review): `predicted` and `score` are not used or displayed
        # here — confirm they are consumed elsewhere (e.g. the UI).
        score = model.score(X2, Y2)
from glob import glob
from os.path import basename
from PIL import Image
import micromlgen


def load_features(folder):
    """
    Collect one class per CSV file in *folder* into a single labeled dataset.

    :param folder: directory scanned for `*.csv` files
    :return: (dataset, classmap); each dataset row ends with its class index
             (dataset is None when the folder has no CSV), classmap maps
             {class_idx: class_name}
    """
    classmap = {}
    dataset = None
    for idx, path in enumerate(glob('%s/*.csv' % folder)):
        # Class name = file name minus the '.csv' extension.
        classmap[idx] = basename(path)[:-4]
        rows = np.loadtxt(path, dtype=float, delimiter=',')
        tagged = np.hstack((rows, np.ones((len(rows), 1)) * idx))
        if dataset is None:
            dataset = tagged
        else:
            dataset = np.vstack((dataset, tagged))
    return dataset, classmap


# NOTE(review): import placed mid-file in the original notebook-style export.
from sklearn.svm import SVC

features, classmap = load_features('../input/dataset')
# Inputs are all columns but the last; the last column is the class index.
X, y = features[:, :-1], features[:, -1]
classifier = SVC(kernel='rbf', gamma=0.001).fit(X, y)

# Export the trained SVM to plain C code and write it to disk.
c_code = micromlgen.port(classifier)

fs = open("model.txt", "w")
fs.write(c_code)
fs.close()
Exemple #16
0
import os
from micromlgen import port
from sklearn.svm import SVC
from sklearn.datasets import load_iris


def mkdir(dir, k):
    """
    Create the first free directory named `./<dir><k>`, `./<dir><k+1>`, ...

    :param dir: base directory name (string prefix)
    :param k: starting suffix to try
    :return: the name (without "./") of the directory actually created
    """
    if not os.path.exists("./" + dir + str(k)):
        os.mkdir("./" + dir + str(k))
        return dir + str(k)
    # BUG FIX: the recursive branch dropped its return value, so an existing
    # directory made the function return None instead of the created name.
    return mkdir(dir, k + 1)


def write_file(dir, model):
    """
    Write the generated model source to `./<dir>/model.h`.

    :param dir: existing directory (relative to the working directory)
    :param model: string contents to write
    """
    # Context manager guarantees the file is closed even if write() raises.
    with open("./" + dir + "/model.h", 'w') as f:
        f.write(model)


if __name__ == '__main__':
    # Train a linear SVM on iris and export it to plain C.
    iris = load_iris()
    x = iris.data
    y = iris.target
    clf = SVC(kernel='linear', gamma=0.001).fit(x, y)
    model = port(clf)

    # Write the generated code into a fresh model<k>/ directory.
    dir = mkdir("model", 0)
    write_file(dir, model)