def activateLayersWithUnrolling(self, _normed):
    """Emit C code that evaluates all layers with unrolled multiplications.

    Each layer's matrix product is fully unrolled via
    CodeGenerator().unrollMultiplication(); hidden layers are followed by an
    activate() call. The final section either selects the winning class
    (classification) or denormalizes the single output (regression).
    """
    parts = []
    lastIdx = len(self.layers) - 1
    for idx, layer in enumerate(self.layers):
        # variable naming: "in" feeds the first layer, "out" leaves the last,
        # intermediate results live in z0, z1, ...
        src = "in" if idx == 0 else "z" + str(idx - 1)
        dst = "out" if idx == lastIdx else "z" + str(idx)
        parts.append(CodeGenerator().unrollMultiplication(dst, src, layer[0]))
        if idx < lastIdx:
            parts.append(f"\tactivate({dst}, th{idx}, {len(layer[0][0])});\n")
    nOut = str(len(self.outputLayer))
    if self.modelType == Type.CLASSIFICATION:
        parts.append(f"\tactivate(out, th_out, {nOut});\n\n")
        parts.append(f"\tunsigned int index = findMax(out, {nOut});\n")
        parts.append("\treturn classes[index];\n")
    else:
        # undo the output normalization: scale by half the range, add the minimum
        scale = CodeGenerator().float2String(_normed[0][1])
        offset = CodeGenerator().float2String(_normed[0][0])
        parts.append(f"\n\treturn (out[0] + 1 + th_out[0]) * {scale} / 2 + {offset};\n")
    parts.append("\n")
    return "".join(parts)
def generateRegressionCode(self, _attributes):
    """Generate C code that predicts by averaging all trees of the forest.

    Emits one tree_<i>() function per tree followed by a predict() function
    that sums the individual predictions and returns their mean.
    """
    code = ""
    # FIX: dropped the unused local `root = g.root` that served no purpose
    for g in self.trees:
        code += g.generateGraphCode() + "\n\n"
    # mean over the individual tree predictions
    code += CodeGenerator().generateFunctionHeader(
        "predict",
        CSV().createAttributeDict(_attributes, self.discretization)) + "\n{\n"
    # discretized trees return integers, otherwise floats
    if self.discretization:
        code += "\tint sum = 0;\n"
    else:
        code += "\tfloat sum = 0;\n"
    for i in range(len(self.trees)):
        code += "\tsum += " + CodeGenerator().generateFunctionCall(
            "tree_" + str(i),
            CSV().createAttributeDict(_attributes[1:], self.discretization)) + ";\n"
    if self.discretization:
        # integer mean; de-discretization is intentionally skipped — it is
        # assumed to happen at the application level
        code += "\n\treturn sum / " + str(len(self.trees)) + ";\n"
    else:
        code += "\n\treturn sum / " + str(len(self.trees)) + ".0;\n"
    code += "}"
    return code
def mult(self):
    """Return C code for the dense matrix-vector product helper mult()."""
    gen = CodeGenerator()
    # _matrix is stored row-major with _n columns; _out[x] accumulates column x
    fragments = [
        "void mult(float *_in, const float *_matrix, float *_out, unsigned int _m, unsigned int _n)\n{\n",
        gen.generateForLoop(1, "unsigned int", "x", 0, "_n") + "\t{\n",
        "\t\t_out[x] = 0;\n",
        gen.generateForLoop(2, "unsigned int", "y", 0, "_m"),
        "\t\t\t_out[x] += _in[y] * _matrix[x + y * _n];\n",
        "\t}\n",
        "}",
    ]
    return "".join(fragments)
def crossValidation(self, _model, _training, _attributes, _folder, _discretization=None, **kwargs):
    """Cross-validate exported model code fold by fold.

    For each fold the model is exported to C++ from the fold's training file
    and evaluated on the fold's test file. Returns (ResultMatrix,
    ConfusionMatrix); the confusion matrix is only filled for classification.
    """
    # BUG FIX: the fold count was read from kwargs['xlabel'] — an apparent
    # copy/paste artifact from plotting code. Read the intended 'folds' key
    # first and keep 'xlabel' as a backward-compatible fallback.
    folds = kwargs.get('folds', kwargs.get('xlabel', 10))
    self.discretization = _discretization
    # a numeric label column means regression, anything else classification
    if _attributes[0].type == "NUMERIC":
        self.modelType = Type.REGRESSION
    else:
        self.modelType = Type.CLASSIFICATION
    R = ResultMatrix()
    C = ConfusionMatrix(_attributes[0].type.strip("{").strip("}").split(","))
    fileId = FileHandler().getFileName(_training).replace(".csv", "")
    for i in range(folds):
        foldId = fileId + "_" + str(i) + ".csv"
        training = _folder + "training_" + foldId
        test = _folder + "test_" + foldId

        # export the model code trained on this fold
        codeFile = _folder + "code.cpp"
        CodeGenerator().export(training, _model, codeFile, self.discretization)

        # apply the validation on the held-out fold
        if self.modelType == Type.REGRESSION:
            keys, results, conf = self.regression(codeFile, _attributes, test, _folder + "predictions_" + str(i) + ".csv")
            R.add(keys, results)
        elif self.modelType == Type.CLASSIFICATION:
            keys, results, conf = self.classification(codeFile, _attributes, test, _folder + "predictions_" + str(i) + ".csv")
            R.add(keys, results)
            C.merge(conf)
    return R, C
def activate(self):
    """Return C code that applies the sigmoid activation in place."""
    signature = "void activate(float *_values, const float *_thresholds, unsigned int _size)\n{\n"
    loop = CodeGenerator().generateForLoop(1, "unsigned int", "i", 0, "_size")
    # thresholds act as per-neuron biases added before the sigmoid
    body = "\t\t_values[i] = sigmoid(_values[i] + _thresholds[i]);\n"
    return signature + loop + body + "}"
def generateRegressionCode(self, _attributes, _yMin, _yRange):
    """Generate C code predicting a regression value with a linear SVM.

    Emits the weight vectors, the svm() helper, and a predict() function
    that evaluates the SVM on normalized inputs and denormalizes the result
    using _yMin/_yRange.
    """
    code = ""
    # compute the weight vectors
    for i in range(len(self.model.weights)):
        w = self.model.getWeights(self.model.weights[i], self.model.features)
        code += CodeGenerator().generateArray("const float", "w" + str(i), w) + "\n"
    code += "\n" + self.generateSVMCode() + "\n\n"
    code += CodeGenerator().generateFunctionHeader("predict", CSV().createAttributeDict(_attributes)) + "\n{\n"
    code += "\t" + CodeGenerator().generateArray("float", "v", self.model.normedValues) + "\n\n"
    # BUG FIX: offsets[0] was concatenated without str(), which raises a
    # TypeError when the offset is numeric; the classification variant
    # already wraps the offset in str() — made consistent here.
    code += "\tfloat result = svm(v, w0, " + str(self.model.offsets[0]) + ", " + str(len(self.model.normedValues)) + ");\n"
    # denormalize the label
    code += "\treturn result * " + str(_yRange) + " " + self.add(_yMin) + ";\n"
    code += "}\n\n"
    return code
def generateSVMCode(self):
    """Return the C helper computing the SVM score: dot product plus offset."""
    signature = "float svm(float *_values, const float *_weights, float _offset, unsigned int _size)" + "\n{\n"
    fragments = [
        "\tfloat sum = 0.0;\n",
        CodeGenerator().generateForLoop(1, "unsigned int", "i", 0, "_size"),
        "\t\tsum += _values[i]*_weights[i];\n",
        "\treturn sum + _offset;",
        "\n}",
    ]
    return signature + "".join(fragments)
def generateClassificationCode(self, _attributes, _classes):
    """Generate C code for one-vs-one SVM classification with majority voting."""
    gen = CodeGenerator()
    quoted = ["\"" + x + "\"" for x in _classes]
    parts = [gen.generateArray("const char*", "classes", quoted) + "\n"]
    # one weight vector per pairwise classifier
    for idx in range(len(self.model.weights)):
        weights = self.model.getWeights(self.model.weights[idx], self.model.features)
        parts.append(gen.generateArray("const float", "w" + str(idx), weights) + "\n")
    parts.append("\n" + self.generateSVMCode() + "\n\n")
    parts.append(gen.findMax("int") + "\n\n")
    parts.append(gen.generateFunctionHeader("predict", CSV().createAttributeDict(_attributes)) + "\n{\n")
    # compute the value normalizations
    parts.append("\t" + gen.generateArray("float", "v", self.model.normedValues) + "\n\n")
    # one-vs-one: each pairwise SVM votes for one of its two classes
    parts.append("\t" + gen.generateArray("int", "wins", ["0"] * len(_classes)) + "\n")
    numFeatures = len(self.model.features)
    for idx in range(len(self.model.weights)):
        first = _classes.index(self.model.classes[idx][0])
        second = _classes.index(self.model.classes[idx][1])
        parts.append(f"\tsvm(v, w{idx}, {str(self.model.offsets[idx])}, {numFeatures})<0 ? wins[{first}]++ : wins[{second}]++;\n")
    parts.append(f"\n\tunsigned int index = findMax(wins, {len(_classes)});\n\n")
    parts.append("\treturn classes[index];\n")
    parts.append("}\n\n")
    return "".join(parts)
def generateClassificationCode(self, _attributes, _classes):
    """Generate C code classifying via majority vote over all trees.

    Each tree is rewritten to return a class index instead of a string;
    predict() tallies the votes and returns the winning class name.
    """
    code = ""
    classes = ["\"" + x + "\"" for x in _classes]
    code += CodeGenerator().generateArray("const char*", "classes", classes) + "\n\n"

    for g in self.trees:
        # FIX: dropped the unused local `root = g.root`
        treeCode = g.generateGraphCode() + "\n\n"
        # FIX: the return-type rewrite is independent of the class loop —
        # hoisted out of it (it previously ran once per class, redundantly)
        treeCode = treeCode.replace("const char* tree", "int tree")
        # rewrite string returns into class indices for the voting array
        for i, key in enumerate(classes):
            treeCode = treeCode.replace("return " + key, "return " + str(i))
        code += treeCode

    code += CodeGenerator().findMax("int") + "\n\n"

    # majority decision
    code += CodeGenerator().generateFunctionHeader(
        "predict",
        CSV().createAttributeDict(_attributes, self.discretization)) + "\n{\n"
    code += "\t" + CodeGenerator().generateArray(
        "int", "wins", ["0"] * len(_classes)) + "\n"
    for i in range(len(self.trees)):
        code += "\twins[" + CodeGenerator().generateFunctionCall(
            "tree_" + str(i),
            CSV().createAttributeDict(_attributes[1:], self.discretization)) + "]++;\n"
    code += "\tunsigned int index = findMax(wins, " + str(len(_classes)) + ");\n\n"
    code += "\treturn classes[index];\n"
    code += "}"
    return code
def generateDummyMain(self, _callType, _numAttributes):
    """Return a dummy MSP430 main() that calls predict() once with dummy args."""
    call = CodeGenerator().generateFunctionCall("predict", ["1.2"] * _numAttributes)
    fragments = [
        "\nvoid main(void)\n{\n",
        "\tWDTCTL = WDTPW + WDTHOLD;\n",  # presumably stops the watchdog — MSP430 idiom, confirm
        "\tDCOCTL = 0;\n",
        "\tBCSCTL1 = CALBC1_16MHZ;\n",
        "\tDCOCTL = CALDCO_16MHZ;\n\n",
        "\t" + _callType + " r = " + call + ";\n",
        "\tprintf(\"%i\\n\", 123);\n",
        "\n}\n",
    ]
    return "".join(fragments)
def computeMemorySize(_training, _model, _resultFolder, _discretization):
    """Export the discretized model and measure its footprint per platform.

    Returns one memory measurement per platform (Arduino, MSP430, ESP32).
    NOTE(review): _resultFolder is accepted but never used here — confirm
    whether callers rely on the parameter being present.
    """
    dataset = CSV(_training)
    numFeatures = len(dataset.findAttributes(0)) - 1
    codeFile = "example_rf_sweet_spot.cpp"
    CodeGenerator().export(_training, _model, codeFile, _discretization)
    # "unsigned char" is passed as the result type — presumably the
    # discretized prediction type; confirm against platform.run()
    return [p.run(codeFile, "unsigned char", numFeatures)
            for p in [Arduino(), MSP430(), ESP32()]]
def activateLayers(self, _header, _normed):
    """Emit C code activating all layers via the mult()/activate() helpers.

    Hidden results are stored in z0, z1, ...; the output layer writes to
    `out`, which is either argmax'd (classification) or denormalized
    (regression).
    """
    parts = []
    prev = "in"
    for i, layer in enumerate(self.L):
        # row count: input width for the first layer, previous layer size after
        rows = len(_header) if i == 0 else len(self.L[i - 1])
        cols = len(layer)
        parts.append(f"\tfloat z{i}[{cols}] = {{0}};\n")
        parts.append(f"\tmult({prev}, &w{i}[0][0], z{i}, {rows}, {cols});\n")
        parts.append(f"\tactivate(z{i}, th{i}, {cols});\n\n")
        prev = "z" + str(i)

    # output layer
    mOut = len(self.layers[-1][0])
    nOut = str(len(self.outputLayer))
    parts.append(f"\tfloat out[{nOut}] = {{0}};\n")
    parts.append(f"\tmult({prev}, &w_out[0][0], out, {mOut}, {nOut});\n")

    if self.modelType == Type.CLASSIFICATION:
        parts.append(f"\tactivate(out, th_out, {nOut});\n\n")
        parts.append(f"\tunsigned int index = findMax(out, {nOut});\n\n")
        parts.append("\treturn classes[index];\n")
    else:
        # undo the output normalization: scale by half the range, add the minimum
        scale = CodeGenerator().float2String(_normed[0][1])
        offset = CodeGenerator().float2String(_normed[0][0])
        parts.append(f"\n\treturn (out[0] + 1 + th_out[0]) * {scale} / 2.0 + {offset};\n")
    return "".join(parts)
def computeMemorySize(_training, _model, _regression):
    """Export the model and measure its footprint on each target platform.

    Returns one memory measurement per platform (Arduino, MSP430, ESP32).
    """
    csv = CSV(_training)
    lAtt = len(csv.findAttributes(0)) - 1
    codeFile = "example_rf_sweet_spot.cpp"
    CodeGenerator().export(_training, _model, codeFile)
    # FIX: replaced the `== True` anti-idiom with a truthiness test.
    # Regression models predict a float, classification a class-name string.
    resultType = "float" if _regression else "const char*"
    platforms = [Arduino(), MSP430(), ESP32()]
    return [platform.run(codeFile, resultType, lAtt) for platform in platforms]
from data.CSV import CSV
from experiment.Experiment import Experiment
from code.CodeGenerator import CodeGenerator
# NOTE(review): RandomForest and CodeEvaluator are used below but were not
# imported in this script — confirm their module paths and add the imports.

# define the training data set and set up the model
# FIX: removed a dead assignment — `training` was first set to
# "../examples/mnoA.csv" and immediately overwritten.
training = "../examples/vehicleClassification.csv"
csv = CSV(training)
attributes = csv.findAttributes(0)
d = csv.discretizeData()

model = RandomForest()
model.config.trees = 10
model.config.depth = 5

# perform a 10-fold cross validation
e = Experiment(training, "example_rf_disc")
e.classification([model], 10)

# export the C++ code
CodeGenerator().export(training, model, e.path("rf.cpp"), d)

# cross-validate the exported code itself
ce = CodeEvaluator()
R, C = ce.crossValidation(model, training, attributes, e.tmp(), d)
R.printAggregated()

# all results are written to results/example_rf_disc/
def generateCode(self, _file):
    """Generate the complete C++ source for this ANN and write it to _file.

    Emits, in order: the classes array (classification only), the weight
    matrices (unless multiplications are unrolled), the threshold vectors,
    the helper functions (sigmoid/activate/mult/findMax), and the predict()
    entry point that normalizes its inputs and activates the layers.
    """
    csv = CSV(self.training)
    attributes = csv.findAttributes(0)
    # normed holds (min, range) pairs per column — presumably; confirm
    # against self.normalize()
    normed = self.normalize(csv, attributes)
    resultType = "float"
    code = "#include <math.h>\n"
    if self.modelType == Type.CLASSIFICATION:
        code += ""
        # the label attribute type looks like "{a,b,c}" — extract the names
        classes = attributes[0].type.strip("{").strip("}").split(",")
        classes = ["\"" + key + "\"" for key in classes]
        code += CodeGenerator().generateArray("const char*", "classes", classes) + "\n\n"
        resultType = "const char*"
    else:
        code += "\n"

    # weight matrices (only needed when mult() is used; with unrolling the
    # weights are inlined into the generated expressions)
    if not self.useUnrolling:
        for i in range(0, len(self.layers)):
            W = self.layers[i][0]
            name = "w" + str(i)
            if i == len(self.layers) - 1:
                name = "w_out"
            code += "const " + CodeGenerator().generateMatrix(
                "float", name, W) + "\n"
        code += "\n"

    # threshold vectors (layer biases); the last layer is named th_out
    for i in range(0, len(self.layers)):
        matrix = self.layers[i]
        T = self.layers[i][1]
        name = "th" + str(i)
        if i == len(self.layers) - 1:
            name = "th_out"
        code += "const " + CodeGenerator().generateArray("float", name, T) + "\n"
    code += "\n"

    # generate the required ann-specific methods
    code += self.sigmoid() + "\n\n"
    code += self.activate() + "\n\n"
    if not self.useUnrolling:
        code += self.mult() + "\n\n"
    if self.modelType == Type.CLASSIFICATION:
        code += CodeGenerator().findMax("float") + "\n\n"

    # generate the callable method
    header = ["_" + key for key in self.inputLayer]
    code += resultType + " predict(" + ", ".join(
        ["float " + x for x in header]) + ")\n{\n"

    # input layer: replace each raw parameter with its normalization
    # expression (normed[0] belongs to the label, hence the i + 1 offset)
    for i in range(0, len(header)):
        header[i] = self.norm(header[i], normed[i + 1][0], normed[i + 1][1])
    code += "\t" + CodeGenerator().generateArray("float", "in", header) + "\n\n"

    # activate the layers
    if self.useUnrolling:
        code += self.activateLayersWithUnrolling(normed)
    else:
        code += self.activateLayers(header, normed)
    code += "}\n"
    #code += CodeGenerator().generateDummyMain(len(attributes)-1)

    FileHandler().write(code, _file)
from models.ann.ANN import ANN
from experiment.Experiment import Experiment
from code.CodeGenerator import CodeGenerator
from data.FileHandler import FileHandler
from data.CSV import CSV

# training data set
dataset = "../examples/mnoA.csv"

# ANN with two hidden layers of ten neurons each
ann = ANN()
ann.hiddenLayers = [10, 10]

# run a 10-fold cross validation (regression task)
experiment = Experiment(dataset, "example_ann_visualization")
experiment.regression([ann], 10)

# emit the C++ implementation and an EPS visualization of the network
CodeGenerator().export(dataset, ann, experiment.path("ann.cpp"))
ann.exportEps(experiment.path("ann_vis.eps"))
from models.m5.M5 import M5
from experiment.Experiment import Experiment
from code.CodeGenerator import CodeGenerator
from data.CSV import CSV
from code.Arduino import Arduino

# training data set and model
dataset = "../examples/mnoA.csv"
m5 = M5()

# run a 10-fold cross validation (regression task)
experiment = Experiment(dataset, "example_arduino")
experiment.regression([m5], 10)

# export the raw C++ code
cppFile = experiment.path("arduino.cpp")
CodeGenerator().export(dataset, m5, cppFile)

# build a dummy Arduino project that executes the model and report its memory
data = CSV()
data.load(dataset)
attributeCount = len(data.findAttributes(0)) - 1
footprint = Arduino().run(cppFile, "float", attributeCount)
print(footprint)

# all results are written to results/example_arduino/
def exportCode(_args, _resultFolder, _training, _models):
    """Export one C++ file per requested model.

    Model names come from the comma-separated _args.models string; the i-th
    name is paired with the i-th trained model in _models.
    """
    names = _args.models.split(",")
    # FIX: replaced the `range(len(...))` loop with enumerate; indexing
    # _models explicitly preserves the original IndexError if the two
    # sequences have mismatched lengths.
    for i, name in enumerate(names):
        CodeGenerator().export(_training, _models[i], _resultFolder + name + ".cpp")