import numpy as np
import modshogun

# Timer, Log, Metrics, SplitTrainData and LoadDataset come from the
# benchmark's own utility modules (not shown in this excerpt).

def RunQDAShogun(self, q):
  totalTimer = Timer()
  Log.Info("Loading dataset", self.verbose)
  try:
    # Load train and test dataset.
    trainData = np.genfromtxt(self.dataset[0], delimiter=',')
    trainFeat = modshogun.RealFeatures(trainData[:, :-1].T)

    if len(self.dataset) == 2:
      testData = np.genfromtxt(self.dataset[1], delimiter=',')
      testFeat = modshogun.RealFeatures(testData.T)

    # Labels are the last row of the training set.
    labels = modshogun.MulticlassLabels(
        trainData[:, (trainData.shape[1] - 1)])

    with totalTimer:
      model = modshogun.QDA(trainFeat, labels)
      model.train()

      if len(self.dataset) == 2:
        model.apply(testFeat).get_labels()
  except Exception as e:
    q.put(-1)
    return -1

  time = totalTimer.ElapsedTime()
  q.put(time)
  return time
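# The queue argument above exists so the benchmark can be executed in a worker
# process and abandoned if the Shogun call never returns. A minimal sketch of
# that pattern, assuming `benchmark` is an instance of the surrounding
# benchmark class (not shown in this excerpt); the timeout value and helper
# name are illustrative only.
import multiprocessing

def run_qda_with_timeout(benchmark, timeout=9000):
  q = multiprocessing.Queue()
  p = multiprocessing.Process(target=benchmark.RunQDAShogun, args=(q,))
  p.start()
  p.join(timeout)
  if p.is_alive():
    # Treat a run that never finished like any other failure.
    p.terminate()
    return -1
  return q.get()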
def RunMetrics(self, options):
  Log.Info("Perform QDA.", self.verbose)

  results = self.QDAShogun(options)
  if results < 0:
    return results

  metrics = {'Runtime': results}

  if len(self.dataset) >= 3:
    trainData, labels = SplitTrainData(self.dataset)
    testData = LoadDataset(self.dataset[1])
    truelabels = LoadDataset(self.dataset[2])

    model = modshogun.QDA(modshogun.RealFeatures(trainData.T),
        modshogun.MulticlassLabels(labels))
    model.train()
    predictions = model.apply(modshogun.RealFeatures(
        testData.T)).get_labels()

    confusionMatrix = Metrics.ConfusionMatrix(truelabels, predictions)
    metrics['ACC'] = Metrics.AverageAccuracy(confusionMatrix)
    metrics['MCC'] = Metrics.MCCMultiClass(confusionMatrix)
    metrics['Precision'] = Metrics.AvgPrecision(confusionMatrix)
    metrics['Recall'] = Metrics.AvgRecall(confusionMatrix)
    metrics['MSE'] = Metrics.SimpleMeanSquaredError(
        truelabels, predictions)

  return metrics
def RunMetrics(self, options):
  if len(self.dataset) >= 3:
    trainData, labels = SplitTrainData(self.dataset)
    testData = LoadDataset(self.dataset[1])
    truelabels = LoadDataset(self.dataset[2])

    model = modshogun.QDA(modshogun.RealFeatures(trainData.T),
        modshogun.MulticlassLabels(labels))
    model.train()
    predictions = model.apply(modshogun.RealFeatures(testData.T)).get_labels()

    # Data structure to store the results.
    metrics = {}

    confusionMatrix = Metrics.ConfusionMatrix(truelabels, predictions)
    metrics['ACC'] = Metrics.AverageAccuracy(confusionMatrix)
    metrics['MCC'] = Metrics.MCCMultiClass(confusionMatrix)
    metrics['Precision'] = Metrics.AvgPrecision(confusionMatrix)
    metrics['Recall'] = Metrics.AvgRecall(confusionMatrix)
    metrics['MSE'] = Metrics.SimpleMeanSquaredError(truelabels, predictions)

    return metrics
  else:
    Log.Fatal("This method requires three datasets!")
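# A possible way to drive RunMetrics(), assuming the surrounding class is
# constructed with a list of CSV paths (train data, test data, true test
# labels); the class name and constructor arguments below are illustrative
# only and not taken from this excerpt.
#
#   benchmark = SHOGUN_QDA(['train.csv', 'test.csv', 'test_labels.csv'])
#   metrics = benchmark.RunMetrics({})
#   Log.Info("ACC: " + str(metrics['ACC']), True)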
def RunQDAShogun(self, options):
  totalTimer = Timer()
  Log.Info("Loading dataset", self.verbose)
  try:
    # Load train and test dataset.
    trainData = np.genfromtxt(self.dataset[0], delimiter=',')
    trainFeat = modshogun.RealFeatures(trainData[:, :-1].T)

    if len(self.dataset) == 2:
      testData = np.genfromtxt(self.dataset[1], delimiter=',')
      testFeat = modshogun.RealFeatures(testData.T)

    if len(options) > 0:
      Log.Fatal("Unknown parameters: " + str(options))
      raise Exception("unknown parameters")

    # Labels are the last row of the training set.
    labels = modshogun.MulticlassLabels(
        trainData[:, (trainData.shape[1] - 1)])

    with totalTimer:
      model = modshogun.QDA(trainFeat, labels)
      model.train()

      if len(self.dataset) == 2:
        model.apply_multiclass(testFeat).get_labels()
  except Exception as e:
    return -1

  return totalTimer.ElapsedTime()
# Helper for a Django-style view: parses user-drawn points out of
# request.POST and converts them to Shogun features and labels. `sg` is
# assumed to be the Shogun modular Python interface
# (e.g. `import modshogun as sg`).
def get_multi_features(request):
  try:
    point_set_raw = json.loads(request.POST['point_set'])
  except (KeyError, ValueError):
    raise ValueError("cannot read click pts")

  x = []
  y = []
  labels = []
  for pt in point_set_raw:
    x.append(float(pt['x']))
    y.append(float(pt['y']))
    labels.append(float(pt['label']))

  n = len(set(labels))
  if not n:
    raise ValueError("0-labels")
  elif n == 1:
    raise ValueError("1-class-labels")
  else:
    features = np.array([x, y])
    features = sg.RealFeatures(features)
    labels = sg.MulticlassLabels(np.array(labels))
    return features, labels
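# A small sketch of how get_multi_features() might be exercised outside the
# web app. The _FakeRequest class below is a hypothetical stand-in for the
# Django request object; only the POST mapping read by the helper is provided.
import json

class _FakeRequest(object):
  def __init__(self, point_set):
    # Mimic request.POST carrying a JSON-encoded point set.
    self.POST = {'point_set': json.dumps(point_set)}

points = [{'x': 0.0, 'y': 0.1, 'label': 0},
          {'x': 1.0, 'y': 1.2, 'label': 1},
          {'x': 0.9, 'y': 1.1, 'label': 1}]
features, labels = get_multi_features(_FakeRequest(points))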