Example #1
0
        def RunQDAShogun(q):
            """Train a Shogun QDA model (and classify the test set when one
            is supplied), timing the work with ``totalTimer``.

            Puts the elapsed time (or -1 on failure) on the queue *q* and
            returns the same value.
            """
            totalTimer = Timer()

            Log.Info("Loading dataset", self.verbose)
            try:
                # Load the training data; features are every column but the last.
                trainData = np.genfromtxt(self.dataset[0], delimiter=',')
                trainFeat = modshogun.RealFeatures(trainData[:, :-1].T)

                if len(self.dataset) == 2:
                    testSet = np.genfromtxt(self.dataset[1], delimiter=',')
                    # BUG FIX: the array is named 'testSet'; the original code
                    # referenced the undefined name 'testData', which raised
                    # NameError and made every two-dataset run report -1.
                    testFeat = modshogun.RealFeatures(testSet.T)

                # Labels are the last column of the training set.
                labels = modshogun.MulticlassLabels(
                    trainData[:, (trainData.shape[1] - 1)])

                with totalTimer:
                    model = modshogun.QDA(trainFeat, labels)
                    model.train()
                    if len(self.dataset) == 2:
                        model.apply(testFeat).get_labels()
            except Exception as e:
                # Report failure to the parent process via the queue.
                q.put(-1)
                return -1

            time = totalTimer.ElapsedTime()
            q.put(time)
            return time
Example #2
0
        def RunQDAShogun():
            """Train a Shogun QDA model (and classify the test set when one
            is supplied), timing the work with ``totalTimer``.

            Returns the elapsed time in seconds, or -1 on failure.
            """
            totalTimer = Timer()

            Log.Info("Loading dataset", self.verbose)
            try:
                # Load the training data; features are every column but the last.
                trainData = np.genfromtxt(self.dataset[0], delimiter=',')
                trainFeat = modshogun.RealFeatures(trainData[:, :-1].T)

                if len(self.dataset) == 2:
                    testSet = np.genfromtxt(self.dataset[1], delimiter=',')
                    # BUG FIX: the array is named 'testSet'; the original code
                    # referenced the undefined name 'testData', which raised
                    # NameError and made every two-dataset run report -1.
                    testFeat = modshogun.RealFeatures(testSet.T)

                # Reject any benchmark options this method does not understand.
                if len(options) > 0:
                    Log.Fatal("Unknown parameters: " + str(options))
                    raise Exception("unknown parameters")

                # Labels are the last column of the training set.
                labels = modshogun.MulticlassLabels(
                    trainData[:, (trainData.shape[1] - 1)])

                with totalTimer:
                    model = modshogun.QDA(trainFeat, labels)
                    model.train()
                    if len(self.dataset) == 2:
                        model.apply_multiclass(testFeat).get_labels()
            except Exception as e:
                return -1

            return totalTimer.ElapsedTime()
Example #3
0
    def RunMetrics(self, options):
        """Run the QDA benchmark and collect metrics.

        Always records the runtime; when a third dataset with true labels
        is available, also computes classification quality metrics.
        Returns the metrics dict, or a negative value if the run failed.
        """
        Log.Info("Perform QDA.", self.verbose)

        runtime = self.QDAShogun(options)
        if runtime < 0:
            # Propagate the failure code from the timed run.
            return runtime

        metrics = {'Runtime': runtime}

        if len(self.dataset) >= 3:
            # Re-train on the full training data and predict the test set.
            trainData, labels = SplitTrainData(self.dataset)
            testData = LoadDataset(self.dataset[1])
            truelabels = LoadDataset(self.dataset[2])

            model = modshogun.QDA(modshogun.RealFeatures(trainData.T),
                                  modshogun.MulticlassLabels(labels))
            model.train()
            predictions = model.apply(
                modshogun.RealFeatures(testData.T)).get_labels()

            # Derive quality metrics from the confusion matrix.
            confusionMatrix = Metrics.ConfusionMatrix(truelabels, predictions)
            metrics.update({
                'ACC': Metrics.AverageAccuracy(confusionMatrix),
                'MCC': Metrics.MCCMultiClass(confusionMatrix),
                'Precision': Metrics.AvgPrecision(confusionMatrix),
                'Recall': Metrics.AvgRecall(confusionMatrix),
                'MSE': Metrics.SimpleMeanSquaredError(truelabels, predictions),
            })

        return metrics
Example #4
0
  def RunMetrics(self, options):
    """Train QDA on the training set, predict the test set, and return
    classification metrics.

    Requires three datasets (train, test, true labels); returns -1 when
    they are not all supplied, or the metrics dict on success.
    """
    if len(self.dataset) >= 3:
      trainData, labels = SplitTrainData(self.dataset)
      testData = LoadDataset(self.dataset[1])
      truelabels = LoadDataset(self.dataset[2])

      model = modshogun.QDA(modshogun.RealFeatures(trainData.T),
                            modshogun.MulticlassLabels(labels))
      model.train()
      predictions = model.apply(
          modshogun.RealFeatures(testData.T)).get_labels()

      # Datastructure to store the results.
      metrics = {}

      confusionMatrix = Metrics.ConfusionMatrix(truelabels, predictions)
      metrics['ACC'] = Metrics.AverageAccuracy(confusionMatrix)
      metrics['MCC'] = Metrics.MCCMultiClass(confusionMatrix)
      metrics['Precision'] = Metrics.AvgPrecision(confusionMatrix)
      metrics['Recall'] = Metrics.AvgRecall(confusionMatrix)
      metrics['MSE'] = Metrics.SimpleMeanSquaredError(truelabels, predictions)
      return metrics
    else:
      Log.Fatal("This method requires three datasets!")
      # BUG FIX: previously fell off the end and returned None, which
      # breaks callers that compare the result with `< 0` (TypeError in
      # Python 3). Return the conventional failure code instead.
      return -1