def confidence(self):
    # Rule: a -> b
    # confidence(a -> b) = support(a, b) / support(a)
    attributes = self.right + self.left
    num = support(attributes)
    den = support(self.left)
    conf = num / den
    return conf * 100
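The support helper this method calls is not shown in the example. As a rough sketch of the relationship the comment describes (the standalone support function and the transactions list below are illustrative assumptions, not part of the original code), support is the fraction of transactions that contain every item of an itemset, and confidence divides the joint support of antecedent and consequent by the antecedent's support:

# Hypothetical transaction database, only for illustrating the formula above.
transactions = [
    {"bread", "milk"},
    {"bread", "butter"},
    {"bread", "milk", "butter"},
    {"milk"},
]

def support(itemset):
    # Fraction of transactions containing every item of the itemset.
    hits = sum(1 for t in transactions if set(itemset) <= t)
    return hits / float(len(transactions))

# confidence(bread -> milk) = support(bread, milk) / support(bread)
conf = support(["bread", "milk"]) / support(["bread"]) * 100
print(round(conf, 1))  # 66.7: milk appears in 2 of the 3 baskets that contain bread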
Example #2
	def _libInit(self):
		try:
			# Initialize the main plugin engine
			self.plug = plug(self)
			self.update = update(self)
			self.ui = ui(self)
			self.support = support(self)

			self.logger.threaddebug("Dynamic libraries complete")

		except Exception as e:
			self.logger.error(ext.getException(e))
Example #4
def main():
    logging.basicConfig(filename="gfl.log",
                        level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    stdscr = curses.initscr()
    curses.start_color()
    curses.noecho()
    curses.cbreak()
    curses.curs_set(0)

    maxY = stdscr.getmaxyx()[0]
    maxX = stdscr.getmaxyx()[1]

    logging.debug("Max Y, X: %s, %s" % (maxY, maxX))

    scenes = support(maxY, maxX)

    try:
        prefetchPhotos()
        if scenes.isCompatible():
            scenes.playMusic()
        else:
            proc = Process(target=scenes.playMusic, name="Audio Player")
            proc.start()
            logging.debug('Process %s with PID %s started' %
                          (proc.name, proc.pid))
        scenes.heartBeats(stdscr)
        scenes.scene1(stdscr)
        scenes.scene2(stdscr)
        scenes.scene3(stdscr)
        scenes.playSlideShow(prefetchArr, 0, 8, 3.7)
        scenes.blankScreen(stdscr, 1.06)
        scenes.scene4(stdscr)
        scenes.playSlideShow(prefetchArr, 9, 10, 2.8)
        scenes.playSlideShow(prefetchArr, 11, 25, 3.7)
        scenes.blankScreen(stdscr, 20)
        scenes.scene5(stdscr)
        endSession()

    except KeyboardInterrupt:
        logging.warning("Keyboard Interrupt Event Detected...shutting down")
        endSession()

    except Exception as e:
        logging.error("%s" % e)
        endSession()
    finally:
        if not scenes.isCompatible():
            logging.warning("Killing Child Process %s with pid %s" %
                            (proc.name, proc.pid))
            proc.terminate()
Example #5
    def _libInit(self):
        try:
            # Initialize the main plugin engine
            self.plug = plug(self)
            #self.update = update (self) # Obsolete by Plugin Store November 2017
            self.ui = ui(self)
            self.support = support(self)
            self.jstash = jstash(self)

            self.logger.threaddebug("Dynamic libraries complete")

        except Exception as e:
            self.logger.error(ext.getException(e))
Example #6
	auth = lg_authority.AuthRoot()
	auth__doc = "The object that serves authentication pages"


	@cherrypy.expose
	def index(self):
		output = ""

		output += getIndexContent()

		output = getPage(output, '')

		return output 

if __name__ == '__main__':

	#cherrypy.config.update({'server.socket_port':index_port})
	cherrypy.config.update(cherry_settings)
	
	index = index()
	index.upload = upload.upload()
	index.manage = manage.manage()
	index.modify = modify.modify()
	index.download = download.download()
	index.learn = learn.learn()
	index.support = support.support()
	index.visualize = visualize.visualize()
	#index.dashboard = dashboard.dashboard()
	cherrypy.quickstart(index)

Example #7
import sys
from copy import deepcopy

import numpy as np
import scipy as sp

import matplotlib.pyplot as plt
import matplotlib.offsetbox as offsetbox
from mpl_toolkits.mplot3d import Axes3D

from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale

import support

sup = support.support()

#----------------------------------------------------------------------
def printUsageAndExit(error):
	print "-" * 64
	print "ARGUMENT ERROR: " + str(error)
	print "Usage: cluster.py [dataFile] [options (optional)]"
	print "  Options:"
	print "    --k #             Sets the number of clusters (default = 5)"
	print "    --components #    Sets num_components for PCA (default = 3)"
	print "    --output [file]   Gives program a file to output clustered data"
	print "-" * 64
	sys.exit()
#----------------------------------------------------------------------

#----------------------------------------------------------------------
Example #8
    def run(self):
        startTime = time.time()

        #check arguments
        dataReader, errorMargin, mID, normalize, output, outFile, breakdown, crossvalNum, plot = self.checkArgs(
        )

        #initialize support routines
        sup = support.support()

        print "TRAINING FILE: " + str(sys.argv[1])
        print "Constructing data lists..."
        keys, data, outs, actuals, pops = sup.constructData(dataReader)
        t2 = time.time()
        print " -> Construction COMPLETE. " + str(t2 - startTime) + " seconds."
        print "      Number of complete vectors generated from data: " + str(
            len(data))

        if normalize:
            print "-" * 32
            print "Normalizing features..."
            data, maxs = sup.normalize_crossval(data)

            print "  -> Maximums ="
            print maxs

        print "-" * 32

        if plot:
            accTotal = []
            avgAccuracyTotal = []

            plt.figure(figsize=(8, 6), dpi=80)
            plt.title("Test Accuracy of KMeans within " + str(errorMargin) +
                      " Margin")
            plt.xlabel("Crossvalidation Fold #")
            plt.ylabel("Average Accuracy Per Full Pass")
            plt.xticks(
                np.linspace(0, crossvalNum, crossvalNum + 1, endpoint=True))

            colors = [
                "red", "green", "blue", "orange", "pink", "purple", "yellow",
                "gray", "black", "turquoise"
            ]

            for i in range(2, crossvalNum + 1):
                accuracy, avgAccuracy = self.crossVal(data, outs, pops,
                                                      actuals, errorMargin, i,
                                                      sup)
                avgAccuracyTotal.append(avgAccuracy)
                plt.plot(accuracy, color=colors[i - 2], label="k = " + str(i))

            plt.legend(loc=4)

            plt.figure(figsize=(8, 6), dpi=80)
            plt.title("Test Average Accuracy for All Folds at " +
                      str(errorMargin) + " Error Margin")
            plt.xlabel("--crossval # (k-2)")
            plt.ylabel("Average Accuracy Per Full Pass")
            plt.xticks(
                np.linspace(0, crossvalNum, crossvalNum + 1, endpoint=True))
            plt.plot(avgAccuracyTotal, color="green")

            plt.show()

        else:
            accuracy, avgAccuracy = self.crossVal(data, outs, pops, actuals,
                                                  errorMargin, crossvalNum,
                                                  sup)

            #print out test results before graphing
            print "CROSSVALIDATION BREAKDOWN"
            print "  ---------------------------------------------------------------------------"
            print "  | Average Accuracy of Crossvalidation =   " + str(
                avgAccuracy)
            print "  | KMeans, k =                             " + str(
                crossvalNum)
            print "  | Test Set Size =                   " + str(
                len(data) / int(crossvalNum)) + " datapoints"
            print "  ---------------------------------------------------------------------------"

            plt.figure(figsize=(8, 6), dpi=80)
            plt.title("Test Accuracy of KMeans within " + str(errorMargin) +
                      " Margin")
            plt.xlabel("Crossvalidation Fold #")
            plt.ylabel("Average Accuracy Per Full Pass")
            plt.xticks(
                np.linspace(0, len(accuracy), len(accuracy) + 1,
                            endpoint=True))
            plt.plot(accuracy, color="green")
            plt.show()
Example #9
import sys, math, operator, time, csv
from copy import deepcopy

from sklearn import linear_model
from sklearn import preprocessing
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.offsetbox as offsetbox

import support

startTime = time.time()

#support object
sup = support.support()


#-----------------------------------------------------------------------------------------------------
def printUsageAndExit():
    print "Usage: plot.py [dataFile] [options (optional)]"
    print "  Options:"
    print "   --features #1 #2          Will be used as x-coord,y-coord (default: 2 highest weighted)"
    #print "   --test-all                Will generate a plot for ALL possible feature combos"
    #print "   --predictions [file]      Will take file as input for predictions"
    #print "       --with-predict-error  Will plot with different marker based on error"
    #print "       --with-predictions    Will plot each point with it's prediction from file"
    sys.exit()


#-----------------------------------------------------------------------------------------------------
Example #10
    def run(self):
        startTime = time.time()

        #check arguments
        dataReader, testReader, errorMargin, mID, normalize, output, outFile, breakdown = self.checkArgs(
        )

        #initialize support routines
        sup = support.support()

        print "TRAINING FILE: " + str(sys.argv[1])
        print "Constructing data lists..."
        keys, data, outs, actuals, pops = sup.constructData(dataReader)
        t2 = time.time()
        print " -> Construction COMPLETE. " + str(t2 - startTime) + " seconds."
        print "      Number of complete vectors generated from data: " + str(
            len(data))

        print "-" * 32

        print "TEST FILE: " + str(sys.argv[2])
        print "Constructing test lists..."
        testKeys, testData, testOuts, testActuals, testPops = sup.constructData(
            testReader)
        t3 = time.time()
        print " -> Construction COMPLETE. " + str(t3 - t2) + " seconds."
        print "      Number of complete vectors generated from data: " + str(
            len(testData))

        if normalize:
            print "-" * 32
            print "Normalizing features..."
            data, testData, maxs = sup.normalize(data, testData)

            print "  -> Maximums ="
            print maxs

        print "-" * 32

        #BAYES RIDGE
        if mID == 1:
            print "Fitting Bayesian Ridge model to " + str(
                len(data)) + " vectors:"
            print " -> Using " + str(len(data)) + " vectors."
            model = linear_model.BayesianRidge()
            dataMatrix = np.array(data)
            outsMatrix = np.array(outs)
            model.fit(dataMatrix, outsMatrix)
            t4 = time.time()
            print " -> Training COMPLETE. " + str(t4 - t3) + " seconds."
            print "      Weight vector:\n" + str(model.coef_)

        #LINEAR REGRESSION
        elif mID == 2:
            print "Fitting Linear Regression model to " + str(
                len(data)) + " vectors:"
            print " -> Using " + str(len(data)) + " vectors."
            model = linear_model.LinearRegression()
            dataMatrix = np.array(data)
            outsMatrix = np.array(outs)
            model.fit(dataMatrix, outsMatrix)
            t4 = time.time()
            print " -> Training COMPLETE. " + str(t4 - t3) + " seconds."

        #GAUSSIAN NB
        elif mID == 3:
            print "Fitting GaussianNB model to " + str(len(data)) + " vectors:"
            print " -> Using " + str(len(data)) + " vectors."
            model = GaussianNB()
            dataMatrix = np.array(data)
            outsMatrix = np.array(outs)
            model.fit(dataMatrix, outsMatrix)
            t4 = time.time()
            print " -> Training COMPLETE. " + str(t4 - t3) + " seconds."

        print "-" * 32

        print "Testing against test file..."
        predictions = deepcopy(model.predict(testData))
        t5 = time.time()
        print " -> Testing COMPLETE. " + str(t5 - t4) + " seconds."

        #convert outputs PERCENTILE -> POPULATION FIGURE
        predictPop = sup.convertPopVals(predictions, testPops)

        print "\nCrunchifying tasty test data stats for review... Yum"
        misses, error, totalError, totalErrorPercentile = sup.crunchTestResults(
            predictPop, testActuals, errorMargin)
        t6 = time.time()
        if output:
            print " -> Wrote predictions to output file: " + str(
                sys.argv[sys.argv.index("--output") + 1])
        print " -> Crunching COMPLETE. " + str(t6 - t5) + " seconds."

        print "-" * 32

        print "ALGORITHM SUMMARY"
        print "  Training Vectors:      " + str(
            len(data)) + " from file: " + str(sys.argv[1])
        print "  Testing Vectors:       " + str(
            len(testData)) + " from file: " + str(sys.argv[2])
        print "  Accuracy Summary:      "
        print "      -------------------------------------------------------"
        print "      |        *****Correct/Incorrect Stats******"
        print "      | Using " + str(errorMargin) + " acceptable error margin:"
        print "      |    " + str(len(testData) - misses) + " correct"
        print "      |    " + str(misses) + " incorrect"
        print "      |    " + str(len(testData)) + " total"
        print "      | Prediction Accuracy:      " + str(1 - error)
        print "      | Prediction Inaccuracy:    " + str(error)
        print "      |"
        print "      |        *****Marginal Accuracy Stats******"
        print "      | NOT FOR FINAL EVALUATION PURPOSES"
        #print "      | Total Error:           " + str(totalError)
        print "      | Average Population Error: " + str(
            float(totalError) / float(len(testData)))
        print "      | Average Error Percentile: " + str(
            float(totalErrorPercentile) / float(len(testData)))
        print "      -------------------------------------------------------"
        print "  Total Time:            " + str(time.time() -
                                                startTime) + " seconds"

        if output:
            print "\n  -> Writing outputs to file: " + str(
                sys.argv[sys.argv.index("--output") + 1])
            info = [0] * 5
            #model
            info[0] = "Naive Bayesian Ridge" if mID == 1 else "Linear Regression"
            #numvectors
            info[1] = len(data)
            info[2] = len(testData)
            #average error
            info[3] = str(float(totalError) / float(len(testData)))
            #average percentile error
            info[4] = str(float(totalErrorPercentile) / float(len(testData)))

            sup.writeOutputFile(testKeys, predictions, testOuts, predictPop,
                                testActuals, info, outFile)

        if breakdown:
            print "-" * 32
            print "EXPLICIT TEST VECTOR BREAKDOWN"
            i = 0
            for prediction, actual in zip(predictPop, testActuals):
                key = (testKeys[i][0], testKeys[i][1])
                print str(key) + "; Prediction: " + str(
                    prediction) + "; Actual: " + str(actual)
                i += 1
Example #11
	def run(self):
		startTime = time.time()

		#check arguments
		dataReader,errorMargin,mID,normalize,output,outFile,breakdown,crossvalNum,plot = self.checkArgs()

		#initialize support routines
		sup = support.support()

		print "TRAINING FILE: " + str(sys.argv[1])
		print "Constructing data lists..."
		keys,data,outs,actuals,pops = sup.constructData(dataReader)
		t2 = time.time()
		print " -> Construction COMPLETE. " + str(t2-startTime) + " seconds."
		print "      Number of complete vectors generated from data: " + str(len(data))

		if normalize:
			print "-" * 32
			print "Normalizing features..."
			data,maxs = sup.normalize_crossval(data)

			print "  -> Maximums ="
			print maxs

		print "-" * 32

		if plot:
			accTotal = []
			avgAccuracyTotal = []

			plt.figure(figsize=(8,6), dpi=80)
			plt.title("Test Accuracy of KMeans within " + str(errorMargin) + " Margin")
			plt.xlabel("Crossvalidation Fold #")
			plt.ylabel("Average Accuracy Per Full Pass")
			plt.xticks(np.linspace(0,crossvalNum,crossvalNum+1,endpoint=True))

			colors = ["red","green","blue","orange","pink","purple","yellow","gray","black","turquoise"]

			for i in range(2,crossvalNum+1):
				accuracy,avgAccuracy = self.crossVal(data,outs,pops,actuals,errorMargin,i,sup)
				avgAccuracyTotal.append(avgAccuracy)
				plt.plot(accuracy,color=colors[i-2],label="k = " + str(i))

			plt.legend(loc=4)

			plt.figure(figsize=(8,6), dpi=80)
			plt.title("Test Average Accuracy for All Folds at " + str(errorMargin) + " Error Margin")
			plt.xlabel("--crossval # (k-2)")
			plt.ylabel("Average Accuracy Per Full Pass")
			plt.xticks(np.linspace(0,crossvalNum,crossvalNum+1,endpoint=True))
			plt.plot(avgAccuracyTotal,color="green")

			plt.show()


		else:
			accuracy,avgAccuracy = self.crossVal(data,outs,pops,actuals,errorMargin,crossvalNum, sup)

			#print out test results before graphing
			print "CROSSVALIDATION BREAKDOWN"
			print "  ---------------------------------------------------------------------------"
			print "  | Average Accuracy of Crossvalidation =   " + str(avgAccuracy)
			print "  | KMeans, k =                             " + str(crossvalNum)
			print "  | Test Set Size =                   " + str(len(data)/int(crossvalNum)) + " datapoints"
			print "  ---------------------------------------------------------------------------"

			plt.figure(figsize=(8,6), dpi=80)
			plt.title("Test Accuracy of KMeans within " + str(errorMargin) + " Margin")
			plt.xlabel("Crossvalidation Fold #")
			plt.ylabel("Average Accuracy Per Full Pass")
			plt.xticks(np.linspace(0,len(accuracy),len(accuracy)+1,endpoint=True))
			plt.plot(accuracy,color="green")
			plt.show()
Example #12
def GJK(shape1, shape2):
    ''' Calculates whether shape1 has collided with shape2. It uses the
        Minkowski Difference to find out if they have any point in common;
        if they do, they have collided.

        Input:
            *   shape1 and shape2 are Shape objects. Shape objects should have
                a position, describing their location, and a list of their
                points, represented as Vector objects, describing the
                whereabouts of their vertices.
        Output:
            *   The first output is a Boolean: True if the shapes have collided
                and False otherwise. On a collision it also returns a tuple
                with an approximate contact point (a Vector), the penetration
                normal and the penetration depth; otherwise (None, None, None).
    '''
    assert isinstance(shape1, Shape), 'Input must be a Shape object'
    assert isinstance(shape2, Shape), 'Input must be a Shape object'
    # Create a Simplex object
    simplex = Simplex()

    # Choose an initial search direction
    direction = shape1.get_pos() - shape2.get_pos()
    
    # Get the first Minkowski Difference point
    simplex.add(support(shape1, shape2, direction))
    
    direction *= -1.0

    originInSimplex = None
    # Start looping
    while True:
        #print 'New loop'
        # Add a new point to the simplex
        # TODO: Take care of if the simplex already contains the point.
        simplex.add(support(shape1, shape2, direction))

        # Make sure that the last point we added passed the origin
        if simplex.get(1).dot(direction) <= 0 and originInSimplex != '':
            # If the point added last was not past the origin in
            # the chosen direction then the Minkowski Difference cannot
            # possibly contain the origin since the last point
            # added is on the edge of the Minkowski Difference.
            #print 'False in loop'
            #return False, None, None, None
            return False, (None, None, None)
        else:
            # Otherwise we need to determine if the origin is in
            # the current simplex
            originInSimplex, direction = containsOrigin(simplex)
            if originInSimplex:
                # If it is then we know there is a collision
                #print 'True in loop'
                #collisionPoint, point1, point2 = pointOfCollision(simplex)
                #return True, collisionPoint, point1, point2
                assert len(simplex.get_points()) == 4, \
                       'Terminated without full simplex'
                collisionPoint = pointOfCollision_2(simplex)

                ######
                # A very rough approximation of the penetration depth
                # and vector that only applies if shape1 is a sphere...
                # Must be edited to be more accurate (and work for other shapes)
                # TODO: Better penetration normal

                penetrationNormal = (collisionPoint - shape1.get_pos()).normalize()
                penetrationDepth = (penetrationNormal*shape1.get_radius() \
                                   - (collisionPoint - shape1.get_pos())).norm()

                
                assert isinstance(collisionPoint, Vector), \
                       'Invalid type for collisionPoint'
                assert isinstance(penetrationNormal, Vector), \
                       'Invalid type for penetrationNormal'
                assert isinstance(penetrationDepth, numbers.Number)
                return True, (collisionPoint, penetrationNormal, penetrationDepth)
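
The support(shape1, shape2, direction) call that drives the loop above is not defined in this excerpt. Below is a minimal sketch of the usual GJK support mapping, assuming Shape exposes a get_points() list of Vector objects that support dot(), subtraction and scalar multiplication; the furthest_point helper is an illustrative assumption, not part of the original code.

def furthest_point(shape, direction):
    # Vertex of `shape` that lies furthest along `direction` (illustrative helper).
    return max(shape.get_points(), key=lambda p: p.dot(direction))

def support(shape1, shape2, direction):
    # Support point of the Minkowski Difference shape1 - shape2 along `direction`:
    # the furthest point of shape1 in the search direction minus the furthest
    # point of shape2 in the opposite direction. GJK only ever samples the
    # Minkowski Difference through this mapping.
    return furthest_point(shape1, direction) - furthest_point(shape2, direction * -1.0)

For a sphere, furthest_point would instead be get_pos() + direction.normalize() * get_radius(), which is consistent with the get_pos()/get_radius() calls used above for the rough penetration estimate.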