Code example #1
  def test(self, categories):
    for i in range(Constants.NUM_SUBJECTS):
      trainSubjects = [1, 2, 3, 4]
      testSubjects = [i + 1]
      trainSubjects.remove(i + 1)

      trainVoxelArrayMap = util.getVoxelArray(subjectNumbers = trainSubjects)
      testVoxelArrayMap = util.getVoxelArray(subjectNumbers = testSubjects)
      util.normalize(trainVoxelArrayMap)
      util.normalize(testVoxelArrayMap)
      util.filterData(trainVoxelArrayMap, categories=categories)
      util.filterData(testVoxelArrayMap, categories=categories)

      Xtrain = numpy.array([trainVoxelArrayMap[key] for key in trainVoxelArrayMap])
      Ytrain = numpy.array([key[1] for key in trainVoxelArrayMap])

      Xtest = numpy.array([testVoxelArrayMap[key] for key in testVoxelArrayMap])
      Yanswer = numpy.array([key[1] for key in testVoxelArrayMap])

      Yprediction = OneVsRestClassifier(LinearSVC()).fit(Xtrain, Ytrain).predict(Xtest)
      # Yprediction = OneVsOneClassifier(LinearSVC()).fit(Xtrain, Ytrain).predict(Xtest)

      correct = 0
      for index in range(len(Yanswer)):
        if Yanswer[index] == Yprediction[index]:
          correct += 1
      # correct = [1 if Yanswer[index] == Yprediction[index] else 0 for index in range(len(Yanswer))]
      print categories, "Correct Predictions: ", correct, "/", len(Yanswer)
      return float(correct) * 100 / len(Yanswer)  # note: returning inside the loop reports only the first held-out subject
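The counting loop above can be collapsed into one vectorized comparison. A minimal sketch, assuming Yanswer and Yprediction are the numpy arrays built in this example:

import numpy

# fraction of matching labels, scaled to a percentage
accuracy = 100.0 * numpy.mean(Yanswer == Yprediction)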
Code example #2
def correlationNearestNeighbor():
	voxelArrayMap = util.getVoxelArray(False, False,True,False, [4])
	
	util.normalize(voxelArrayMap)
	correct = [{ }]*3
	totalCountCorrect = [0] *3
	totalCountInCorrect = [0] *3
	incorrect = [{ }]*3
	voxelCopy = voxelArrayMap.keys()
	totalCorrect =0
	totalIncorrect =0
	count = 0
	for key in voxelCopy:
		count +=1
		testExample = voxelArrayMap[key]
		voxelArrayMap.pop(key, None)

		averageCategoryCorrelations = matrixify(calculateAverageCorrelations(voxelArrayMap))
		exampleCorrelations = calculateSingleExampleCorrelations(testExample, voxelArrayMap)
		classifiedCategory = classifyByClosestCorrelation(exampleCorrelations,averageCategoryCorrelations)

		if classifiedCategory[0] == key[1]:
			totalCorrect +=1
		else:
			totalIncorrect +=1
		voxelArrayMap[key] = testExample
	print "Correct", totalCorrect, "Incorrect", totalIncorrect, "Percentage Correct ", totalCorrect/float((totalCorrect +totalIncorrect))
Code example #3
  def __init__(self, data=None, normalize=True, numTrainingExamples=24, categories=[0, 1, 2, 3, 4], subjects=[1, 2, 3, 4]):
    if data is None:
      data = util.getVoxelArray(subjectNumbers=subjects)
    if normalize:
      util.normalize(data)
    if len(categories) < Constants.NUM_CATEGORIES:
      util.filterData(data, categories)

    self.numCategories = Constants.NUM_CATEGORIES
    self.categories = None
    if not categories is None:
      self.numCategories = len(categories)
      self.categories = categories

    self.numTrainingExamples = numTrainingExamples * len(subjects) / Constants.NUM_SUBJECTS
    self.numTestingExamples = len(subjects) * Constants.NUM_RUNS * Constants.NUM_TASKS - self.numTrainingExamples
    self.trainExamples = [[[0] for _ in range(self.numTrainingExamples)] for _ in range(self.numCategories)]
    self.testExamples = []
    self.averages = [[0 for _ in range(Constants.NUM_VOXELS)] for _ in range(self.numCategories)]

    for key in data:
      value = data[key]
      run, category, task, subject = key
      if not self.categories is None:
        if category not in self.categories: continue
        category = self.getCategory(category)
      index = self.index(run, task, subject) * len(subjects) / Constants.NUM_SUBJECTS
      if index < self.numTrainingExamples:
        self.trainExamples[category][index] = list(value)
      else:
        self.testExamples.append((key, value))
Code example #4
def run(categories=None, subject=None):
  data = util.getVoxelArray(subjectNumbers=subject)
  # data = util.dataSmoothing(data)
  nb = NaiveBayes(data=data, categories=categories, subjects=subject)
  nb.train()
  nb.test()
  return nb.results()
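A hypothetical call to this wrapper, for illustration only; the category and subject values are placeholders, not values taken from the project:

# e.g. train and test Naive Bayes on categories 0 and 1 using data from subject 1
results = run(categories=[0, 1], subject=[1])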
Code example #5
def correlationNearestNeighbor():
    voxelArrayMap = util.getVoxelArray(False, False, True, False, [4])

    util.normalize(voxelArrayMap)
    correct = [{}] * 3
    totalCountCorrect = [0] * 3
    totalCountInCorrect = [0] * 3
    incorrect = [{}] * 3
    voxelCopy = voxelArrayMap.keys()
    totalCorrect = 0
    totalIncorrect = 0
    count = 0
    for key in voxelCopy:
        count += 1
        testExample = voxelArrayMap[key]
        voxelArrayMap.pop(key, None)

        averageCategoryCorrelations = matrixify(
            calculateAverageCorrelations(voxelArrayMap))
        exampleCorrelations = calculateSingleExampleCorrelations(
            testExample, voxelArrayMap)
        classifiedCategory = classifyByClosestCorrelation(
            exampleCorrelations, averageCategoryCorrelations)

        if classifiedCategory[0] == key[1]:
            totalCorrect += 1
        else:
            totalIncorrect += 1
        voxelArrayMap[key] = testExample
    print "Correct", totalCorrect, "Incorrect", totalIncorrect, "Percentage Correct ", totalCorrect / float(
        (totalCorrect + totalIncorrect))
Code example #6
  def __init__(self, data=None, normalize=True, categories=[0, 1, 2, 3, 4], subjects=[1, 2, 3, 4]):
    if data is None:
      data = util.getVoxelArray(subjectNumbers=subjects)
    if normalize:
      util.normalize(data)
    if len(categories) < Constants.NUM_CATEGORIES:
      util.filterData(data, categories)
    self.data = data
    self.categories = categories
Code example #7
def run():
	voxelArrayMap = util.getVoxelArray(False, False, False, True, [2])
	util.normalize(voxelArrayMap)

	centroids = [0] * 5
	keys = list(voxelArrayMap.keys())
	selected = []
	for index in range(len(centroids)):
		while True:
			centroidKey = random.choice(keys)
			category = centroidKey[1]
			if category not in selected:
				selected.append(category)
				break
		print centroidKey
		centroids[index] = voxelArrayMap[centroidKey]

	numIters = 120
	assignmentMap = {}
	for iteration in range(numIters):
		newCentroids = [ [0] * 1973 for i in range(len(centroids))]
		clusterCounts = [0] * len(newCentroids)
		for key in voxelArrayMap:
			value = voxelArrayMap[key]
			distances = [ util.getDistance(centroid, value) for centroid in centroids ]
			minDistance = min(distances)
			optCentroid = distances.index(minDistance)
			assignmentMap[key] = optCentroid
			for voxelIndex in range(len(newCentroids[optCentroid])):
				newCentroids[optCentroid][voxelIndex] += value[voxelIndex]
			clusterCounts[optCentroid] += 1

		for index in range(len(newCentroids)):
			numPoints = clusterCounts[index]
			if numPoints != 0:
				centroid = newCentroids[index]
				centroid = [ centroid[voxelIndex] / numPoints for voxelIndex in range(len(centroid)) ]
				newCentroids[index] = centroid
		centroids = newCentroids

	# we want ClusterNum -> What type of image
	catMap = {}
	for key in assignmentMap:
		assignment = assignmentMap[key]
		# key[1] is the image category (0-4: face, body, etc.)
		# assignment is the optimal centroid
		if assignment in catMap:
			arr = catMap[assignment]
			arr.append(key[1])
			catMap[assignment] = arr
		else:
			catMap[assignment] = [key[1]]
	print catMap
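To make the printed catMap easier to read, each cluster can be summarized by its majority category. A minimal sketch, assuming catMap is the dict built at the end of this example:

from collections import Counter

# majority image category per cluster and its share of the cluster's members
for cluster in sorted(catMap):
    label, hits = Counter(catMap[cluster]).most_common(1)[0]
    print("cluster %d -> category %d (%.2f of members)" % (cluster, label, float(hits) / len(catMap[cluster])))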
Code example #8
def PCA(normalizedData=None):
    if normalizedData is None:
        normalizedData = util.getVoxelArray(allTasks=True)

    # preprocess and normalize data
    util.normalize(normalizedData)
    m = len(normalizedData)

    print('Calculating the Covariance Matrix')

    count = 0
    # compute Covariance matrix [1/m Sum (x xT)]
    covarianceMatrix = [[0 for _ in range(Constants.NUM_VOXELS)]
                        for _ in range(Constants.NUM_VOXELS)]
    for key in normalizedData:
        x = normalizedData[key]
        for i in range(Constants.NUM_VOXELS):
            for j in range(Constants.NUM_VOXELS):
                if count % 50000000 == 0:
                    print(
                        int(
                            float(count) * 100 /
                            (m * Constants.NUM_VOXELS * Constants.NUM_VOXELS)),
                        'percent completed')
                covarianceMatrix[i][j] += (x[i] * x[j]) / m
                count += 1

    for i in range(Constants.NUM_VOXELS):
        for j in range(Constants.NUM_VOXELS):
            if isinstance(covarianceMatrix[i][j], complex):
                print('Covariance Matrix index', i, j, 'is complex:',
                      covarianceMatrix[i][j])
    print('Finding the', Constants.K_DIMENSIONS, 'Largest Eigenvalues')
    # then compute all eigenvectors and corresponding eigenvalues
    eigenvalues, eigenvectors = linalg.eig(covarianceMatrix)
    # eig returns eigenvectors as columns, so transpose before pairing each
    # eigenvalue with its eigenvector
    eigens = list(zip(eigenvalues, eigenvectors.T))
    eigens.sort(key=lambda pair: pair[0], reverse=True)
    eigens = eigens[0:Constants.K_DIMENSIONS]
    eigenvalues, eigenvectors = zip(*eigens)

    for i in range(Constants.K_DIMENSIONS):
        print(i, eigenvalues[i])

    # transform data into K dimensions
    for key in normalizedData:
        x = normalizedData[key]
        y = [sum([eigenvector[i] * x[i] for i in range(Constants.NUM_VOXELS)])\
             for index, eigenvector in enumerate(eigenvectors)]
        normalizedData[key] = y
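The covariance triple loop above runs in pure Python over m * NUM_VOXELS * NUM_VOXELS terms; the same matrix can be built with a single matrix product, and eigh suits a symmetric matrix. A sketch under the same assumptions as the example (normalizedData, Constants, and numpy available):

import numpy

X = numpy.array(list(normalizedData.values()))         # shape (m, NUM_VOXELS)
covarianceMatrix = numpy.dot(X.T, X) / float(len(X))   # (1/m) * sum of x x^T
# eigh returns real eigenvalues in ascending order for a symmetric matrix
eigenvalues, eigenvectors = numpy.linalg.eigh(covarianceMatrix)
topVectors = eigenvectors[:, ::-1][:, :Constants.K_DIMENSIONS]  # columns are eigenvectors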
Code example #9
File: grapher.py  Project: SanjV/FMRI_MachineLearning
        pylab.show()

    def twoDHeatMap(self,
                    data,
                    vmin=None,
                    vmax=None,
                    title=None,
                    xLabel=None,
                    yLabel=None,
                    filename=None):
        if not vmin is None and not vmax is None:
            pylab.pcolor(numpy.array(data), vmin=vmin, vmax=vmax)
        else:
            pylab.pcolor(numpy.array(data))
        if not title is None:
            pylab.title(title)
        if not xLabel is None:
            pylab.xlabel(xLabel)
        if not yLabel is None:
            pylab.ylabel(yLabel)
        if not filename is None:
            pylab.savefig(filename + '.png')
        pylab.show()


if __name__ == '__main__':
    data = util.getVoxelArray()
    data = util.dataSmoothing(data)
    util.normalize(data)
    g = Grapher()
    g.twoDGraph(data)
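A hypothetical call to twoDHeatMap, for illustration only; correlationMatrix is a placeholder name for any square 2-D array, such as the average category correlations computed elsewhere in this project:

g = Grapher()
g.twoDHeatMap(correlationMatrix, vmin=-1.0, vmax=1.0,
              title='Average category correlations',
              xLabel='Category', yLabel='Category',
              filename='category_correlations')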
Code example #10
def test(firstCategory, secondCategory):
    def sigmoid(x, deriv=False):
        return x * (1 - x) if (deriv == True) else 1 / (1 + numpy.exp(-x))

    trainVoxelMap = util.getVoxelArray(allTasks=True,
                                       includeOddBall=False,
                                       includeWorkingMemory=False,
                                       includeSelectiveAttention=True,
                                       subjectNumbers=trainSubjects)
    testVoxelMap = util.getVoxelArray(allTasks=True,
                                      includeOddBall=False,
                                      includeWorkingMemory=False,
                                      includeSelectiveAttention=True,
                                      subjectNumbers=testSubjects)
    util.normalize(trainVoxelMap)
    util.normalize(testVoxelMap)

    trainX = []
    trainY = []
    testX = []
    testY = []

    category1 = firstCategory
    category2 = secondCategory

    def testWithinSubjects():  # separate by runs
        for key in trainVoxelMap:
            if key[1] == category1 or key[1] == category2:
                classification = 1 if key[1] == category2 else 0
                if key[0] == 2:
                    testX.append(trainVoxelMap[key])
                    testY.append(classification)
                else:
                    trainX.append(trainVoxelMap[key])
                    trainY.append(classification)

    def testAcrossSubjects():  # include all data for a whole subject
        for key in trainVoxelMap:
            if key[1] == category1 or key[1] == category2:
                trainX.append(trainVoxelMap[key])
                classification = 1 if key[1] == category2 else 0
                trainY.append(classification)
        for key in testVoxelMap:
            if key[1] == category1 or key[1] == category2:
                testX.append(testVoxelMap[key])
                classification = 1 if key[1] == category2 else 0
                testY.append(classification)

    testWithinSubjects()

    trainX = numpy.array(trainX)
    trainY = numpy.array(trainY).T

    # back propagation
    numpy.random.seed(1)
    synapseZero = 2 * numpy.random.random((1973, 1)) - 1
    for iter in xrange(10000):
        l0 = trainX
        l1 = sigmoid(numpy.dot(l0, synapseZero))
        l1_error = [trainY[i] - l1[i] for i in range(len(trainY))]
        l1_delta = l1_error * sigmoid(l1, True)
        synapseZero += numpy.dot(l0.T, l1_delta)

    #print "Correct Classifications: ", testY
    a = sigmoid(numpy.dot(testX, synapseZero))
    predictions = [
        0 if abs(a[i]) < abs(a[i] - 1) else 1 for i in range(len(testX))
    ]
    #print "Output After Training: ", predictions
    print "Categories: ", category1, "/", category2, " Error: ", sum([
        1 if predictions[i] != testY[i] else 0 for i in range(len(testY))
    ]), " out of ", len(testY)
Code example #11
def test(firstCategory, secondCategory):
	def sigmoid(x,deriv=False):
		return x*(1-x) if(deriv==True) else 1/(1+numpy.exp(-x))

	trainVoxelMap = util.getVoxelArray(allTasks=True, includeOddBall=False, includeWorkingMemory=False, includeSelectiveAttention=True,
	subjectNumbers = trainSubjects)
	testVoxelMap = util.getVoxelArray(allTasks=True, includeOddBall=False, includeWorkingMemory=False, includeSelectiveAttention=True,
	subjectNumbers = testSubjects)
	util.normalize(trainVoxelMap)
	util.normalize(testVoxelMap)

	trainX = []
	trainY = []
	testX = []
	testY = []

	category1 = firstCategory
	category2 = secondCategory

	def testWithinSubjects():	# separate by runs
		for key in trainVoxelMap:
			if key[1] == category1 or key[1] == category2:
				classification = 1 if key[1] == category2 else 0
				if key[0] == 2:
					testX.append(trainVoxelMap[key])
					testY.append(classification)
				else:
					trainX.append(trainVoxelMap[key])
					trainY.append(classification)

	def testAcrossSubjects():	# include all data for a whole subject
		for key in trainVoxelMap:
			if key[1] == category1 or key[1] == category2:
				trainX.append(trainVoxelMap[key])
				classification = 1 if key[1] == category2 else 0
				trainY.append(classification)
		for key in testVoxelMap:
			if key[1] == category1 or key[1] == category2:
				testX.append(testVoxelMap[key])
				classification = 1 if key[1] == category2 else 0
				testY.append(classification)

	testWithinSubjects()

	trainX = numpy.array(trainX)
	trainY = numpy.array(trainY).T

	# back propagation
	numpy.random.seed(1)
	synapseZero = 2*numpy.random.random((1973, 1)) - 1
	for iter in xrange(10000):
		l0 = trainX
		l1 = sigmoid(numpy.dot(l0,synapseZero))
		l1_error = [ trainY[i]-l1[i] for i in range(len(trainY)) ]
		l1_delta = l1_error * sigmoid(l1, True)
		synapseZero += numpy.dot(l0.T, l1_delta)

	#print "Correct Classifications: ", testY
	a = sigmoid(numpy.dot(testX,synapseZero))
	predictions = [0 if abs(a[i]) < abs(a[i] - 1)  else 1 for i in range(len(testX))]
	#print "Output After Training: ", predictions
	print "Categories: ", category1, "/", category2, " Error: ", sum([1 if predictions[i] != testY[i] else 0 for i in range(len(testY))]), " out of ", len(testY)
Code example #12
def run(categories, subjects):
  data = util.getVoxelArray(subjectNumbers=subjects)
  nn = NearestNeighbor(data=data, categories=categories)
  return nn.kNearestNeighbor(5)
Code example #13
def test(firstCategory, secondCategory):
    def sigmoid(x, deriv=False):
        return x * (1 - x) if (deriv == True) else 1 / (1 + numpy.exp(-x))

    # initialization
    trainVoxelMap = util.getVoxelArray(allTasks=True,
                                       includeOddBall=False,
                                       includeWorkingMemory=False,
                                       includeSelectiveAttention=True,
                                       subjectNumbers=trainSubjects)
    testVoxelMap = util.getVoxelArray(allTasks=True,
                                      includeOddBall=False,
                                      includeWorkingMemory=False,
                                      includeSelectiveAttention=True,
                                      subjectNumbers=testSubjects)
    util.normalize(trainVoxelMap)
    util.normalize(testVoxelMap)

    trainX = []
    trainY = []
    testX = []
    testY = []

    category1 = firstCategory
    category2 = secondCategory

    def testWithinSubjects():
        for key in trainVoxelMap:
            if key[1] == category1 or key[1] == category2:
                classification = 1 if key[1] == category2 else 0
                if key[0] == 2:
                    testX.append(trainVoxelMap[key])
                    testY.append(classification)
                else:
                    trainX.append(trainVoxelMap[key])
                    trainY.append(classification)

    def testAcrossSubjects():
        for key in trainVoxelMap:
            if key[1] == category1 or key[1] == category2:
                trainX.append(trainVoxelMap[key])
                classification = 1 if key[1] == category2 else 0
                trainY.append(classification)
        for key in testVoxelMap:
            if key[1] == category1 or key[1] == category2:
                testX.append(testVoxelMap[key])
                classification = 1 if key[1] == category2 else 0
                testY.append(classification)

    testWithinSubjects()

    trainX = numpy.array(trainX)
    trainY = numpy.array(trainY).T

    numpy.random.seed(1)
    synapseZero = 2 * numpy.random.random((1973, 45)) - 1
    synapseOne = 2 * numpy.random.random((45, 1)) - 1

    for iter in range(5000):
        l0 = trainX
        l1 = sigmoid(numpy.dot(l0, synapseZero))
        l2 = sigmoid(numpy.dot(l1, synapseOne))

        l2_error = [trainY[i] - l2[i] for i in range(len(trainY))]
        l2_delta = l2_error * sigmoid(l2, deriv=True)

        l1_error = l2_delta.dot(synapseOne.T)
        l1_delta = l1_error * sigmoid(l1, deriv=True)

        synapseOne += l1.T.dot(l2_delta)
        synapseZero += l0.T.dot(l1_delta)

    # forward pass through both layers for the test set; multiplying the two
    # weight matrices together would skip the hidden-layer nonlinearity
    l1_test = sigmoid(numpy.dot(testX, synapseZero))
    a = sigmoid(numpy.dot(l1_test, synapseOne))
    predictions = [
        0 if abs(a[i]) < abs(a[i] - 1) else 1 for i in range(len(testX))
    ]

    print(
        "Categories: ", category1, "/", category2, " Error: ",
        sum([
            1 if predictions[i] != testY[i] else 0 for i in range(len(testY))
        ]), " out of ", len(testY), "accuracy", 100 - sum([
            1 if predictions[i] != testY[i] else 0 for i in range(len(testY))
        ]) / len(testY) * 100, "%")
Code example #14
    for key in data:
      fig = pylab.figure()
      values = data[key]
      pylab.plot(values)
      print key
      count += 1
      if count == 2:
        break # print first example (Subject 4, Selective Attention, WORDS, Second Run)
    pylab.show()

  def twoDHeatMap(self, data, vmin=None, vmax=None, title=None, xLabel=None, yLabel=None, filename=None):
    if not vmin is None and not vmax is None: 
      pylab.pcolor(numpy.array(data), vmin=vmin, vmax=vmax)
    else:
      pylab.pcolor(numpy.array(data))
    if not title is None:
      pylab.title(title)
    if not xLabel is None:
      pylab.xlabel(xLabel)
    if not yLabel is None:
      pylab.ylabel(yLabel)
    if not filename is None:
      pylab.savefig(filename + '.png')
    pylab.show()

if __name__ == '__main__':
  data = util.getVoxelArray()
  data = util.dataSmoothing(data)
  util.normalize(data)
  g = Grapher()
  g.twoDGraph(data)