def __init__(self, trainingDataContainer, alpha, maxIterations):
    """Store the training configuration and build the initial weight vector.

    trainingDataContainer -- source of labelled training samples
    alpha -- learning rate used during weight updates
    maxIterations -- upper bound on training passes over the data
    """
    logger = LoggerFetcher().fetchLogger("p1", "perceptron")
    self.log = logger
    self.trainingDataContainer = trainingDataContainer
    self.maxIterations = maxIterations
    self.alpha = alpha
    self.initialiseWeightVector()
def testNonExistingLogConfig(self):
    """fetchLogger must raise IOError when the named config file is missing.

    Fix: the previous version only caught IOError and did nothing, so the
    test silently PASSED even when fetchLogger raised nothing at all.
    """
    configFileName = 'non-existing-conf'
    package = 'p1'
    fetcher = LoggerFetcher()
    try:
        fetcher.fetchLogger(package, configFileName)
    except IOError:
        return  # expected failure path for a missing config
    raise AssertionError('fetchLogger did not raise IOError for a missing config file')
class Main(object):
    """Trains one perceptron per dataset and reports its classification error rate."""

    def __init__(self):
        self.log = LoggerFetcher().fetchLogger("p1", "main")

    def classifyDataSet(self, dataSetName, perceptron):
        """Classify every sample of the named dataset and log the error rate.

        dataSetName -- dataset identifier understood by DataFetcher
        perceptron -- a trained classifier exposing classify(sample)
        """
        self.log.info('Classifying dataset %s', dataSetName)
        container = DataFetcher().fetchDataSet(dataSetName)
        labels = container.getLabels()
        samples = container.getDataVectors()
        errors = 0
        for i, sample in enumerate(samples):
            cls = perceptron.classify(sample)
            if cls != labels[i]:
                errors += 1
                self.log.info('Misclassified sample %s as %s', labels[i], cls)
        setSize = container.getDataSetSize()
        # The 1.0 factor forces float division under Python 2 semantics.
        errorRate = errors * 1.0 / setSize * 100
        self.log.info('Error rate while classifying was %s', errorRate)

    def trainPerceptron(self, dataSetName, alpha, iterations):
        """Train and return a perceptron on the named dataset.

        Fix: alpha and iterations were previously ignored — the perceptron
        was always constructed with the hard-coded values 0.1 and 15.
        """
        container = DataFetcher().fetchDataSet(dataSetName)
        perceptron = Perceptron(container, alpha, iterations)
        perceptron.train()
        return perceptron

    def main(self):
        """Entry point: train and evaluate on each configured dataset."""
        dataSets = ['buffer_dataset', 'inverter_dataset']
        for dataSetName in dataSets:
            perceptron = self.trainPerceptron(dataSetName, 0.1, 15)
            self.classifyDataSet(dataSetName, perceptron)
class Perceptron(object):
    """Single-layer perceptron with a trailing bias weight.

    The weight vector holds one weight per input component plus one extra
    final entry: the bias weight, applied against a constant input of -1.
    """

    def __init__(self, trainingDataContainer, alpha, maxIterations):
        """
        trainingDataContainer -- provides getLabels(), getDataVectors() and
            getDataVectorLength()
        alpha -- learning rate used for weight updates
        maxIterations -- upper bound on training passes over the data
        """
        self.log = LoggerFetcher().fetchLogger("p1", "perceptron")
        self.trainingDataContainer = trainingDataContainer
        self.alpha = alpha
        self.maxIterations = maxIterations
        self.initialiseWeightVector()

    def initialiseWeightVector(self):
        """Randomly initialise one weight per input component plus the bias."""
        self.log.debug('Initializing weight vector')
        # +1 for the bias weight stored at the end of the vector.
        vectorLength = self.trainingDataContainer.getDataVectorLength() + 1
        # range (not xrange) works identically under both Python 2 and 3.
        self.weightVector = [random.random() for _ in range(vectorLength)]

    def classify(self, dataVector):
        """Return 1 or 0 for a sample, or -1 if the sample length is wrong."""
        self.log.info('Classifying sample %s', dataVector)
        if len(dataVector) != self.getWeightVectorLength() - 1:
            self.log.warn('Expected vector length %s, but found %s',
                          self.getWeightVectorLength() - 1, len(dataVector))
            return -1
        activation = self.getActivation(self.weightVector, dataVector)
        self.log.debug('Activation is %s', activation)
        classification = 0 if activation < 0 else 1
        self.log.info('Classification is %s', classification)
        return classification

    def train(self):
        """Run perceptron learning until error-free or maxIterations passes."""
        self.log.info('Starting training')
        labels = self.trainingDataContainer.getLabels()
        vectors = self.trainingDataContainer.getDataVectors()
        errors = -1  # sentinel so the loop body runs at least once
        iteration = 0
        # Fix: use < so at most maxIterations passes are made; the previous
        # <= bound (checked after the increment below) ran maxIterations + 1.
        while errors != 0 and iteration < self.maxIterations:
            errors = 0
            iteration += 1
            self.log.info('Iteration %s', iteration)
            for i, target in enumerate(labels):
                self.log.info('Label %s, sample [%s]', target, i)
                classification = self.classify(vectors[i])
                error = target - classification
                if error != 0:
                    self.log.info('Made an error while classifying')
                    errors += 1
                    delta = self.alpha * error
                    self.weightVector = self.updateWeights(
                        self.weightVector, delta, vectors[i])
        self.log.info('Finished training')

    def updateWeights(self, weightVector, delta, dataVector):
        """Shift each weight by delta * input (bias input is the constant -1)."""
        self.log.info('Updating weights: %s', weightVector)
        for index, value in enumerate(dataVector):
            weightVector[index] += delta * value
        weightVector[-1] += delta * (-1)
        self.log.info('New weights: %s', weightVector)
        return weightVector

    def getActivation(self, weightVector, dataVector):
        """Return the weighted sum of inputs minus the bias weight."""
        activation = sum(value * weightVector[index]
                         for index, value in enumerate(dataVector))
        activation += weightVector[-1] * (-1)
        return activation

    def getWeightVector(self):
        return self.weightVector

    def getWeightVectorLength(self):
        return len(self.weightVector)
def __init__(self):
    """Attach the 'main' logger configured for the p1 package."""
    fetcher = LoggerFetcher()
    self.log = fetcher.fetchLogger("p1", "main")
class Perceptron(object):
    """Single-layer perceptron with a trailing bias weight.

    The weight vector holds one weight per input component plus one extra
    final entry: the bias weight, applied against a constant input of -1.
    """

    def __init__(self, trainingDataContainer, alpha, maxIterations):
        """
        trainingDataContainer -- provides getLabels(), getDataVectors() and
            getDataVectorLength()
        alpha -- learning rate used for weight updates
        maxIterations -- upper bound on training passes over the data
        """
        self.log = LoggerFetcher().fetchLogger("p1", "perceptron")
        self.trainingDataContainer = trainingDataContainer
        self.alpha = alpha
        self.maxIterations = maxIterations
        self.initialiseWeightVector()

    def initialiseWeightVector(self):
        """Randomly initialise one weight per input component plus the bias."""
        self.log.debug('Initializing weight vector')
        # +1 for the bias weight stored at the end of the vector.
        vectorLength = self.trainingDataContainer.getDataVectorLength() + 1
        # range (not xrange) works identically under both Python 2 and 3.
        self.weightVector = [random.random() for _ in range(vectorLength)]

    def classify(self, dataVector):
        """Return 1 or 0 for a sample, or -1 if the sample length is wrong."""
        self.log.info('Classifying sample %s', dataVector)
        if len(dataVector) != self.getWeightVectorLength() - 1:
            self.log.warn('Expected vector length %s, but found %s',
                          self.getWeightVectorLength() - 1, len(dataVector))
            return -1
        activation = self.getActivation(self.weightVector, dataVector)
        self.log.debug('Activation is %s', activation)
        classification = 0 if activation < 0 else 1
        self.log.info('Classification is %s', classification)
        return classification

    def train(self):
        """Run perceptron learning until error-free or maxIterations passes."""
        self.log.info('Starting training')
        labels = self.trainingDataContainer.getLabels()
        vectors = self.trainingDataContainer.getDataVectors()
        errors = -1  # sentinel so the loop body runs at least once
        iteration = 0
        # Fix: use < so at most maxIterations passes are made; the previous
        # <= bound (checked after the increment below) ran maxIterations + 1.
        while errors != 0 and iteration < self.maxIterations:
            errors = 0
            iteration += 1
            self.log.info('Iteration %s', iteration)
            for i, target in enumerate(labels):
                self.log.info('Label %s, sample [%s]', target, i)
                classification = self.classify(vectors[i])
                error = target - classification
                if error != 0:
                    self.log.info('Made an error while classifying')
                    errors += 1
                    delta = self.alpha * error
                    self.weightVector = self.updateWeights(
                        self.weightVector, delta, vectors[i])
        self.log.info('Finished training')

    def updateWeights(self, weightVector, delta, dataVector):
        """Shift each weight by delta * input (bias input is the constant -1)."""
        self.log.info('Updating weights: %s', weightVector)
        for index, value in enumerate(dataVector):
            weightVector[index] += delta * value
        weightVector[-1] += delta * (-1)
        self.log.info('New weights: %s', weightVector)
        return weightVector

    def getActivation(self, weightVector, dataVector):
        """Return the weighted sum of inputs minus the bias weight."""
        activation = sum(value * weightVector[index]
                         for index, value in enumerate(dataVector))
        activation += weightVector[-1] * (-1)
        return activation

    def getWeightVector(self):
        return self.weightVector

    def getWeightVectorLength(self):
        return len(self.weightVector)