connectedCounts = connectedCounts.astype(uintType) sp.getConnectedCounts(connectedCounts) connectedCounts = connectedCounts.astype(realDType) entropyTrace.append( calculateEntropy(activeColumnsCurrentEpoch[:, aliveColumns])) boostFactors = np.zeros((columnNumber, ), dtype=realDType) sp.getBoostFactors(boostFactors) meanBoostFactorTrace.append(np.mean(boostFactors)) activeDutyCycle = np.zeros((columnNumber, ), dtype=realDType) sp.getActiveDutyCycles(activeDutyCycle) if epoch >= 1: stability = calculateStability(activeColumnsCurrentEpoch, activeColumnsPreviousEpoch) stabilityTrace.append(stability) numConnectedSynapsesTrace.append(np.sum(connectedCounts)) numNewSynapses = connectedCounts - connectedCountsPreviousEpoch numNewSynapses[numNewSynapses < 0] = 0 numNewlyConnectedSynapsesTrace.append(np.sum(numNewSynapses)) numEliminatedSynapses = connectedCountsPreviousEpoch - connectedCounts numEliminatedSynapses[numEliminatedSynapses < 0] = 0 numEliminatedSynapsesTrace.append(np.sum(numEliminatedSynapses)) metrics = { 'connected syn': [numConnectedSynapsesTrace[-1]], 'new syn': [numNewlyConnectedSynapsesTrace[-1]],
def runSPexperiments(expConfig): inputVectorType = expConfig.dataSet params = getSDRDataSetParams(expConfig.dataSet, int(expConfig.seed)) expName = getExperimentName(expConfig) createDirectories(expName) sdrData = SDRDataSet(params) inputVectors = sdrData.getInputVectors() numInputVector, inputSize = inputVectors.shape plt.figure() plt.imshow(np.reshape(inputVectors[2], (params['nX'], params['nY'])), interpolation='nearest', cmap='gray') plt.savefig('figures/exampleInputs/{}'.format(expName)) print print "Runnning experiment: {}".format(expName) print "Training Data Size {} Dimensions {}".format(numInputVector, inputSize) spParams = getSpatialPoolerParams(params, expConfig) sp = createSpatialPooler(expConfig.spatialImp, spParams) if expConfig.topology == 1 and expConfig.spatialImp in ['faulty_sp', 'py']: initializeSPConnections(sp, potentialRaidus=10, initConnectionRadius=5) numColumns = np.prod(sp.getColumnDimensions()) numTestInputs = int(numInputVector * 0.5) testInputs = inputVectors[:numTestInputs, :] connectedCounts = np.zeros((numColumns, ), dtype=uintType) boostFactors = np.zeros((numColumns, ), dtype=realDType) activeDutyCycle = np.zeros((numColumns, ), dtype=realDType) metrics = { 'numConnectedSyn': [], 'numNewSyn': [], 'numRemoveSyn': [], 'stability': [], 'entropy': [], 'maxEntropy': [], 'sparsity': [], 'noiseRobustness': [], 'classification': [], 'meanBoostFactor': [], 'reconstructionError': [], 'witnessError': [] } connectedSyns = getConnectedSyns(sp) activeColumnsCurrentEpoch, dum = runSPOnBatch(sp, testInputs, learn=False) inspectSpatialPoolerStats(sp, inputVectors, expName + "beforeTraining") checkPoints = [ 0, expConfig.changeDataSetAt - 1, expConfig.changeDataSetAt, expConfig.numEpochs - 1 ] epoch = 0 while epoch < expConfig.numEpochs: print "training SP epoch {} ".format(epoch) if (expConfig.changeDataSetContinuously or epoch == expConfig.changeDataSetAt): params['seed'] = epoch sdrData.generateInputVectors(params) inputVectors = 
sdrData.getInputVectors() numInputVector, inputSize = inputVectors.shape testInputs = inputVectors[:numTestInputs, :] if expConfig.killInputsAfter > 0 and epoch > expConfig.killInputsAfter: if expConfig.topology == 1: inputSpaceDim = (params['nX'], params['nY']) centerColumn = indexFromCoordinates((15, 15), inputSpaceDim) deadInputs = topology.wrappingNeighborhood( centerColumn, 5, inputSpaceDim) else: zombiePermutation = np.random.permutation(inputSize) deadInputs = zombiePermutation[:100] inputVectors[:, deadInputs] = 0 if epoch == expConfig.killCellsAt: if expConfig.spatialImp in ['faulty_sp', 'monitored_faulty_sp']: if expConfig.topology == 1: centerColumn = indexFromCoordinates((15, 15), sp._columnDimensions) sp.killCellRegion(centerColumn, 5) else: sp.killCells(expConfig.killCellPrct) if expConfig.trackOverlapCurve: noiseLevelList, inputOverlapScore, outputOverlapScore = \ calculateOverlapCurve(sp, testInputs) metrics['noiseRobustness'].append( np.trapz(np.flipud(np.mean(outputOverlapScore, 0)), noiseLevelList)) np.savez( './results/input_output_overlap/{}/epoch_{}'.format( expName, epoch), noiseLevelList, inputOverlapScore, outputOverlapScore) if expConfig.classification: # classify SDRs with noise noiseLevelList = np.linspace(0, 1.0, 21) classification_accuracy = classificationAccuracyVsNoise( sp, testInputs, noiseLevelList) metrics['classification'].append( np.trapz(classification_accuracy, noiseLevelList)) np.savez( './results/classification/{}/epoch_{}'.format(expName, epoch), noiseLevelList, classification_accuracy) # train SP here, # Learn is turned off at the first epoch to gather stats of untrained SP learn = False if epoch == 0 else True # randomize the presentation order of input vectors sdrOrders = np.random.permutation(np.arange(numInputVector)) activeColumnsTrain, meanBoostFactors = runSPOnBatch( sp, inputVectors, learn, sdrOrders) # run SP on test dataset and compute metrics activeColumnsPreviousEpoch = copy.copy(activeColumnsCurrentEpoch) 
activeColumnsCurrentEpoch, dum = runSPOnBatch(sp, testInputs, learn=False) stability = calculateStability(activeColumnsCurrentEpoch, activeColumnsPreviousEpoch) if (expConfig.changeDataSetContinuously or epoch == expConfig.changeDataSetAt): stability = float('nan') metrics['stability'].append(stability) metrics['sparsity'].append( np.mean(np.mean(activeColumnsCurrentEpoch, 1))) metrics['entropy'].append(calculateEntropy(activeColumnsCurrentEpoch)) # generate ideal SP outputs where all columns have the same activation prob. activeColumnsIdeal = np.random.rand( numInputVector, numColumns) > metrics['sparsity'][-1] metrics['maxEntropy'].append(calculateEntropy(activeColumnsIdeal)) connectedSynsPreviousEpoch = copy.copy(connectedSyns) sp.getConnectedCounts(connectedCounts) connectedSyns = getConnectedSyns(sp) metrics['meanBoostFactor'].append(np.mean(meanBoostFactors)) sp.getActiveDutyCycles(activeDutyCycle) metrics['numConnectedSyn'].append(np.sum(connectedCounts)) numNewSynapses = connectedSyns - connectedSynsPreviousEpoch numNewSynapses[numNewSynapses < 0] = 0 metrics['numNewSyn'].append(np.sum(numNewSynapses)) numEliminatedSynapses = connectedSynsPreviousEpoch - connectedSyns numEliminatedSynapses[numEliminatedSynapses < 0] = 0 metrics['numRemoveSyn'].append(np.sum(numEliminatedSynapses)) metrics['reconstructionError'].append( reconstructionError(sp, testInputs, activeColumnsCurrentEpoch)) metrics['witnessError'].append( witnessError(sp, testInputs, activeColumnsCurrentEpoch)) print tabulate(metrics, headers="keys") if expConfig.checkRFCenters: # check distribution of RF centers, useful to monitor recovery from trauma RFcenters, avgDistToCenter = getRFCenters(sp, params, type='connected') if expConfig.spatialImp == 'faulty_sp': aliveColumns = sp.getAliveColumns() else: aliveColumns = np.arange(numColumns) fig = plotReceptiveFieldCenter(RFcenters[aliveColumns, :], connectedCounts[aliveColumns], (params['nX'], params['nY'])) 
plt.savefig('figures/RFcenters/{}/epoch_{}.png'.format( expName, epoch)) plt.close(fig) np.savez('results/RFcenters/{}/epoch_{}'.format(expName, epoch), RFcenters, avgDistToCenter) if expConfig.checkInputSpaceCoverage: # check coverage of input space, useful to monitor recovery from trauma inputSpaceCoverage = calculateInputSpaceCoverage(sp) np.savez( 'results/InputCoverage/{}/epoch_{}'.format(expName, epoch), inputSpaceCoverage, connectedCounts) plt.figure(2) plt.clf() plt.imshow(inputSpaceCoverage, interpolation='nearest', cmap="jet") plt.colorbar() plt.savefig('figures/InputCoverage/{}/epoch_{}.png'.format( expName, epoch)) if expConfig.checkTestInput: RFcenters, avgDistToCenter = getRFCenters(sp, params, type='connected') inputIdx = 0 outputColumns = np.zeros((numColumns, 1), dtype=uintType) sp.compute(testInputs[inputIdx, :], False, outputColumns) activeColumns = np.where(outputColumns > 0)[0] fig = plotReceptiveFieldCenter(RFcenters[aliveColumns, :], connectedCounts[aliveColumns], (params['nX'], params['nY'])) plt.scatter(RFcenters[activeColumns, 0], RFcenters[activeColumns, 1], color='r') plt.savefig('figures/ResponseToTestInputs/{}/epoch_{}.png'.format( expName, epoch)) if expConfig.saveBoostFactors: np.savez('results/boostFactors/{}/epoch_{}'.format(expName, epoch), meanBoostFactors) if expConfig.showExampleRFs: fig = plotReceptiveFields2D(sp, params['nX'], params['nY']) plt.savefig('figures/exampleRFs/{}/epoch_{}'.format( expName, epoch)) plt.close(fig) if epoch in checkPoints: # inspect SP again inspectSpatialPoolerStats(sp, inputVectors, expName + "epoch{}".format(epoch)) epoch += 1 # plot stats over training fileName = 'figures/network_stats_over_training_{}.pdf'.format(expName) plotSPstatsOverTime(metrics, fileName) metrics['expName'] = expName pickle.dump(metrics, open('results/traces/{}/trace'.format(expName), 'wb')) plotReceptiveFields2D(sp, params['nX'], params['nY']) inspectSpatialPoolerStats(sp, inputVectors, inputVectorType + "afterTraining") 
plotExampleInputOutput(sp, inputVectors, expName + "final") return metrics, expName
def runSPexperiments(expConfig):
  """Run a spatial-pooler training experiment described by expConfig.

  Generates an SDR dataset, trains a spatial pooler for
  ``expConfig.numEpochs`` epochs, and records per-epoch metrics
  (stability, entropy, sparsity, synapse turnover, noise robustness,
  classification accuracy, reconstruction/witness error). Figures and
  raw traces are written under ``figures/`` and ``results/``.

  :param expConfig: experiment configuration object; fields read include
      dataSet, seed, spatialImp, topology, numEpochs, changeDataSetAt,
      changeDataSetContinuously, killInputsAfter, killCellsAt,
      killCellPrct, and the various check*/save*/show* flags.
  :return: (metrics, expName) — the metrics dict (lists of per-epoch
      values plus 'expName') and the experiment name string.
  """
  inputVectorType = expConfig.dataSet
  params = getSDRDataSetParams(expConfig.dataSet, int(expConfig.seed))
  expName = getExperimentName(expConfig)
  createDirectories(expName)

  sdrData = SDRDataSet(params)
  inputVectors = sdrData.getInputVectors()
  numInputVector, inputSize = inputVectors.shape

  # Save one example input as a sanity-check figure.
  plt.figure()
  plt.imshow(np.reshape(inputVectors[2], (params['nX'], params['nY'])),
             interpolation='nearest', cmap='gray')
  plt.savefig('figures/exampleInputs/{}'.format(expName))

  print
  print "Runnning experiment: {}".format(expName)
  print "Training Data Size {} Dimensions {}".format(numInputVector, inputSize)

  spParams = getSpatialPoolerParams(params, expConfig)
  sp = createSpatialPooler(expConfig.spatialImp, spParams)

  if expConfig.topology == 1 and expConfig.spatialImp in ['faulty_sp', 'py']:
    # NOTE(review): 'potentialRaidus' looks like a typo but must match the
    # callee's keyword — verify against initializeSPConnections.
    initializeSPConnections(sp, potentialRaidus=10, initConnectionRadius=5)

  numColumns = np.prod(sp.getColumnDimensions())

  # Half of the input vectors are reserved as a fixed test set.
  numTestInputs = int(numInputVector * 0.5)
  testInputs = inputVectors[:numTestInputs, :]

  # Reusable output buffers filled in-place by the SP getters.
  connectedCounts = np.zeros((numColumns,), dtype=uintType)
  boostFactors = np.zeros((numColumns,), dtype=realDType)
  activeDutyCycle = np.zeros((numColumns,), dtype=realDType)

  metrics = {'numConnectedSyn': [], 'numNewSyn': [], 'numRemoveSyn': [],
             'stability': [], 'entropy': [], 'maxEntropy': [], 'sparsity': [],
             'noiseRobustness': [], 'classification': [], 'meanBoostFactor': [],
             'reconstructionError': [], 'witnessError': []}

  connectedSyns = getConnectedSyns(sp)
  activeColumnsCurrentEpoch, dum = runSPOnBatch(sp, testInputs, learn=False)

  inspectSpatialPoolerStats(sp, inputVectors, expName + "beforeTraining")

  # Epochs at which to dump extra SP statistics.
  checkPoints = [0, expConfig.changeDataSetAt - 1, expConfig.changeDataSetAt,
                 expConfig.numEpochs - 1]

  epoch = 0
  while epoch < expConfig.numEpochs:
    print "training SP epoch {} ".format(epoch)
    if (expConfig.changeDataSetContinuously or
            epoch == expConfig.changeDataSetAt):
      # Regenerate the dataset (new seed per epoch) to test adaptation.
      params['seed'] = epoch
      sdrData.generateInputVectors(params)
      inputVectors = sdrData.getInputVectors()
      numInputVector, inputSize = inputVectors.shape
      testInputs = inputVectors[:numTestInputs, :]

    if expConfig.killInputsAfter > 0 and epoch > expConfig.killInputsAfter:
      # Simulate input trauma by zeroing a region (topological) or a
      # random subset (non-topological) of input bits.
      if expConfig.topology == 1:
        inputSpaceDim = (params['nX'], params['nY'])
        centerColumn = indexFromCoordinates((15, 15), inputSpaceDim)
        deadInputs = topology.wrappingNeighborhood(centerColumn, 5,
                                                   inputSpaceDim)
      else:
        zombiePermutation = np.random.permutation(inputSize)
        deadInputs = zombiePermutation[:100]
      inputVectors[:, deadInputs] = 0

    if epoch == expConfig.killCellsAt:
      # Simulate column death (only the faulty SP implementations support it).
      if expConfig.spatialImp in ['faulty_sp', 'monitored_faulty_sp']:
        if expConfig.topology == 1:
          centerColumn = indexFromCoordinates((15, 15),
                                              sp._columnDimensions)
          sp.killCellRegion(centerColumn, 5)
        else:
          sp.killCells(expConfig.killCellPrct)

    if expConfig.trackOverlapCurve:
      # Noise robustness = area under the output-overlap-vs-noise curve.
      noiseLevelList, inputOverlapScore, outputOverlapScore = \
        calculateOverlapCurve(sp, testInputs)
      metrics['noiseRobustness'].append(
        np.trapz(np.flipud(np.mean(outputOverlapScore, 0)), noiseLevelList))
      np.savez(
        './results/input_output_overlap/{}/epoch_{}'.format(expName, epoch),
        noiseLevelList, inputOverlapScore, outputOverlapScore)

    if expConfig.classification:
      # classify SDRs with noise
      noiseLevelList = np.linspace(0, 1.0, 21)
      classification_accuracy = classificationAccuracyVsNoise(
        sp, testInputs, noiseLevelList)
      metrics['classification'].append(
        np.trapz(classification_accuracy, noiseLevelList))
      np.savez('./results/classification/{}/epoch_{}'.format(expName, epoch),
               noiseLevelList, classification_accuracy)

    # train SP here,
    # Learn is turned off at the first epoch to gather stats of untrained SP
    learn = False if epoch == 0 else True

    # randomize the presentation order of input vectors
    sdrOrders = np.random.permutation(np.arange(numInputVector))
    activeColumnsTrain, meanBoostFactors = runSPOnBatch(sp, inputVectors,
                                                        learn, sdrOrders)

    # run SP on test dataset and compute metrics
    activeColumnsPreviousEpoch = copy.copy(activeColumnsCurrentEpoch)
    activeColumnsCurrentEpoch, dum = runSPOnBatch(sp, testInputs, learn=False)

    stability = calculateStability(activeColumnsCurrentEpoch,
                                   activeColumnsPreviousEpoch)
    if (expConfig.changeDataSetContinuously or
            epoch == expConfig.changeDataSetAt):
      # Stability across a dataset change is meaningless; mark it NaN.
      stability = float('nan')
    metrics['stability'].append(stability)

    metrics['sparsity'].append(np.mean(np.mean(activeColumnsCurrentEpoch, 1)))

    metrics['entropy'].append(calculateEntropy(activeColumnsCurrentEpoch))

    # generate ideal SP outputs where all columns have the same activation prob.
    activeColumnsIdeal = np.random.rand(numInputVector,
                                        numColumns) > metrics['sparsity'][-1]
    metrics['maxEntropy'].append(calculateEntropy(activeColumnsIdeal))

    # Synapse turnover: diff connected-synapse matrices across epochs.
    connectedSynsPreviousEpoch = copy.copy(connectedSyns)
    sp.getConnectedCounts(connectedCounts)
    connectedSyns = getConnectedSyns(sp)

    metrics['meanBoostFactor'].append(np.mean(meanBoostFactors))
    sp.getActiveDutyCycles(activeDutyCycle)

    metrics['numConnectedSyn'].append(np.sum(connectedCounts))

    numNewSynapses = connectedSyns - connectedSynsPreviousEpoch
    numNewSynapses[numNewSynapses < 0] = 0
    metrics['numNewSyn'].append(np.sum(numNewSynapses))

    numEliminatedSynapses = connectedSynsPreviousEpoch - connectedSyns
    numEliminatedSynapses[numEliminatedSynapses < 0] = 0
    metrics['numRemoveSyn'].append(np.sum(numEliminatedSynapses))

    metrics['reconstructionError'].append(
      reconstructionError(sp, testInputs, activeColumnsCurrentEpoch))

    metrics['witnessError'].append(
      witnessError(sp, testInputs, activeColumnsCurrentEpoch))

    print tabulate(metrics, headers="keys")

    if expConfig.checkRFCenters:
      # check distribution of RF centers, useful to monitor recovery from trauma
      RFcenters, avgDistToCenter = getRFCenters(sp, params, type='connected')
      if expConfig.spatialImp == 'faulty_sp':
        aliveColumns = sp.getAliveColumns()
      else:
        aliveColumns = np.arange(numColumns)
      fig = plotReceptiveFieldCenter(RFcenters[aliveColumns, :],
                                     connectedCounts[aliveColumns],
                                     (params['nX'], params['nY']))
      plt.savefig('figures/RFcenters/{}/epoch_{}.png'.format(expName, epoch))
      plt.close(fig)
      np.savez('results/RFcenters/{}/epoch_{}'.format(expName, epoch),
               RFcenters, avgDistToCenter)

    if expConfig.checkInputSpaceCoverage:
      # check coverage of input space, useful to monitor recovery from trauma
      inputSpaceCoverage = calculateInputSpaceCoverage(sp)
      np.savez('results/InputCoverage/{}/epoch_{}'.format(expName, epoch),
               inputSpaceCoverage, connectedCounts)
      plt.figure(2)
      plt.clf()
      plt.imshow(inputSpaceCoverage, interpolation='nearest', cmap="jet")
      plt.colorbar()
      plt.savefig(
        'figures/InputCoverage/{}/epoch_{}.png'.format(expName, epoch))

    if expConfig.checkTestInput:
      # NOTE(review): aliveColumns below is only assigned in the
      # checkRFCenters branch above — if checkTestInput is enabled without
      # checkRFCenters this raises NameError. Needs a fix; left as-is here.
      RFcenters, avgDistToCenter = getRFCenters(sp, params, type='connected')
      inputIdx = 0
      outputColumns = np.zeros((numColumns, 1), dtype=uintType)
      sp.compute(testInputs[inputIdx, :], False, outputColumns)
      activeColumns = np.where(outputColumns > 0)[0]
      fig = plotReceptiveFieldCenter(RFcenters[aliveColumns, :],
                                     connectedCounts[aliveColumns],
                                     (params['nX'], params['nY']))
      plt.scatter(RFcenters[activeColumns, 0], RFcenters[activeColumns, 1],
                  color='r')
      plt.savefig(
        'figures/ResponseToTestInputs/{}/epoch_{}.png'.format(expName, epoch))

    if expConfig.saveBoostFactors:
      np.savez('results/boostFactors/{}/epoch_{}'.format(expName, epoch),
               meanBoostFactors)

    if expConfig.showExampleRFs:
      fig = plotReceptiveFields2D(sp, params['nX'], params['nY'])
      plt.savefig('figures/exampleRFs/{}/epoch_{}'.format(expName, epoch))
      plt.close(fig)

    if epoch in checkPoints:
      # inspect SP again
      inspectSpatialPoolerStats(sp, inputVectors,
                                expName+"epoch{}".format(epoch))

    epoch += 1

  # plot stats over training
  fileName = 'figures/network_stats_over_training_{}.pdf'.format(expName)
  plotSPstatsOverTime(metrics, fileName)

  metrics['expName'] = expName
  # NOTE(review): the file handle from open(...) is never closed — consider
  # a 'with' block.
  pickle.dump(metrics, open('results/traces/{}/trace'.format(expName), 'wb'))

  plotReceptiveFields2D(sp, params['nX'], params['nY'])
  inspectSpatialPoolerStats(sp, inputVectors,
                            inputVectorType + "afterTraining")

  plotExampleInputOutput(sp, inputVectors, expName + "final")
  return metrics, expName