Example #1
    def test_partitionModel3(self):
        """Test partitioning of a NxModel.

        After completion of the algorithm, the partitioner reconstructs the
        kernelIdMap from the synapses and axons generated during partitioning.
        An exception is thrown if the reconstructed map does not equal the
        original map.
        """

        inputShape = (73, 81, 3)
        inputLayer = NxInputLayer(inputShape)
        hiddenLayer = NxConv2D(11,
                               3,
                               strides=(2, 2),
                               padding='same',
                               validatePartitions=True)(inputLayer.input)
        hiddenLayer = NxAveragePooling2D(4,
                                         validatePartitions=True)(hiddenLayer)
        hiddenLayer = NxFlatten()(hiddenLayer)
        outputLayer = NxDense(50, validatePartitions=True)(hiddenLayer)

        model = NxModel(inputLayer.input, outputLayer)

        model.partition()

        model.clearTemp()
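
The validation described in the docstring of test_partitionModel3 can be pictured with a minimal sketch (illustrative only; the map below is dummy data and the reconstruction step is a stand-in, not the NxSDK-internal partitioner API):

import numpy as np

# Dummy stand-in for the kernelIdMap built before partitioning.
originalKernelIdMap = np.arange(12).reshape(3, 4)

# Pretend this map was rebuilt from the synapses and axons generated during
# partitioning; with validatePartitions=True the reconstruction and comparison
# happen inside the partitioner.
reconstructedKernelIdMap = originalKernelIdMap.copy()

if not np.array_equal(reconstructedKernelIdMap, originalKernelIdMap):
    raise AssertionError("Reconstructed kernelIdMap does not equal the "
                         "original map.")
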
Example #2
    def test_partition1(self):
        """Test partitioning a single fully-connected layer."""

        inputShape = (3, 3, 2)
        inputLayer = NxInputLayer(inputShape)
        flattenLayer = NxFlatten()(inputLayer.input)
        outputLayer = NxDense(10, validatePartitions=True)
        model = NxModel(inputLayer.input, outputLayer(flattenLayer))

        model.partition()

        model.clearTemp()
Example #3
    def test_partition2(self):
        """Test partitioning two fully-connected layers."""

        inputShape = (40, 32, 2)
        inputLayer = NxInputLayer(inputShape)
        flattenLayer = NxFlatten()(inputLayer.input)
        hiddenLayer = NxDense(100, validatePartitions=True)
        outputLayer = NxDense(10, validatePartitions=True)
        model = NxModel(inputLayer.input,
                        outputLayer(hiddenLayer(flattenLayer)))

        model.partition()

        model.clearTemp()
Example #4
    def setUpDNN(self) -> ComposableDNN:
        """Sets up a DNN"""
        # Specify input shape of network.
        inputShape = (16, 16, 3)

        #################
        # BUILD NETWORK #
        #################

        inputLayer = NxInputLayer(inputShape)

        x = NxConv2D(4, (3, 3))(inputLayer.input)
        x = NxAveragePooling2D()(x)
        x = NxFlatten()(x)
        x = NxDense(10)(x)

        DNNModel = NxModel(inputLayer.input, x)

        composableDNNModel = ComposableDNN(model=DNNModel, num_steps_per_img=100)
        return composableDNNModel
Example #5
                 resetMode=reset_mode,
                 name=name)(layer)

name = 'pool3'
weights = np.load(os.path.join(path_weights, name + '.npy'))
shape = weights.shape + (filters, filters)
weights = np.broadcast_to(np.expand_dims(weights, (-2, -1)), shape)
layer = NxAveragePooling2D(2,
                           2,
                           compartmentKwargs=compartment_kwargs,
                           connectionKwargs=connection_kwargs,
                           resetMode=reset_mode,
                           name=name)(layer)
pool_weights[name] = [weights, biases]

layer = NxFlatten()(layer)

name = 'fc1'
weights = np.load(os.path.join(path_weights, name + '.npy'))
weights, num_weight_bits, weight_exponent, sign_mode = \
    Slayer2Loihi.optimizeWeightBits(weights)
conn_kwargs = connection_kwargs.copy()
conn_kwargs['numWeightBits'] = int(num_weight_bits)
conn_kwargs['weightExponent'] = int(weight_exponent)
conn_kwargs['signMode'] = sign_mode
weights = weights.transpose()

# Switching from x-y-p co-ordinates to a fully connected layer requires
# re-ordering
shape = (8, 8, 32)
idxs = np.arange(int(np.prod(shape)))
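
The snippet breaks off before the re-ordering itself is applied. As a hedged sketch of the idea (not the original code; the flattening conventions below are assumptions), a permutation of the flattened indices can be used to reorder the input dimension of the dense weight matrix:

import numpy as np

shape = (8, 8, 32)                     # (y, x, p) of the preceding layer
numInputs = int(np.prod(shape))

# For each position of an assumed channel-major (p, y, x) flattening, look up
# the corresponding index in a channels-last, row-major (y, x, p) flattening.
perm = np.transpose(np.arange(numInputs).reshape(shape), (2, 0, 1)).ravel()

# Dummy dense weights of shape (numInputs, numOutputs); permute the rows so
# the input ordering matches the new flattening convention.
weights = np.random.rand(numInputs, 10)
weights = weights[perm]
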
Example #6
# Get SLAYER weights.
path_weights = os.path.join(Slayer2Loihi.getModels(), '02_NMNIST', 'Trained')

#########
# MODEL #
#########

# INPUT LAYER
input_layer = NxInputLayer(input_shape_flat,
                           resetMode=reset_mode,
                           compartmentKwargs=compartment_kwargs_input,
                           connectionKwargs=connection_kwargs,
                           inputMode=InputModes.AEDAT)

# The flatten layer is just needed to remove the dummy axes (1, ..., 1) in the
# input layer.
layer = NxFlatten()(input_layer.input)

# HIDDEN LAYER
name = 'NMNISTFc1'
weights = np.load(os.path.join(path_weights, name + '.npy'))

# SLAYER helper function to determine how to encode weights efficiently.
weights, num_weight_bits, weight_exponent, sign_mode = \
    Slayer2Loihi.optimizeWeightBits(weights)
# Apply optimized connection settings.
conn_kwargs = connection_kwargs.copy()
conn_kwargs['numWeightBits'] = int(num_weight_bits)
conn_kwargs['weightExponent'] = int(weight_exponent)
conn_kwargs['signMode'] = sign_mode

# Weight matrix needs to be transposed when going from SLAYER (Pytorch) to
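
For intuition, here is a rough sketch of what a weight-encoding step of this kind does conceptually (this is not the Slayer2Loihi implementation; the fixed bit width and the helper name are assumptions): express each float weight as an integer mantissa of limited precision times a power-of-two exponent.

import numpy as np

def sketchOptimizeWeightBits(weights, numWeightBits=8):
    """Illustrative only: encode weights as mantissa * 2**exponent."""
    maxAbs = np.max(np.abs(weights))
    if maxAbs == 0:
        return np.zeros_like(weights, dtype=int), numWeightBits, 0
    # Smallest exponent such that the mantissa fits into numWeightBits signed
    # bits.
    exponent = int(np.ceil(np.log2(maxAbs / (2 ** (numWeightBits - 1) - 1))))
    mantissa = np.rint(weights / 2.0 ** exponent).astype(int)
    return mantissa, numWeightBits, exponent

# The encoded weights approximate the original float weights.
w = np.array([-0.5, 0.25, 0.1])
m, numBits, exp = sketchOptimizeWeightBits(w)
assert np.allclose(m * 2.0 ** exp, w, atol=2.0 ** exp)
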
Example #7
    def test_Flatten(self):
        """Test correlation between ANN activations and SNN spikerates.

        The network consists of a 3D input layer, followed by a flatten layer,
        which has 1-to-1 connections to the output layer. The input pixel
        values are set to ascending integers in Fortran style.

        This test asserts that the spikerates in the output layer are close to
        the ANN activations, by computing the Pearson correlation coefficient.
        A perfect correlation cannot be expected due to quantization errors
        when approximating ANN activations with discrete spikes. However,
        correlations should be higher than 0.99.
        """

        visualizePartitions = False
        doPlot = False

        # Height, width, depth
        inputShape = (3, 4, 5)

        numInputNeurons = int(np.prod(inputShape))
        numOutputNeurons = numInputNeurons - 1
        inputScale = 255

        thrToInputRatio = 2**7
        thrGain = 2**6

        # No need to divide by thrGain because spike input receives equal gain.
        vThMant = 1

        vThMantInput = thrToInputRatio * inputScale // thrGain

        maxNumSpikes = 100
        numSteps = thrToInputRatio * maxNumSpikes

        weights = np.eye(numInputNeurons, numOutputNeurons, dtype=int)
        biases = np.zeros(numOutputNeurons, int)

        nxInput = NxInputLayer(inputShape,
                               vThMant=vThMantInput,
                               visualizePartitions=visualizePartitions)
        nxLayer = NxDense(numOutputNeurons,
                          weights=[weights, biases],
                          vThMant=vThMant,
                          validatePartitions=True,
                          probeSpikes=True)
        nxModel = NxModel(nxInput.input, nxLayer(NxFlatten()(nxInput.input)))
        nxModel.compileModel()

        kerasInput = Input(inputShape)
        kerasLayer = Dense(numOutputNeurons,
                           weights=[weights, biases])(Flatten()(kerasInput))
        kerasModel = Model(kerasInput, kerasLayer)

        # Define probes to read out currents.
        sProbes = []
        for i in range(numOutputNeurons):
            sProbes.append(nxLayer[i].probe(ProbableStates.ACTIVITY))

        # Set bias currents
        inputImage = np.reshape(np.arange(numInputNeurons), inputShape, 'F')
        inputImage = inputImage % 255
        for i, b in enumerate(np.ravel(inputImage, 'F')):
            nxInput[i].biasMant = b
            nxInput[i].phase = 2

        nxModel.run(numSteps)
        nxModel.disconnect()

        data = extract(sProbes)
        spikecount = _data_to_img(data // 127, nxLayer.output_shape[1:])
        spikerates = spikecount / numSteps * thrToInputRatio

        batchInputImage = np.expand_dims(inputImage, 0)
        activations = \
            kerasModel.predict(batchInputImage)[0] / (vThMant * thrGain)

        if doPlot:

            plt.figure(3)
            plt.imshow(normalize_image_dims(inputImage))
            plt.show()

            plt.figure(6)
            plt.plot(activations.flatten(), spikerates.flatten(), '.')
            plt.show()

            plt.figure(7)
            plt.imshow(normalize_image_dims(activations))
            plt.show()

            plt.figure(8)
            plt.imshow(normalize_image_dims(spikerates))
            plt.show()

        corr = np.corrcoef(np.ravel(spikerates), np.ravel(activations))[0, 1]

        self.assertAlmostEqual(corr,
                               1,
                               2,
                               msg="Correlation between ANN activations "
                               "and SNN spikerates is too low.")
Example #8
def runCorrelationRandom(layer,
                         vThMant,
                         insertFlatten=False,
                         inputShape=None,
                         logger=None):
    """Run network to test correlation between ANN and SNN.

    :param NxLayer | Layer layer: NxLayer to test.
    :param int vThMant: Threshold of ``layer``; used to scale activations.
    :param bool insertFlatten: Whether to flatten input before applying it to
        ``layer``.
    :param np.ndarray | tuple | list inputShape: Shape of input to the network.
    :param logging.Logger logger: Logger.

    :return: Pearson correlation coefficient of ANN activations and SNN rates.
    :rtype: float
    """

    seed = 123
    np.random.seed(seed)

    visualizePartitions = False
    plotUV = False

    if inputShape is None:
        inputShape = (7, 7, 1)
    numInputNeurons = int(np.prod(inputShape))
    inputScale = numInputNeurons - 1

    thrToInputRatio = 2**7
    thrGain = 2**0

    vThMantInput = thrToInputRatio * inputScale // thrGain

    maxNumSpikes = 100
    numSteps = thrToInputRatio * maxNumSpikes

    inputImage = np.random.randint(0, inputScale, inputShape)

    inputLayer = NxInputLayer(batch_input_shape=(1, ) + inputShape,
                              vThMant=vThMantInput,
                              visualizePartitions=visualizePartitions)

    out = layer(NxFlatten()(inputLayer.input)) \
        if insertFlatten else layer(inputLayer.input)

    model = NxModel(inputLayer.input, out, logger=logger)

    model.compileModel()

    outputShape = layer.output_shape[1:]

    # Define probes to read out currents.
    vProbes0 = []
    for i in range(numInputNeurons):
        vProbes0.append(inputLayer[i].probe(ProbableStates.VOLTAGE))

    vProbes = []
    sProbes = []
    for i in range(int(np.prod(outputShape))):
        vProbes.append(layer[i].probe(ProbableStates.VOLTAGE))
        sProbes.append(layer[i].probe(ProbableStates.ACTIVITY))

    # Set bias currents
    for i, b in enumerate(np.ravel(inputImage, 'F')):
        inputLayer[i].biasMant = b
        inputLayer[i].phase = 2

    model.run(numSteps)
    model.disconnect()

    data = extract(sProbes)
    spikecount = _data_to_img(data // 127, outputShape)
    spikerates = spikecount / numSteps * thrToInputRatio

    batchInputImage = np.expand_dims(inputImage, 0)
    activations = model.predict(batchInputImage)[0] / (vThMant * thrGain)

    if plotUV:
        plt.figure(1)
        _plot_stimulus_response(vProbes0, [])
        plt.show()

        plt.figure(2)
        _plot_stimulus_response(vProbes, sProbes)
        plt.show()

        plt.figure(3)
        plt.imshow(normalize_image_dims(inputImage))
        plt.show()

        plt.figure(6)
        plt.plot(activations.flatten(), spikerates.flatten(), '.')
        plt.show()

        plt.figure(7)
        plt.imshow(normalize_image_dims(activations))
        plt.show()

        plt.figure(8)
        plt.imshow(normalize_image_dims(spikerates))
        plt.show()

    return np.corrcoef(np.ravel(spikerates), np.ravel(activations))[0, 1]
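
A hypothetical usage sketch of runCorrelationRandom (not part of the original test suite; the layer configuration and weight values are assumptions, and the NxTF imports are taken to be the same as in the tests above):

import numpy as np

# NxDense and runCorrelationRandom are assumed to be imported as above.
inputShape = (5, 5, 2)
numInputNeurons = int(np.prod(inputShape))
numOutputNeurons = 10

# Small non-negative integer weights keep the ANN activations non-negative.
weights = np.random.randint(0, 3, (numInputNeurons, numOutputNeurons))
biases = np.zeros(numOutputNeurons, int)

vThMant = 10
layer = NxDense(numOutputNeurons,
                weights=[weights, biases],
                vThMant=vThMant,
                validatePartitions=True,
                probeSpikes=True)

corr = runCorrelationRandom(layer, vThMant, insertFlatten=True,
                            inputShape=inputShape)
print(corr)  # expected close to 1 for a faithful ANN-to-SNN conversion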