Example #1
def passConvertToFPGALayers(pipeline):
    "Convert supported layers to their FPGA dataflow layer equivalents."
    ret = []
    numChanges = 0
    for L in pipeline:
        if L.get_type().startswith("FPGA"):
            # already FPGA layer -- copy as-is
            ret += [L]
        elif layers_base.isMatrixThresholdLayer(L):
            if layers_base.isFCLayer(L.mlayer):
                # TODO the conditions need to be more specific (bipolar
                # weights)
                ret += [layers_fpga.FPGABipolarMatrixThresholdLayer(L)]
            elif layers_base.isConvLayer(L.mlayer):
                # TODO the conditions need to be more specific (bipolar
                # weights)
                ret += [layers_fpga.FPGABipolarConvThresholdLayer(L)]
            else:
                raise Exception(
                    "Unsupported matrix-threshold combination for FPGA backend"
                )
            numChanges += 1
        elif layers_base.isFCLayer(L):
            # TODO the conditions need to be more specific (bipolar input and
            # weights)
            ret += [layers_fpga.FPGABipolarMatrixLayer(L)]
            numChanges += 1
        elif layers_base.isPoolingLayer(L):
            ret += [layers_fpga.FPGAMaxPoolLayer(L)]
            numChanges += 1
        else:
            raise Exception("Unsupported layer type in FPGA backend: %s" %
                            L.get_type())
    return (ret, numChanges)
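The (pipeline, numChanges) return pair suggests these passes are meant to be driven by a loop that re-applies them until a full sweep reports no changes. A minimal sketch of such a driver, assuming only that contract (apply_passes and the pass list are illustrative, not from the original codebase):

def apply_passes(pipeline, passes):
    # re-run the whole pass list until one full sweep makes no changes
    changed = True
    while changed:
        changed = False
        for p in passes:
            pipeline, numChanges = p(pipeline)
            if numChanges > 0:
                changed = True
    return pipeline

# usage sketch:
# pipeline = apply_passes(pipeline, [passConvertToFPGALayers])

This terminates only because converted layers are copied through unchanged on the next sweep (the get_type().startswith("FPGA") check above).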
Example #2
 def __str__(self):
     strbuf = 'NSWG:\n'
     # weight matrix dimensions for each matrix layer
     strbuf += '{0:>8} {1:>8}\n'.format('MatrixH', 'MatrixW')
     for i, layer in enumerate(self.net.layers):
         if lb.isMatrixLayer(layer) or lfpga.isFPGAMatrixLayer(layer):
             strbuf += '{0:>8} {1:>8}\n'.format(str(self.matrixH[i]),
                                                str(self.matrixW[i]))
     strbuf += '\n'
     # folding (parallelism) factors for each matrix layer
     strbuf += '{0:>8} {1:>8}\n'.format('Synaptic', 'Neuron')
     for i, layer in enumerate(self.net.layers):
         if lb.isMatrixLayer(layer) or lfpga.isFPGAMatrixLayer(layer):
             strbuf += '{0:>8} {1:>8}\n'.format(self.synapse_fold[i],
                                                self.neuron_fold[i])
     strbuf += '\n'
     # buffering and cycle estimates (base conv layers and FPGA matrix layers)
     strbuf += '{0:>16} {1:>20} {2:>20} {3:>20} {4:>20}\n'.format(
         'Initial Buffer', 'Write Block Cycles', 'Read Block Cycles',
         'Total Cycles', 'Input Multiplier')
     for i, layer in enumerate(self.net.layers):
         if lb.isConvLayer(layer) or lfpga.isFPGAMatrixLayer(layer):
             strbuf += '{0:>16} {1:>20} {2:>20} {3:>20} {4:>20}\n'.format(
                 self.initial_buffer[i], self.write_block_cycles[i],
                 self.read_block_cycles[i], self.total_cycles[i],
                 self.input_multiplier[i])
     return strbuf
Example #3
def passConvertToCaffeLayers(pipeline, qnnengine):
    "Convert layers to corresponding Caffe-equivalent implementation layers."
    ret = []
    default_engine_maxbits = 4
    gemmlowp_maxbits = 8
    if qnnengine == "float":
        # force all layers to be generated as vanilla Caffe layers
        default_engine_maxbits = 0
        gemmlowp_maxbits = 0
    # note: layer, input and output buffer names are left empty -- they are
    # set later
    for L in pipeline:
        if lb.isFCLayer(L):
            if (L.wbits * L.ibits) <= default_engine_maxbits:
                ret += [lcaffe.CaffeIntegerInnerProductLayer("", L, qnnengine)]
            elif L.wbits <= gemmlowp_maxbits:
                ret += [
                    lcaffe.CaffeIntegerInnerProductLayer("", L, "gemmlowp")
                ]
            else:
                ret += [lcaffe.CaffeInnerProductLayer("", L)]
        elif lb.isConvLayer(L):
            if (L.wbits * L.ibits) <= default_engine_maxbits:
                ret += [lcaffe.CaffeIntegerConvolutionLayer("", L, qnnengine)]
            elif L.wbits <= gemmlowp_maxbits:
                ret += [lcaffe.CaffeIntegerConvolutionLayer("", L, "gemmlowp")]
            else:
                ret += [lcaffe.CaffeConvolutionLayer("", L)]
        elif lb.isPoolingLayer(L):
            ret += [lcaffe.CaffePoolLayer("", L)]
        elif lb.isThresholdLayer(L):
            ret += [lcaffe.CaffeMultiThresholdLayer("", L)]
        elif lb.isLinearLayer(L):
            ret += [lcaffe.CaffeScaleLayer("", L)]
        elif lb.isReLULayer(L):
            ret += [lcaffe.CaffeReLULayer("")]
        elif lb.isSoftmaxLayer(L):
            ret += [lcaffe.CaffeSoftmaxLayer("")]
        elif lcaffe.isCaffeLayer(L):
            ret += [L]
        else:
            raise Exception("Unsupported layer type for Caffe backend: %s" %
                            L.get_type())

    # the reported change count is 0 (assumption: this conversion pass is
    # applied once, not iterated by a pass driver)
    return (ret, 0)
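The engine choice above is a small decision tree on the weight/input bit widths. A self-contained restatement of just that dispatch (select_engine is a hypothetical helper; the thresholds mirror the constants above):

def select_engine(wbits, ibits, qnnengine):
    # illustrative restatement of the dispatch in passConvertToCaffeLayers
    default_engine_maxbits = 0 if qnnengine == "float" else 4
    gemmlowp_maxbits = 0 if qnnengine == "float" else 8
    if wbits * ibits <= default_engine_maxbits:
        return qnnengine   # integer layer on the requested engine
    elif wbits <= gemmlowp_maxbits:
        return "gemmlowp"  # integer layer via gemmlowp
    else:
        return "float"     # plain (vanilla) Caffe layer

# e.g. select_engine(1, 2, "engine_x") -> "engine_x"
#      select_engine(4, 4, "engine_x") -> "gemmlowp"
#      select_engine(16, 8, "engine_x") -> "float"

Note that only wbits is checked against the gemmlowp threshold, exactly as in the pass above.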
Example #4
 def __init__(self, mtl):
     # copy all attributes from base MatrixThresholdLayer
     self.__dict__.update(mtl.__dict__)
     # TODO checking width is not enough -- also need to check encoding
     # (bipolar/regular)
     if self.wbits != 1:
         raise Exception("Only binarized weights supported")
     if self.ibits > 8:
         raise Exception("Only activations of 8 bits or less supported")
     if not lb.isConvLayer(self.mlayer):
         raise Exception(
             "FPGABipolarConvThresholdLayer needs conv as matrix layer")
     # make sure weight array really is bipolar
     if not isBipolarMatrix(self.mlayer.W):
         raise Exception("Non-bipolar elements found in weight matrix")
     # ConvolutionMMVInputGenerator needs a fix to handle uneven stride
     if self.mlayer.k % self.mlayer.stride != 0:
         raise Exception(
             "ConvolutionMMVInputGenerator currently needs window mod stride == 0"
         )
     # default parallelism (folding) factors: start fully sequential
     self.pe = 1
     self.simd = 1
     self.mmv = 1
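isBipolarMatrix is called above but not shown. Its likely semantics are a check that every weight is exactly -1 or +1; a minimal numpy sketch under that assumption:

import numpy as np

def isBipolarMatrix(W):
    # assumed semantics: all elements must be exactly -1 or +1
    W = np.asarray(W)
    return bool(np.logical_or(W == -1, W == 1).all())

# e.g. isBipolarMatrix([[-1, 1], [1, -1]]) -> True
#      isBipolarMatrix([[0, 1], [1, -1]])  -> False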
Example #5
def passInterleaveChannels(pipeline):
    """Interleave the weight matrices of all convolutional layers, plus the first
    subsequent fully-connected layer."""
    ret = []
    numChanges = 0
    # whether the inputs to the current layer were interleaved
    last_output_interleaved = False
    # the interleave factor for the inputs to the current layer
    last_output_interleave_factor = 0
    for L in pipeline:
        if lb.isConvLayer(L):
            # interleave the conv weight matrix
            # originally, we assume the data layout is [ofm][ifm][k][k]
            W = L.W.reshape(L.ofm, L.ifm, L.get_filter_dim(),
                            L.get_filter_dim())
            # transpose the weight tensor to be [ofm][k][k][ifm]
            W = W.transpose(0, 2, 3, 1)
            # put back into matrix form and set layer weight matrix
            L.W = W.reshape(L.ofm, -1)
            ret += [L]
            last_output_interleaved = True
            last_output_interleave_factor = L.ofm
        elif lb.isFCLayer(L) and last_output_interleaved:
            # interleave the columns in the weight matrix of the first FC
            # layer
            r = L.W.shape[0]
            c = L.W.shape[1]
            W = L.W.reshape(r, last_output_interleave_factor, -1)
            L.W = W.transpose(0, 2, 1).reshape(r, c)
            ret += [L]
            # output is no longer interleaved
            last_output_interleaved = False
        else:
            # copy layer as-is
            ret += [L]
    # note: numChanges stays 0 -- re-running this pass would interleave the
    # weights a second time, so it should be applied only once
    return (ret, numChanges)
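The conv-branch transform is easiest to see on a tiny tensor. A standalone numpy sketch (the ofm/ifm/k values are arbitrary) showing how a flat weight row goes from ifm-major to pixel-major order, which is exactly the [ofm][ifm][k][k] -> [ofm][k][k][ifm] transpose above:

import numpy as np

ofm, ifm, k = 2, 3, 2
# flat conv weight matrix, laid out [ofm][ifm][k][k]
W = np.arange(ofm * ifm * k * k).reshape(ofm, ifm * k * k)

# same transform as the conv branch above
Wi = W.reshape(ofm, ifm, k, k).transpose(0, 2, 3, 1).reshape(ofm, -1)

print(W[0])   # [ 0  1  2  3  4  5  6  7  8  9 10 11]  (ifm-major)
print(Wi[0])  # [ 0  4  8  1  5  9  2  6 10  3  7 11]  (ifm now innermost)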