Example #1
def buildTrainerSAENetwork(train,
                           kernelConv,
                           kernelSizeConv,
                           downsampleConv,
                           learnConv,
                           momentumConv,
                           dropoutConv,
                           neuronFull,
                           learnFull,
                           momentumFull,
                           dropoutFull,
                           prof=None,
                           log=None):
    from ae.net import TrainerSAENetwork
    from ae.convolutionalAE import ConvolutionalAutoEncoder
    from ae.contiguousAE import ContiguousAutoEncoder
    from six.moves import reduce
    from operator import mul
    from numpy.random import RandomState
    from time import time
    rng = RandomState(int(time()))

    # create the stacked network -- LeNet-5 (minus the output layer)
    network = TrainerSAENetwork(train, prof=prof)

    if log is not None:
        log.info('Initialize the Network')

    # prepare for the next layer
    def prepare(network, count):
        return (count + 1, network.getNetworkOutputSize())

    layerCount = 1
    layerInputSize = train.eval().shape[1:]
    for k, ks, do, l, m, dr in zip(kernelConv, kernelSizeConv, downsampleConv,
                                   learnConv, momentumConv, dropoutConv):
        # add a convolutional layer as defined
        network.addLayer(
            ConvolutionalAutoEncoder(layerID='conv' + str(layerCount),
                                     inputSize=layerInputSize,
                                     kernelSize=(k, layerInputSize[1], ks, ks),
                                     downsampleFactor=[do, do],
                                     dropout=dr,
                                     learningRate=l,
                                     randomNumGen=rng))

        # prepare for the next layer
        layerCount, layerInputSize = prepare(network, layerCount)

    # update to transition for fully connected layers
    layerInputSize = (layerInputSize[0], reduce(mul, layerInputSize[1:]))
    for n, l, m, dr in zip(neuronFull, learnFull, momentumFull, dropoutFull):
        # add a fully-connected layer as defined
        network.addLayer(
            ContiguousAutoEncoder(layerID='fully' + str(layerCount),
                                  inputSize=layerInputSize,
                                  numNeurons=n,
                                  learningRate=l,
                                  dropout=dr,
                                  randomNumGen=rng))

        # prepare for the next layer
        layerCount, layerInputSize = prepare(network, layerCount)

    return network
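A minimal usage sketch, added for illustration; it is not part of the original source. The per-layer hyperparameters are parallel lists with one entry per layer: the convolutional lists must share one length and the fully-connected lists another. The 5-D chip layout and the toShared call follow Example #2 below, and every value here is a placeholder.

import numpy as np
from nn.datasetUtils import toShared

# ten batches of fifty single-channel 28x28 chips -- illustrative only
chips = np.random.rand(10, 50, 1, 28, 28).astype(np.float32)
train = toShared(chips, True)

sae = buildTrainerSAENetwork(train,
                             kernelConv=[6, 16],
                             kernelSizeConv=[5, 5],
                             downsampleConv=[2, 2],
                             learnConv=[.01, .01],
                             momentumConv=[.9, .9],
                             dropoutConv=[.8, .5],
                             neuronFull=[500, 100],
                             learnFull=[.01, .01],
                             momentumFull=[.9, .9],
                             dropoutFull=[.5, .5])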
Example #2
def createNetwork(image, log=None):
    '''Load or train a network for the image. Relies on the module-level
    options, Profiler and subdivideImage definitions from this script.'''
    from nn.net import ClassifierNetwork
    global chips, regions

    # divide the image into chips
    chips, regions = subdivideImage(image, options.chipSize, 5,
                                    options.batchSize, False)
    print('Chips Cut: ' + str(chips.shape))
    prof = Profiler(log=log,
                    name='profile',
                    profFile='./likelinessFinder_FullScan-profile.xml')

    # load a previously created network
    if options.synapse is not None:
        if log is not None:
            log.info('Loading Network from Disk...')
        network = ClassifierNetwork(options.synapse, prof)

    # create a newly trained network on the specified image
    else:
        import os
        import time
        import numpy as np
        from ae.net import TrainerSAENetwork
        from nn.datasetUtils import toShared
        from ae.contiguousAE import ContiguousAutoEncoder
        from numpy.random import RandomState
        from six.moves import reduce
        from operator import mul

        if log is not None:
            log.info('Training new Network...')

        # seed one random number generator shared across all layers
        rng = RandomState(int(time.time()))

        if log is not None:
            log.info('Initializing the SAE...')

        # create the SAE
        network = TrainerSAENetwork((toShared(chips, True), None), prof=prof)
        '''
        # add convolutional layers
        network.addLayer(ConvolutionalAutoEncoder(
            layerID='c1', inputSize=chips.shape[1:], 
            kernelSize=(options.kernel,chips.shape[2],5,5),
            downsampleFactor=(2,2), randomNumGen=rng,
            learningRate=options.learnC))
        network.addLayer(ConvolutionalAutoEncoder(
            layerID='c2', inputSize=network.getNetworkOutputSize(), 
            kernelSize=(options.kernel,options.kernel,5,5),
            downsampleFactor=(2,2), randomNumGen=rng,
            learningRate=options.learnC))

        # add fully connected layers
        numInputs = reduce(mul, network.getNetworkOutputSize()[1:])
        network.addLayer(ContiguousAutoEncoder(
            layerID='f3', 
            inputSize=(network.getNetworkOutputSize()[0], numInputs),
            numNeurons=int(options.hidden*1.5),
            learningRate=options.learnF, randomNumGen=rng))
        '''
        from theano.tensor import tanh
        network.addLayer(
            ContiguousAutoEncoder(layerID='f1',
                                  inputSize=(chips.shape[1],
                                             reduce(mul, chips.shape[2:])),
                                  numNeurons=500,
                                  learningRate=options.learnF,
                                  activation=tanh,
                                  contractionRate=options.contrF,
                                  randomNumGen=rng))
        network.addLayer(
            ContiguousAutoEncoder(layerID='f2',
                                  inputSize=network.getNetworkOutputSize(),
                                  numNeurons=200,
                                  learningRate=options.learnF,
                                  activation=tanh,
                                  contractionRate=options.contrF,
                                  randomNumGen=rng))
        network.addLayer(
            ContiguousAutoEncoder(layerID='f3',
                                  inputSize=network.getNetworkOutputSize(),
                                  numNeurons=100,
                                  learningRate=options.learnF,
                                  activation=tanh,
                                  contractionRate=options.contrF,
                                  randomNumGen=rng))
        network.addLayer(
            ContiguousAutoEncoder(layerID='f4',
                                  inputSize=network.getNetworkOutputSize(),
                                  numNeurons=50,
                                  learningRate=options.learnF,
                                  activation=tanh,
                                  contractionRate=options.contrF,
                                  randomNumGen=rng))

        if log is not None:
            log.info('Entering Training...')

        network.writeWeights(0, -1)
        # TODO: this could make for a great demo visual to create a blinking
        #       image of the chips which are currently being activated
        globalEpoch = 0
        for layerIndex in range(network.getNumLayers()):
            for sweep in range(4):  # four passes of numEpochs per layer
                '''
                globalEpoch, globalCost = network.trainEpoch(
                    layerIndex, globalEpoch, options.numEpochs)
                '''
                globCost = []
                for localEpoch in range(options.numEpochs):
                    layerEpochStr = 'Layer[' + str(layerIndex) + '] Epoch[' + \
                                    str(globalEpoch + localEpoch) + ']'
                    print('Running ' + layerEpochStr)
                    locCost = []
                    for batchIndex in range(chips.shape[0]):
                        locCost.append(network.train(layerIndex, batchIndex))

                    # np.mean over a list of (cost, jacobian) tuples yields
                    # an array, never a tuple, so test dimensionality instead
                    locCost = np.mean(locCost, axis=0)
                    if np.ndim(locCost) > 0:
                        print(layerEpochStr + ' Cost: ' + str(locCost[0]) +
                              ' - Jacob: ' + str(locCost[1]))
                    else:
                        print(layerEpochStr + ' Cost: ' + str(locCost))
                    globCost.append(locCost)

                    if layerIndex == 0:
                        network.writeWeights(layerIndex,
                                             globalEpoch + localEpoch)
                globalEpoch = globalEpoch + options.numEpochs
                network.save('kirtland_afb_neurons500_layer' + \
                             str(layerIndex) + '_epoch' + str(globalEpoch) + \
                             '.pkl.gz')
        #network.trainGreedyLayerwise(options.numEpochs)

        # save trained network -- just in case
        if log is not None:
            log.info('Saving Trained Network...')
        network.save(os.path.basename(options.image).replace(
                        '.png', '_preTrainedSAE_epoch' + \
                        str(options.numEpochs) + '.pkl.gz'))

        # re-type the trained SAE as a ClassifierNetwork so its stacked
        # layers can be reused directly for classification
        network.__class__ = ClassifierNetwork

    return network
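A short call sketch for createNetwork, illustrative and not from the original source. The function leans on the script's module-level option parsing, so this assumes options.chipSize, options.batchSize, options.synapse, options.learnF, options.contrF, options.numEpochs and options.image have already been populated; the pixel array below is only a stand-in for a real image.

import numpy as np

# stand-in single-channel image; shape is illustrative only
image = np.random.rand(1, 1024, 1024).astype(np.float32)
network = createNetwork(image, log=None)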
Example #3
def buildSAENetwork(network,
                    inputSize,
                    kernelConv,
                    kernelSizeConv,
                    downsampleConv,
                    learnConv,
                    momentumConv,
                    dropoutConv,
                    neuronFull,
                    learnFull,
                    momentumFull,
                    dropoutFull,
                    log=None):
    '''Build the network in an automated way.'''
    from ae.convolutionalAE import ConvolutionalAutoEncoder
    from ae.contiguousAE import ContiguousAutoEncoder
    from six.moves import reduce
    from operator import mul
    from numpy.random import RandomState
    from time import time
    rng = RandomState(int(time()))

    if log is not None:
        log.info('Initialize the Network')

    def prepare(network, count):
        return (count + 1, network.getNetworkOutputSize())

    layerCount = 1
    layerInputSize = inputSize
    for k, ks, do, l, m, dr in zip(kernelConv, kernelSizeConv, downsampleConv,
                                   learnConv, momentumConv, dropoutConv):
        # add a convolutional layer as defined
        network.addLayer(
            ConvolutionalAutoEncoder(layerID='conv' + str(layerCount),
                                     inputSize=layerInputSize,
                                     kernelSize=(k, layerInputSize[1], ks, ks),
                                     downsampleFactor=[do, do],
                                     dropout=dr,
                                     learningRate=l,
                                     randomNumGen=rng))

        # prepare for the next layer
        layerCount, layerInputSize = prepare(network, layerCount)

    # update to transition for fully connected layers
    layerInputSize = (layerInputSize[0], reduce(mul, layerInputSize[1:]))
    for n, l, m, dr in zip(neuronFull, learnFull, momentumFull, dropoutFull):
        # add a fully-connected layer as defined
        network.addLayer(
            ContiguousAutoEncoder(layerID='fully' + str(layerCount),
                                  inputSize=layerInputSize,
                                  numNeurons=n,
                                  learningRate=l,
                                  dropout=dr,
                                  randomNumGen=rng))

        # prepare for the next layer
        layerCount, layerInputSize = prepare(network, layerCount)

    return network
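A hedged usage sketch, not from the source: buildSAENetwork expects an already-constructed network plus the batched input size, whose second entry is read as the channel count when sizing the convolutional kernels. The tuple form passed to TrainerSAENetwork follows Example #2; all shapes and rates below are placeholders.

import numpy as np
from ae.net import TrainerSAENetwork
from nn.datasetUtils import toShared

chips = np.random.rand(10, 50, 1, 28, 28).astype(np.float32)
net = buildSAENetwork(TrainerSAENetwork((toShared(chips, True), None),
                                        prof=None),
                      inputSize=chips.shape[1:],
                      kernelConv=[6],
                      kernelSizeConv=[5],
                      downsampleConv=[2],
                      learnConv=[.01],
                      momentumConv=[.9],
                      dropoutConv=[.8],
                      neuronFull=[300],
                      learnFull=[.01],
                      momentumFull=[.9],
                      dropoutFull=[.5])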
Example #4
    # create the stacked network -- LeNet-5 (minus the output layer)
    network = Net(train, prof=prof)

    if options.synapse is not None:
        # load a previously saved network
        network.load(options.synapse)
    else:
        log.info('Initializing Network...')

        import theano.tensor as t
        network.addLayer(
            ContiguousAutoEncoder(
                layerID='f1',
                inputSize=(trainShape[1], reduce(mul, trainShape[2:])),
                numNeurons=options.neuron,
                learningRate=options.learnF,
                dropout=1.,  #.8 if options.dropout else 1.,
                activation=t.nnet.sigmoid,
                randomNumGen=rng))
        network.addLayer(
            ContiguousAutoEncoder(
                layerID='f2',
                inputSize=(network.getNetworkOutputSize()[0],
                           reduce(mul,
                                  network.getNetworkOutputSize()[1:])),
                numNeurons=options.neuron,
                learningRate=options.learnF,
                dropout=1.,
                activation=t.nnet.sigmoid,
                randomNumGen=rng))
Example #5
        # refactor the output to be (numImages*numKernels,1,numRows,numCols)
        # so we don't combine the channel kernels created in the first
        # layer and destroy our dimensionality
        network.addLayer(ConvolutionalAutoEncoder(
            layerID='c2', inputSize=network.getNetworkOutputSize(), 
            kernelSize=(options.kernel,options.kernel,5,5),
            downsampleFactor=(2,2), randomNumGen=rng,
            dropout=.5 if options.dropout else 1.,
            learningRate=options.learnC))

        # add fully connected layers
        network.addLayer(ContiguousAutoEncoder(
            layerID='f3', 
            inputSize=(network.getNetworkOutputSize()[0], 
                       reduce(mul, network.getNetworkOutputSize()[1:])),
            numNeurons=options.neuron, learningRate=options.learnF,
            dropout=.5 if options.dropout else 1.,
            randomNumGen=rng))

        # the final output layer is removed from the normal NN --
        # the output layer is special, as it makes decisions about
        # patterns identified in previous layers, so it should only
        # be influenced/trained during supervised learning. 

    # train the SAE for unsupervised pattern recognition
    bestNetwork = trainUnsupervised(network, __file__, options.data, 
                                    numEpochs=options.limit, stop=options.stop, 
                                    synapse=options.synapse, base=options.base, 
                                    dropout=options.dropout, 
                                    learnC=options.learnC,