import scipy.io
from deephyp import autoencoder
from deephyp import data

if __name__ == '__main__':

    # read data into numpy array
    mat = scipy.io.loadmat('PaviaU.mat')
    img = mat['paviaU']

    # create a hyperspectral dataset object from the numpy array
    hypData = data.HypImg(img)

    # pre-process data to make the model easier to train
    hypData.pre_process('minmax')

    # create data iterator objects for training and validation using the pre-processed data
    trainSamples = 200000
    valSamples = 100
    dataTrain = data.Iterator(
        dataSamples=hypData.spectraPrep[:trainSamples, :],
        targets=hypData.spectraPrep[:trainSamples, :],
        batchSize=1000)
    dataVal = data.Iterator(
        dataSamples=hypData.spectraPrep[trainSamples:trainSamples +
                                        valSamples, :],
        targets=hypData.spectraPrep[trainSamples:trainSamples + valSamples, :])

    # shuffle training data
    dataTrain.shuffle()

    # setup a fully-connected autoencoder whose encoder maps the spectra
    # through layers of size 50, 30 and 10 down to a 3-dimensional latent space
    net_mlp = autoencoder.mlp_1D_network(inputSize=hypData.numBands,
                                         encoderSize=[50, 30, 10, 3],
                                         activationFunc='relu',
                                         weightInitOpt='truncated_normal',
                                         tiedWeights=None,
                                         skipConnect=False)

    # setup a convolutional autoencoder neural network with 3 conv encoder layers
    net_cnn = autoencoder.cnn_1D_network(inputSize=hypData.numBands,
                                         zDim=3,
                                         encoderNumFilters=[10, 10, 10],
                                         encoderFilterSize=[20, 10, 10],
                                         activationFunc='relu',
                                         weightInitOpt='truncated_normal',
                                         encoderStride=[1, 1, 1],
                                         tiedWeights=None,
                                         skipConnect=False)
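    # note: zDim=3 gives the CNN the same 3-dimensional latent space as the
    # fully-connected network above, so the two encodings can be compared
    # like-for-like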

    # setup a training operation for each network (using the same loss function)
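    # the listing is truncated here; as a minimal sketch, the training ops
    # could be attached as below, reusing the add_train_op signature from code
    # example #4 (the 'SSE' loss name is an assumption, not part of the
    # original listing)
    net_mlp.add_train_op(name='sse', lossFunc='SSE', learning_rate=1e-3,
                         method='Adam', wd_lambda=0.0)
    net_cnn.add_train_op(name='sse', lossFunc='SSE', learning_rate=1e-3,
                         method='Adam', wd_lambda=0.0)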
Code example #2

import scipy.io
import os
from deephyp import autoencoder
from deephyp import data

if __name__ == '__main__':

    # read data into numpy array
    mat = scipy.io.loadmat('PaviaU.mat')
    img = mat['paviaU']

    # create a hyperspectral dataset object from the numpy array
    hypData = data.HypImg(img)

    # pre-process data to make the model easier to train
    hypData.pre_process('minmax')

    # setup each network from the config files
    net_mlp = autoencoder.mlp_1D_network(configFile=os.path.join(
        'models', 'test_ae_comparison_mlp', 'config.json'))
    net_cnn = autoencoder.cnn_1D_network(configFile=os.path.join(
        'models', 'test_ae_comparison_cnn', 'config.json'))

    # assign previously trained parameters to the network, and name each model
    net_mlp.add_model(addr=os.path.join('models', 'test_ae_comparison_mlp',
                                        'epoch_100'),
                      modelName='mlp_100')
    net_cnn.add_model(addr=os.path.join('models', 'test_ae_comparison_cnn',
                                        'epoch_10'),
                      modelName='cnn_10')

    # feed forward hyperspectral dataset through each encoder model (get latent encoding)
    dataZ_mlp = net_mlp.encoder(modelName='mlp_100',
                                dataSamples=hypData.spectraPrep)
    dataZ_cnn = net_cnn.encoder(modelName='cnn_10',
                                dataSamples=hypData.spectraPrep)
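    # a sketch (not in the original listing): reshape each latent encoding
    # back onto the image grid for a side-by-side comparison of the two
    # models; assumes HypImg exposes numRows/numCols, as in the deephyp data
    # module
    import numpy as np
    imgZ_mlp = np.reshape(dataZ_mlp, (hypData.numRows, hypData.numCols, -1))
    imgZ_cnn = np.reshape(dataZ_cnn, (hypData.numRows, hypData.numCols, -1))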
Code example #3
import scipy.io
import os
from urllib.request import urlretrieve
from deephyp import autoencoder
from deephyp import data

if __name__ == '__main__':

    # read data into numpy array
    mat = scipy.io.loadmat('PaviaU.mat')
    img = mat['paviaU']

    # create a hyperspectral dataset object from the numpy array
    hypData = data.HypImg(img)

    # pre-process data to make the model easier to train
    hypData.pre_process('minmax')

    # setup a network from a config file
    net = autoencoder.mlp_1D_network(
        configFile=os.path.join('models', 'test_ae_mlp_sid', 'config.json'))

    # assign previously trained parameters to the network, and name model
    net.add_model(addr=os.path.join('models', 'test_ae_mlp_sid', 'epoch_100'),
                  modelName='sid_100')

    # feed forward hyperspectral dataset through encoder (get latent encoding)
    dataZ = net.encoder(modelName='sid_100', dataSamples=hypData.spectraPrep)

    # feed forward latent encoding through decoder (get reconstruction)
    dataY = net.decoder(modelName='sid_100', dataZ=dataZ)

    #--------- visualisation ----------------------------------------

    # download dataset ground truth pixel labels (if already downloaded, comment this out)
    urlretrieve('http://www.ehu.eus/ccwintco/uploads/5/50/PaviaU_gt.mat',
                os.path.join(os.getcwd(), 'PaviaU_gt.mat'))  # destination path assumed
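    # a sketch (not in the original listing): load the ground-truth labels and
    # colour a 2-D slice of the latent space by class; assumes the standard
    # 'paviaU_gt' key in the downloaded .mat file and matplotlib being
    # installed
    import numpy as np
    import matplotlib.pyplot as plt
    gt = np.reshape(scipy.io.loadmat('PaviaU_gt.mat')['paviaU_gt'], -1)
    plt.scatter(dataZ[:, 0], dataZ[:, 1], c=gt, s=1)
    plt.title('latent space coloured by ground-truth class')
    plt.show()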
Code example #4
import scipy.io
import os
import shutil
from deephyp import autoencoder
from deephyp import data

if __name__ == '__main__':

    # read data into numpy array
    mat = scipy.io.loadmat('PaviaU.mat')
    img = mat['paviaU']

    # create a hyperspectral dataset object from the numpy array
    hypData = data.HypImg(img)

    # pre-process data to make the model easier to train
    hypData.pre_process('minmax')

    # create data iterator objects for training and validation using the pre-processed data
    trainSamples = 200000
    valSamples = 100
    dataTrain = data.Iterator(
        dataSamples=hypData.spectraPrep[:trainSamples, :],
        targets=hypData.spectraPrep[:trainSamples, :],
        batchSize=1000)
    dataVal = data.Iterator(
        dataSamples=hypData.spectraPrep[trainSamples:trainSamples + valSamples, :],
        targets=hypData.spectraPrep[trainSamples:trainSamples + valSamples, :])

    # shuffle training data
    dataTrain.shuffle()

    # setup a fully-connected autoencoder neural network with 3 encoder layers
    net = autoencoder.mlp_1D_network(inputSize=hypData.numBands,
                                     encoderSize=[50, 30, 10],
                                     activationFunc='sigmoid',
                                     weightInitOpt='truncated_normal',
                                     tiedWeights=None,
                                     skipConnect=False,
                                     activationFuncFinal='sigmoid')

    # setup a training operation for the network
    net.add_train_op(name='sid', lossFunc='SID', learning_rate=1e-3,
                     decay_steps=None, decay_rate=None,
                     method='Adam', wd_lambda=0.0)

    # create a directory to save the learnt model
    model_dir = os.path.join('models', 'test_ae_mlp_sid')
    if os.path.exists(model_dir):
        # if directory already exists, delete it
        shutil.rmtree(model_dir)
    os.mkdir(model_dir)

    # train the network for 100 epochs, saving the model at epoch 50 and 100
    net.train(dataTrain=dataTrain, dataVal=dataVal, train_op_name='sid',
              n_epochs=100, save_addr=model_dir,
              save_epochs=[50, 100])  # save_epochs assumed from the comment above
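For reference, the 'SID' loss used above is the spectral information divergence between each input spectrum and its reconstruction. A minimal NumPy sketch of the quantity (an illustration, not deephyp's implementation):

import numpy as np

def sid(x, y, eps=1e-10):
    # treat each spectrum as a probability distribution over bands
    p = x / (np.sum(x) + eps) + eps
    q = y / (np.sum(y) + eps) + eps
    # symmetric sum of the two KL-style divergences
    return np.sum(p * np.log(p / q)) + np.sum(q * np.log(q / p))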