예제 #1
0
def getDataFrame(url):
    """Load persisted runs from *url* and build summary + per-step accuracy tables.

    Returns a 3-tuple ``(summary, trainingAccuracies, validationAccuracies)``:
    the summary DataFrame has one row per run; the two accuracy DataFrames
    have one column per run (keyed by the formatted learning rate) and one
    row per training step, padded with None so all columns share a length.
    """
    # Load once.  The original implementation loaded twice because the
    # padding step mutated each run's accuracy lists in place (extend),
    # which corrupted the "last validation accuracy" lookup below.
    # Building padded copies avoids the mutation entirely.
    runs = Utils.ResultsManager().load(url)
    maxLength = max(len(run.trainingAccuracies) for run in runs)
    trainingAccuracies = pd.DataFrame(index=range(maxLength))
    validationAccuracies = pd.DataFrame(index=range(maxLength))
    for run in runs:
        # Assumes trainingAccuracies and validationAccuracies have the same
        # length per run (the original made the same assumption).
        pad = [None] * (maxLength - len(run.trainingAccuracies))
        column = '{0:.2f}'.format(run.learningRate)
        trainingAccuracies[column] = pd.Series(run.trainingAccuracies + pad)
        validationAccuracies[column] = pd.Series(run.validationAccuracies + pad)

    summary = pd.DataFrame({
            'Steps': [run.step for run in runs],
            'Learning Rate': ['{0:.2f}'.format(run.learningRate) for run in runs],
            'Elapsed (seconds)': ['{0:.2f}s'.format(run.elapsed) for run in runs],
            'Training Accuracy (%)': ['{0:.4f}'.format(run.trainingAccuracy) for run in runs],
            # Last recorded validation accuracy of each (unpadded) run.
            'Validation Accuracy (%)': ['{0:.4f}'.format(run.validationAccuracies[-1]) for run in runs],
            'Test Accuracy (%)': ['{0:.4f}'.format(run.testAccuracy) for run in runs],
    })
    return summary, trainingAccuracies, validationAccuracies
예제 #2
0
        run.trainingAccuracies = trainingAccuracies
        runs.append(run)
    return runs


'''MNIST'''
# Train the model on MNIST with ReLU activations, persist the run results
# as JSON, then plot validation accuracy per step for the first run.
print('')
print('Running MNIST (relu)')
dataProvider = MNIST.DataProvider()
data = dataProvider.get()
# Input/output layer sizes are derived from the dataset itself.
configuration.numberOfPixelsPerImage = data.train.images.shape[1]
configuration.numberOfClassLabels = data.train.labels.shape[1]
configuration.activationFunction = tf.nn.relu
# Sanity-check the data by displaying one training image.
plt.imshow(dataProvider.getSingleImageFromTrainingSet()[1], cmap="gist_gray")
runs = createAndRunModel(data, configuration)
Utils.ResultsManager().save('m2.mnist.relu.json', runs)
# Round-trip through disk so the plot uses exactly what was persisted.
runs = Utils.ResultsManager().load('m2.mnist.relu.json')
plt.plot(range(len(runs[0].validationAccuracies)), runs[0].validationAccuracies)

'''MNIST'''
# Same experiment as above but with sigmoid activations, persisted to a
# separate results file for comparison.
print('')
print('Running MNIST (sigmoid)')
dataProvider = MNIST.DataProvider()
data = dataProvider.get()
# Input/output layer sizes are derived from the dataset itself.
configuration.numberOfPixelsPerImage = data.train.images.shape[1]
configuration.numberOfClassLabels = data.train.labels.shape[1]
configuration.activationFunction = tf.nn.sigmoid
# Sanity-check the data by displaying one training image.
plt.imshow(dataProvider.getSingleImageFromTrainingSet()[1], cmap="gist_gray")
runs = createAndRunModel(data, configuration)
Utils.ResultsManager().save('m2.mnist.sigmoid.json', runs)
# Round-trip through disk so later analysis uses what was persisted.
runs = Utils.ResultsManager().load('m2.mnist.sigmoid.json')
예제 #3
0
    def train(self, data):
        """Train a 4-layer dense autoencoder on *data* and persist the models.

        Builds an encoder (inputs -> 2/3 -> 1/2 of the input width) and the
        mirrored decoder, trains the full autoencoder to reconstruct the
        training images, then saves the encoder, decoder, autoencoder, fit
        history and configuration to their respective URLs.

        Returns the Keras History object from ``fit``.
        """
        # Record the hyper-parameters so they can be persisted alongside
        # the trained models.
        configuration = types.SimpleNamespace()
        configuration.optimizer = 'adam'
        configuration.loss = 'binary_crossentropy'
        configuration.epochs = self.epochs
        configuration.batchSize = self.batchSize
        configuration.trainSize = data.train.num_examples
        configuration.validationSize = data.validation.num_examples

        # Bottleneck sizes: compress to 2/3 then 1/2 of the input width.
        numberOfInputs = data.train.images.shape[1]
        numberEncoding = int(numberOfInputs / 2)
        preNumberEncoding = 2 * int(numberOfInputs / 3)

        autoencoder = Sequential()

        # Encoder Layers
        autoencoder.add(
            Dense(preNumberEncoding,
                  input_shape=(numberOfInputs, ),
                  activation='relu'))
        autoencoder.add(Dense(numberEncoding, activation='relu'))

        # Decoder Layers; sigmoid keeps reconstructions in [0, 1] to match
        # binary cross-entropy on normalised pixel values.
        autoencoder.add(Dense(preNumberEncoding, activation='relu'))
        autoencoder.add(Dense(numberOfInputs, activation='sigmoid'))

        # Standalone encoder model sharing the (trained) autoencoder layers.
        input_img = Input(shape=(numberOfInputs, ))
        encoder_layer1 = autoencoder.layers[0]
        encoder_layer2 = autoencoder.layers[1]
        encoder = Model(input_img, encoder_layer2(encoder_layer1(input_img)))

        # Standalone decoder model sharing the (trained) autoencoder layers.
        input_img = Input(shape=(numberEncoding, ))
        decoder_layer1 = autoencoder.layers[2]
        decoder_layer2 = autoencoder.layers[3]
        decoder = Model(input_img, decoder_layer2(decoder_layer1(input_img)))

        # Train the autoencoder to reproduce its own input.
        autoencoder.compile(optimizer=configuration.optimizer,
                            loss=configuration.loss)
        modelFit = autoencoder.fit(data.train.images,
                                   data.train.images,
                                   epochs=self.epochs,
                                   batch_size=self.batchSize,
                                   shuffle=True,
                                   validation_data=(data.validation.images,
                                                    data.validation.images))

        # Persist everything needed to reuse or inspect this training run.
        encoder.save(self._getEncoderModelUrl())
        decoder.save(self._getDecoderModelUrl())
        autoencoder.save(self._getAutoEncoderModelUrl())
        Utils.ResultsManager().save(self._getModelFitUrl(), modelFit)
        Utils.ResultsManager().save(self._getConfigurationUrl(), configuration)
        return modelFit
예제 #4
0
 def getConfiguration(self):
     """Load and return the persisted training configuration."""
     manager = Utils.ResultsManager()
     return manager.load(self._getConfigurationUrl())
예제 #5
0
 def getModelFit(self):
     """Load and return the persisted model-fit history."""
     manager = Utils.ResultsManager()
     return manager.load(self._getModelFitUrl())
예제 #6
0
        run.testAccuracy = testAccuracy
        run.trainingAccuracies = trainingAccuracies
        runs.append(run)
    return runs


'''MNIST'''
# Train the model on MNIST, persist the run results as JSON, then plot
# validation accuracy per step for the first run.
print('')
print('Running MNIST')
dataProvider = MNIST.DataProvider()
data = dataProvider.get()
# Input/output layer sizes are derived from the dataset itself.
configuration.numberOfPixelsPerImage = data.train.images.shape[1]
configuration.numberOfClassLabels = data.train.labels.shape[1]
# Sanity-check the data by displaying one training image.
plt.imshow(dataProvider.getSingleImageFromTrainingSet()[1], cmap="gist_gray")
runs = createAndRunModel(data, configuration)
Utils.ResultsManager().save('m1.mnist.json', runs)
# Round-trip through disk so the plot uses exactly what was persisted.
runs = Utils.ResultsManager().load('m1.mnist.json')
plt.plot(range(len(runs[0].validationAccuracies)), runs[0].validationAccuracies)

'''FMNIST'''
# Repeat the same experiment on Fashion-MNIST.
print('')
print('Running FMNIST')
dataProvider = FMNIST.DataProvider()
data = dataProvider.get()
configuration.numberOfPixelsPerImage = data.train.images.shape[1]
configuration.numberOfClassLabels = data.train.labels.shape[1]
plt.imshow(dataProvider.getSingleImageFromTrainingSet()[1], cmap="gist_gray")
runs = createAndRunModel(data, configuration)
Utils.ResultsManager().save('m1.fmnist.json', runs)
runs = Utils.ResultsManager().load('m1.fmnist.json')
plt.plot(range(len(runs[0].validationAccuracies)), runs[0].validationAccuracies)
예제 #7
0
    def train(self,
              data,
              shape,
              applyL1=False,
              applyL2=False,
              applyDropout=False,
              applyBatch=False):
        """Build, train, evaluate and persist a CNN image classifier.

        Parameters
        ----------
        data : dataset with train/validation/test splits (images + one-hot labels)
        shape : 3-sequence (height, width, channels) of the input images
        applyL1 / applyL2 : attach an L1 / L2 kernel regularizer to every
            layer; if both are True, L2 wins (matches the original
            assignment order)
        applyDropout : add a Dropout layer after each pooling/dense stage
        applyBatch : add BatchNormalization after each pooling/dense stage

        Returns the Keras History object from ``fit``.
        """
        dropoutRate = 0.1
        regularizerRate = 0.01
        configuration = types.SimpleNamespace()
        configuration.activation = 'relu'
        configuration.optimizer = 'adam'  #optimizers.SGD(lr=0.1)
        configuration.trainSize = data.train.num_examples
        configuration.validationSize = data.validation.num_examples
        configuration.classes = data.train.labels.shape[1]
        configuration.shape1 = shape[0]
        configuration.shape2 = shape[1]
        configuration.shape3 = shape[2]

        filterSize = (3, 3)
        poolSize = (2, 2)
        model = Sequential()
        kernel_initializer = keras.initializers.glorot_normal(seed=None)

        # At most one kernel regularizer is used; L2 takes precedence
        # when both flags are set.
        regularizer = None
        if applyL1:
            regularizer = regularizers.l1(regularizerRate)
        if applyL2:
            regularizer = regularizers.l2(regularizerRate)

        def addRegularizationLayers():
            # Optional normalisation / dropout appended after each stage.
            if applyBatch:
                model.add(BatchNormalization())
            if applyDropout:
                model.add(Dropout(dropoutRate))

        def addConvBlock(isFirst=False):
            # One Conv2D(55 filters, 3x3, valid padding) + 2x2 max-pool
            # stage.  input_shape is only meaningful on the first layer,
            # so it is passed only there (the original redundantly passed
            # it to every Conv2D, which Keras ignores past the first).
            kwargs = {}
            if isFirst:
                kwargs['input_shape'] = (configuration.shape1,
                                         configuration.shape2,
                                         configuration.shape3)
            model.add(
                Conv2D(55,
                       filterSize,
                       kernel_initializer=kernel_initializer,
                       kernel_regularizer=regularizer,
                       strides=(1, 1),
                       padding='valid',
                       activation=configuration.activation,
                       **kwargs))
            model.add(MaxPooling2D(pool_size=poolSize))
            addRegularizationLayers()

        def addDenseBlock(units):
            # Fully-connected stage with the shared regularisation options.
            model.add(
                Dense(units,
                      activation=configuration.activation,
                      kernel_regularizer=regularizer,
                      kernel_initializer=kernel_initializer))
            addRegularizationLayers()

        # Three identical convolutional stages.
        addConvBlock(isFirst=True)
        addConvBlock()
        addConvBlock()

        # Classifier head: shrinking dense stack then softmax over classes.
        model.add(Flatten())
        for units in (200, 100, 50, 25):
            addDenseBlock(units)
        model.add(
            Dense(configuration.classes,
                  activation='softmax',
                  kernel_regularizer=regularizer,
                  kernel_initializer=kernel_initializer))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=configuration.optimizer,
                      metrics=['accuracy'])
        model.summary()
        modelFit = model.fit(data.train.images,
                             data.train.labels,
                             batch_size=self.batchSize,
                             epochs=self.epochs,
                             shuffle=True,
                             validation_data=(data.validation.images,
                                              data.validation.labels))

        # Evaluate on the held-out test set and persist everything.
        testAccuracy = model.evaluate(data.test.images, data.test.labels)
        model.save(self._getModelUrl())
        Utils.ResultsManager().save(self._getModelFitUrl(), modelFit)
        Utils.ResultsManager().save(self._getTestAccuracyUrl(), testAccuracy)
        # Return the history for consistency with the other train() methods
        # in this project (the original returned None).
        return modelFit
예제 #8
0
 def getTestAccuracy(self):
     """Load and return the persisted test-set accuracy."""
     manager = Utils.ResultsManager()
     return manager.load(self._getTestAccuracyUrl())
예제 #9
0
    return data


'''MNIST'''
# Train the model on MNIST images that have first been compressed through
# a pre-trained autoencoder, persist the results, then plot validation
# accuracy per step for the first run.
print('')
print('Running MNIST (relu)')
dataProvider = MNIST.DataProvider()
data = dataProvider.get()
ae = AutoEncoder('AE_MNIST')
print('Transforming...')
# Replace raw pixels with the autoencoder's encoded representation.
data = transformData(data, ae)
# Input/output layer sizes are derived from the (transformed) dataset.
configuration.numberOfPixelsPerImage = data.train.images.shape[1]
configuration.numberOfClassLabels = data.train.labels.shape[1]
configuration.activationFunction = tf.nn.relu
runs = createAndRunModel(data, configuration)
Utils.ResultsManager().save('m4.mnist.relu.json', runs)
# Round-trip through disk so the plot uses exactly what was persisted.
runs = Utils.ResultsManager().load('m4.mnist.relu.json')
plt.plot(range(len(runs[0].validationAccuracies)),
         runs[0].validationAccuracies)
'''FMNIST'''
# Repeat the same experiment on Fashion-MNIST with its own autoencoder.
print('')
print('Running FMNIST (relu)')
dataProvider = FMNIST.DataProvider()
data = dataProvider.get()
ae = AutoEncoder('AE_FMNIST')
print('Transforming...')
data = transformData(data, ae)
configuration.numberOfPixelsPerImage = data.train.images.shape[1]
configuration.numberOfClassLabels = data.train.labels.shape[1]
configuration.activationFunction = tf.nn.relu
runs = createAndRunModel(data, configuration)