Example #1
def train(project_path, model, output_queue):
    """Trains a neural net model on the project's dataset
    Parameters:
    project_path -- path to a timechange project
    model -- compiled keras model to train
    output_queue -- queue used to report training results back to the caller
    """

    #Set dimension ordering
    K.set_image_dim_ordering('th')
    
    #Load the image size from the first image in the first class folder
    #Note that PIL's Image.size is (width, height)
    first_class = next(os.scandir(path.join(project_path, "images"))).path
    first_image = next(os.scandir(first_class)).path
    image_width, image_height = Image.open(first_image).size
    
    #Check to see if a model has been generated
    if model is None:
        raise Exception("There is no model stored. Please generate a model before training")
    
    #Determine class mode based on folder structure
    num_classes = len(list(os.scandir(path.join(project_path, "images"))))
    if num_classes == 1:
        raise Exception("The training data only contains one class")
    else:
        class_mode = "categorical"
    
    #Create a training data generator from the images folder
    train_generator = ImageDataGenerator(
            rescale = 1.0/255.0, #Scale the 0-255 pixel values down to 0.0-1.0
            dim_ordering = 'th' #'th' ordering: (samples, channels, height, width)
            ).flow_from_directory(
            path.join(project_path, 'images'), #Read training data from the project's images dir
            target_size=(image_height, image_width), #Resize must be set or the generator will automatically choose dimensions
            color_mode='grayscale', #TODO: take another look at this
            batch_size=64, #TODO: customize this
            shuffle=True, #Shuffle the data inputs. TODO: set a random seed
            class_mode=class_mode) #TODO: consider binary mode for systems with only 2 labels
    
    #Define a callback to report training progress
    class ProgressBarCallback(keras.callbacks.Callback):
        def on_train_begin(self, logs={}):
            return
        def on_train_end(self, logs={}):
            return
        def on_epoch_begin(self, epoch, logs={}):
            return
        def on_epoch_end(self, epoch, logs={}):
            print("Training epoch {} ended".format(epoch))
            return
        def on_batch_begin(self, batch, logs={}):
            return
    
    #Train the model
    #TODO: k-fold validation
    try:
        return model.fit_generator(
            train_generator,
            samples_per_epoch=len(train_generator.filenames), #TODO: better solution
            nb_epoch=20,
            callbacks=[ProgressBarCallback()]).history #TODO: customize this
    except Exception as err:
        #TODO: Handle error better
        raise Exception("Something went wrong with the training process: {}".format(err)) from err
Example #2
from keras.layers import Dropout
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Flatten
from keras.constraints import max_norm
# from scipy.misc import toimage
import matplotlib.pyplot as plt
import matplotlib.gridspec as gspec
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.layers.convolutional import Conv2D
import keras.backend.common as K
from keras.datasets import cifar10  # To load the dataset.
from PIL import Image

K.set_image_dim_ordering('th')

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Show the first 16 training images in a 4x4 grid
gs = gspec.GridSpec(4, 4, wspace=0.0)
ax = [plt.subplot(gs[i]) for i in range(4 * 4)]
for i in range(16):
    ax[i].imshow(Image.fromarray(x_train[i]))
plt.show()

# One-hot encode the integer class labels
y_train_onehot = np_utils.to_categorical(y_train)
y_test_onehot = np_utils.to_categorical(y_test)

num_classes = 10
model = Sequential()
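The example stops right after creating the empty Sequential model. Below is a hedged sketch of one way it could be continued using only the layers already imported above (Keras 2-style arguments; every layer size and hyperparameter here is an assumption, not from the original):

# Illustrative continuation -- sizes and hyperparameters are assumptions
model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                 input_shape=x_train.shape[1:]))  # shape follows the configured dim ordering
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_constraint=max_norm(3)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01, momentum=0.9),
              metrics=['accuracy'])
model.fit(x_train / 255.0, y_train_onehot,
          validation_data=(x_test / 255.0, y_test_onehot),
          epochs=10, batch_size=64)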
Example #3
def build_model(project_path):
    """Generates a compiled keras model for use in timechange training
    Parameters: 
    project_path -- path to a timechange project"""
    #Load keras
    from keras.models import Sequential
    from keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D
    from keras.layers import Input, Dense, Flatten, Dropout
    from keras.optimizers import SGD
    #Load the filesystem, config, and imaging helpers used below
    import os
    from os import path
    from configparser import ConfigParser
    from PIL import Image
    #Set dimension ordering
    from keras.backend import common as K
    K.set_image_dim_ordering('th')
    # Extract parameters from project folder
    image_folder = path.join(project_path, "images")
    # Extract number of classes from project by finding image folders
    num_classes = len(list(os.scandir(image_folder)))
    # Extract the width and height of the first image found
    # (PIL's Image.size is (width, height))
    first_class = next(os.scandir(image_folder)).path
    first_image = next(os.scandir(first_class)).path
    image_width, image_height = Image.open(first_image).size
    # Extract configuration
    config = ConfigParser()
    config.read(path.join(project_path, 'parameters.conf'))
    #Extract model type from configuration
    model_type = config['DEFAULT'].get(
        'model_type', 'convolutional_basic').strip('\"').strip('\'')
    #Initialize a model object
    model = Sequential()
    #Determine the block type for the model
    if model_type == 'convolutional_basic':
        #Extract and set parameters
        #Load number of blocks, with a default of 3
        num_blocks = int(config['convolutional_basic'].get('num_blocks', 3))
        #Load a filter size list, with a default
        num_filters = config['convolutional_basic'].get('num_filters', '8,8,8')
        #Parse the filter list into useful values
        num_filters = [int(f) for f in num_filters.split(',')]
        #Extend the filter list to make sure it accounts for all blocks
        if len(num_filters) < num_blocks:
            #Pad the filter sizes with copies of the last element
            num_filters.extend([num_filters[-1]] *
                               (num_blocks - len(num_filters)))
        #Extract the learning rate
        learning_rate = float(config['convolutional_basic'].get(
            'learning_rate', 1e-2))
        #Dynamically determine final layer's activation based
        #on the number of classes
        if num_classes == 2:
            final_activation = 'sigmoid'
            loss_measure = 'binary_crossentropy'
        else:
            final_activation = 'softmax'
            loss_measure = 'categorical_crossentropy'
        #Add the initial blocks
        model.add(
            Convolution2D(num_filters[0],
                          3,
                          3,
                          activation='relu',
                          input_shape=(3, image_height, image_width),
                          dim_ordering='th'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
        #Add convolutional blocks. n-1 because the first was added already
        for layer in range(1, num_blocks):
            #TODO: Allow configuring the parameters on these
            model.add(
                Convolution2D(num_filters[layer],
                              3,
                              3,
                              activation='relu',
                              dim_ordering='th'))
            model.add(ZeroPadding2D((1, 1)))
            model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
        #Create the final layers
        model.add(Flatten())
        #TODO: Allow configuring the parameters and existence of these
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.3))
        model.add(Dense(num_classes, activation=final_activation))
    else:
        raise Exception("Invalid neural net type")
    #Set up the optimizer
    optimizer = SGD(lr=learning_rate)
    #Compile the model
    model.compile(loss=loss_measure, optimizer=optimizer, metrics=['accuracy'])
    #Output the model
    return model
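A hedged usage sketch for build_model, assuming a parameters.conf whose sections and keys match what the function reads above (the path and the values are placeholders):

# Hypothetical parameters.conf consistent with the keys read above:
#   [DEFAULT]
#   model_type = convolutional_basic
#   [convolutional_basic]
#   num_blocks = 3
#   num_filters = 8,16,32
#   learning_rate = 0.01
project_path = "/path/to/timechange_project"  # placeholder
model = build_model(project_path)
model.summary()  # inspect the generated architecture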
Example #4
def rnn_model(project_path, config):
    # Set dimension ordering
    K.set_image_dim_ordering('th')

    # Extract parameters from project folder
    image_folder = path.join(project_path, "images")

    # Extract number of classes from project by finding image folders
    num_classes = len(list(os.scandir(image_folder)))

    # Extract the width and height of the first image found
    # (PIL's Image.size is (width, height))
    first_class = next(os.scandir(image_folder)).path
    first_image = next(os.scandir(first_class)).path
    image_width, image_height = Image.open(first_image).size

    #Initialize a model object
    model = Sequential()

    # Extract and set parameters
    # Load number of blocks, with a default of 3
    num_blocks = int(config.get('num_blocks', 3))

    # Load a filter size list, with a default
    num_filters = config.get('num_filters', '8,8,8')

    # Parse the filter list into useful values
    num_filters = [int(f) for f in num_filters.split(',')]

    # Extend the filter list to make sure it accounts for all blocks
    if len(num_filters) < num_blocks:
        # Pad the filter sizes with copies of the last element
        num_filters.extend([num_filters[-1]] * (num_blocks - len(num_filters)))

    # Extract the learning rate
    learning_rate = float(config['learning_rate'])

    # Dynamically determine final layer's activation based
    # on the number of classes
    if num_classes == 2:
        final_activation = 'sigmoid'
        loss_measure = 'binary_crossentropy'
    else:
        final_activation = 'softmax'
        loss_measure = 'categorical_crossentropy'

    model.add(
        Reshape((image_height, image_width),
                input_shape=(1, image_height, image_width)))

    # Add the initial blocks
    # TODO: need to specify the output dimension
    model.add(LSTM(num_filters[0], activation='relu', return_sequences=True))

    # Add the middle recurrent blocks; the final LSTM is added separately
    # so it can omit return_sequences
    for layer in range(1, num_blocks - 1):
        # TODO: Allow configuring the parameters on these
        model.add(LSTM(num_filters[layer], return_sequences=True))

    model.add(LSTM(num_filters[-1]))

    # TODO: Allow configuring the parameters and existence of these
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(num_classes, activation=final_activation))

    # Set up the optimizer
    optimizer = SGD(lr=learning_rate)

    # Compile the model
    model.compile(loss=loss_measure, optimizer=optimizer, metrics=['accuracy'])

    return model
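A hedged usage sketch for rnn_model, assuming config is a dict-like mapping of the keys read above (the values are illustrative):

# Hypothetical config; keys match what rnn_model reads above
config = {
    'num_blocks': 3,
    'num_filters': '16,16,16',
    'learning_rate': 0.01,
}
project_path = "/path/to/timechange_project"  # placeholder
model = rnn_model(project_path, config)
model.summary()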
Example #5
import os
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.optimizers import adam
from keras.callbacks import Callback, ModelCheckpoint
from keras.constraints import maxnorm
from keras.utils import np_utils  # Transform labels to categorical.
from keras.datasets import cifar10  # To load the dataset.
import numpy as np
import matplotlib.pyplot as plt
import keras.backend.common as K
K.set_image_dim_ordering('tf')  # Use the TensorFlow ('tf') dimension ordering: channels last.
import matplotlib as mpl  # Just to set some standard plot format.
mpl.style.use('classic')
import argparse
from keras.preprocessing import image

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

class_names = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]

# Normalize the x_train and x_test to be a float between 0 and 1.
x_train = x_train / 255.0
x_test = x_test / 255.0

# One-hot encoding based on number of classes.
class_count = 10
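The snippet ends before the encoding its last comment announces; a minimal sketch of that step, using the np_utils import from the top of this example:

# One-hot encode the labels into (N, class_count) arrays
y_train = np_utils.to_categorical(y_train, class_count)
y_test = np_utils.to_categorical(y_test, class_count)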