Example 1
def Dave_v3(input_tensor=None, load_weights=False):
    model = models.Sequential()
    model.add(
        convolutional.Convolution2D(16,
                                    3,
                                    3,
                                    input_shape=(32, 128, 3),
                                    activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    #model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    #model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.add(
        Lambda(One_to_radius, output_shape=atan_layer_shape,
               name="prediction"))
    if load_weights:
        model.load_weights('./models/dave3/dave3.h5')
    model.compile(optimizer=optimizers.Adam(lr=1e-04),
                  loss='mean_squared_error')
    return model
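
A minimal usage sketch for the function above (hypothetical driver code; it assumes the Keras 1.x imports and the One_to_radius / atan_layer_shape helpers used by the Lambda layer are defined in the surrounding module):

model = Dave_v3(load_weights=False)   # build a fresh, untrained model
model.summary()                       # inspect the layer stack
# predictions = model.predict(batch)  # batch shape: (n, 32, 128, 3)
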
Example 2
def _cnn():

    train_data, train_target, test_data = load_data()  # load data from utility
    train_data, validation_data, train_target, validation_target = train_test_split(
        train_data, train_target, test_size=0.2, random_state=42
    )  # randomly split data into training and validation sets

    test_data, input_shape = _reshape(test_data)  # see docstring
    train_data, input_shape = _reshape(train_data)  # see docstring
    validation_data, input_shape = _reshape(validation_data)  # see docstring

    model = Sequential()  # sequential model

    model.add(
        convolutional.Conv2D(  # first convolutional layer
            filters=32,
            kernel_size=(3, 3),
            activation='relu',
            input_shape=input_shape))

    model.add(convolutional.Conv2D(
        64, (3, 3),
        activation='relu'))  # 2nd convo layer, using relu for activation
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))  #1st pooling

    model.add(Dropout(0.25))  # prevent overfit w/dropout 1
    model.add(Flatten())  # flatten for dnn
    model.add(Dense(128, activation='relu'))  # 1st dnn layer

    model.add(Dropout(0.5))  # prevent overfit w/dropout 2
    model.add(Dense(3, activation='softmax'))  # using softmax for activation

    model.compile(  # compile using Adadelta for optimizer
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'])

    model.fit(train_data, train_target, batch_size=128, epochs=12,
              verbose=1)  # fit using training data
    loss, accuracy = model.evaluate(
        validation_data, validation_target,
        verbose=0)  # evaluate using validation data
    print "accuracy: {}".format(accuracy)

    class_output = model.predict_classes(test_data)  # predict on test_data
    return class_output
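
The _reshape helper is not shown; a plausible sketch (an assumption, not the original implementation) that turns flat feature rows into channels-last image tensors and also returns the input shape:

def _reshape(data, rows=28, cols=28):
    """Hypothetical: reshape (n, rows*cols) arrays to (n, rows, cols, 1)."""
    data = data.reshape(data.shape[0], rows, cols, 1).astype('float32') / 255
    return data, (rows, cols, 1)
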
Example 3
def mnist_model(input_shape):
  """Creates a MNIST model."""
  model = sequential_model_lib.Sequential()

  # Adding custom pass-through layer to visualize input images.
  model.add(LayerForImageSummary())

  model.add(
      conv_layer_lib.Conv2D(
          32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
  model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))
  model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
  model.add(layer_lib.Dropout(0.25))
  model.add(layer_lib.Flatten())
  model.add(layer_lib.Dense(128, activation='relu'))
  model.add(layer_lib.Dropout(0.5))
  model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))

  # Adding custom pass-through layer for summary recording.
  model.add(LayerForHistogramSummary())
  return model
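
LayerForImageSummary and LayerForHistogramSummary are custom pass-through layers defined elsewhere; a hedged sketch of the image variant (class body and summary tag are assumptions) might be:

import tensorflow as tf

class LayerForImageSummary(tf.keras.layers.Layer):
  """Hypothetical: logs inputs as image summaries and returns them unchanged."""

  def call(self, inputs):
    tf.summary.image('input_images', inputs)  # record inputs for TensorBoard
    return inputs
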
Example 4
X_train /= 255
X_test /= 255

# Convert 1-dimensional class arrays to 10-dimensional class matrices
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

################################################################## BUILD NETWORK


# 7. Define model architecture
model = Sequential()
 
model.add(convolutional.Conv2D(NB_CONV_1, KERNEL_SIZE_CONV_1, activation='relu', input_shape=(28,28,1)))
model.add(convolutional.Conv2D(NB_CONV_2, KERNEL_SIZE_CONV_2, activation='relu'))
model.add(pooling.MaxPooling2D(pool_size=POOL_SIZE))
model.add(Dropout(DROPOUT_1))
 
model.add(Flatten())
model.add(Dense(DENSE_SIZE_1, activation='relu'))
model.add(Dropout(DROPOUT_2))
model.add(Dense(DENSE_SIZE_2, activation='softmax'))

config = {
            'sequential':model.get_config(),
            'compilation': {
                'loss' : 'categorical_crossentropy',
                'optimizer' : 'adam',
                'metrics' : ['acc']
            },
            'training': {
Example 5
def architecture():
    # Put the normalization inside the model so preprocessing runs in a Lambda layer.
    # There were many issues with the Lambda layer; this arrangement works.
    def resize_normalize(image):
        """
        Preprocessing pipeline: resizes the image to 32x128 px and scales
        pixel values to [-0.5, 0.5].
        """
        import cv2
        from keras.backend import tf as ktf
        # The NVIDIA end-to-end paper recommends 66x200 input;
        # image = cv2.resize(image, (66, 200))  # first try
        # here the image is resized to 32x128 instead.
        resized = ktf.image.resize_images(image, (32, 128))
        # normalize to [-0.5, 0.5]
        resized = resized / 255.0 - 0.5

        return resized

    print('Inside architecture()')
    #initialize model
    model = Sequential()
    #dropout = 0.5
    nonlinear = 'tanh'
    print('Adding cropping layer')
    ### Convolution layers and parameters were taken from the "nvidia paper" on end-to-end autonomous steering.
    model.add(
        Cropping2D(cropping=((60, 20), (1, 1)), input_shape=(160, 320, 3)))
    print('Adding Lambda resize/normalize layer')
    model.add(
        Lambda(resize_normalize,
               input_shape=(160, 320, 3),
               output_shape=(32, 128, 3)))

    # Model architecture
    model.add(
        Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=1e-04),
                  loss='mean_squared_error')
    #model.add(Lambda(lambda x: resize_normalize(x), input_shape=(80,318,3), output_shape=(66, 200, 3)))
    # model.add(Convolution2D(24, 5, 5, name='conv1', subsample=(2, 2), activation=nonlinear))
    # model.add(Convolution2D(36, 5, 5, name='conv2', subsample=(2, 2), activation=nonlinear))
    # model.add(Convolution2D(48, 5, 5, name='conv3', subsample=(2, 2), activation=nonlinear))
    # model.add(Convolution2D(64, 3, 3, name='conv4', activation=nonlinear))
    # model.add(Convolution2D(64, 3, 3, name='conv5', activation=nonlinear))

    # ### Regression
    # model.add(Flatten())
    # model.add(Dropout(dropout))
    # model.add(Dense(1164, name='hidden1', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(100, name='hidden2', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(50, name='hidden3', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(10, name='hidden4', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(1, name='output', activation=nonlinear))

    # #model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')
    # model.compile(optimizer='adam', loss='mse')
    print('Finished building the model')
    print(model.summary())
    return model
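
A quick shape check for the model above (hypothetical driver code): Cropping2D trims the 160x320 frame to 80x318, the Lambda layer resizes it to 32x128, and the head emits a single steering value.

import numpy as np

model = architecture()
dummy = np.zeros((1, 160, 320, 3), dtype=np.float32)  # one fake camera frame
print(model.predict(dummy).shape)  # expected: (1, 1)
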
Example 6
nb_validation_samples = 2079
batch_size = 10
epochs = 4
num_classes = 51

inp = Input(shape=(img_width, img_height, 3))
conv_layer1 = convolutional.Conv2D(8, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   activation='relu')(inp)
conv_layer2 = convolutional.Conv2D(8, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   activation='relu')(conv_layer1)
pool_layer1 = pooling.MaxPooling2D(pool_size=(2, 2),
                                   strides=None,
                                   padding='valid',
                                   data_format=None)(conv_layer2)
conv_layer3 = convolutional.Conv2D(16, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   activation='relu')(pool_layer1)
pool_layer2 = pooling.MaxPooling2D(pool_size=(2, 2),
                                   strides=None,
                                   padding='valid',
                                   data_format=None)(conv_layer3)
conv_layer4 = convolutional.Conv2D(32, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   activation='relu')(pool_layer2)
# remaining arguments below are assumed, matching the earlier pooling layers
pool_layer3 = pooling.MaxPooling2D(pool_size=(2, 2),
                                   strides=None,
                                   padding='valid',
                                   data_format=None)(conv_layer4)
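
The snippet is cut off here; a hypothetical continuation in the same functional-API style (layer sizes and names below are assumptions) would attach a classifier head and assemble the Model:

from keras.layers import Flatten, Dense
from keras.models import Model

x = Flatten()(pool_layer3)
out = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
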
Example 7
import pandas as pd  # needed for pd.read_csv below

from keras import models, optimizers, backend
from keras.layers import core, convolutional, pooling
from sklearn import model_selection, utils
from dataPreprocessing import generate_samples, preprocess

if __name__ == '__main__':

    # Read the split train/validation data

    df_train = pd.read_csv('train.csv')
    df_valid = pd.read_csv('test.csv')

    # CNN Model Architecture
    model = models.Sequential()
    model.add(convolutional.Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')

    # load the existing model weights
    model.load_weights("model.h5")
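
A hypothetical training continuation (Keras 1.x generator API; the generate_samples signature is an assumption based on the import above):

history = model.fit_generator(
    generate_samples(df_train, 'data/'),  # assumed batch generator
    samples_per_epoch=len(df_train),
    nb_epoch=5,
    validation_data=generate_samples(df_valid, 'data/'),
    nb_val_samples=len(df_valid))
model.save_weights('model.h5')
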
Example 8
def _cnn():

    train_data, train_target, test_data = load_data()
    train_data, validation_data, train_target, validation_target = train_test_split(
        train_data, train_target, test_size=0.2, random_state=42)

    test_data, input_shape = _reshape(test_data)
    train_data, input_shape = _reshape(train_data)
    validation_data, input_shape = _reshape(validation_data)

    model = Sequential()

    model.add(
        convolutional.Conv2D(filters=32,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same',
                             input_shape=input_shape,
                             activation='relu'))

    model.add(pooling.MaxPooling2D(pool_size=(2, 2), padding='same'))

    model.add(convolutional.Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2), padding='same'))

    model.add(convolutional.Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2), padding='same'))

    model.add(convolutional.Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2), padding='same'))

    model.add(convolutional.Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2), padding='same'))

    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(3))
    model.add(Activation('softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(train_data, train_target, epochs=20, batch_size=64, verbose=2)

    loss, accuracy = model.evaluate(validation_data,
                                    validation_target,
                                    verbose=2)

    print "accuracy: {}".format(accuracy)
    output = model.predict_classes(test_data)

    return output
Example 9
train_data_raw = pd.read_csv("./input/train.csv").values
test_data_raw = pd.read_csv("./input/test.csv").values

img_cols = 28
img_rows = 28

train_X = train_data_raw[:, 1:].reshape(train_data_raw.shape[0], 1, img_rows, img_cols)
train_Y = kutils.to_categorical(train_data_raw[:, 0])
num_class = train_Y.shape[1]

num_filters_1 = 64
conv_dim = 3
cnn = kmodels.Sequential()
cnn.add(kconv.ZeroPadding2D((1,1), input_shape=(1, 28, 28),))
cnn.add(kconv.Convolution2D(num_filters_1, conv_dim, conv_dim,  activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

num_filters_2 = 128
cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim, conv_dim, activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

conv_dim_2 = 3
cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))
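
The snippet stops after the convolutional stack; a hypothetical continuation in the same Keras 1.x style (the kcore alias and layer sizes are assumptions, mirroring the kconv/kpool aliases above) adds the classifier head and trains:

from keras.layers import core as kcore

cnn.add(kcore.Flatten())
cnn.add(kcore.Dense(128, activation="relu"))
cnn.add(kcore.Dense(num_class, activation="softmax"))
cnn.compile(loss="categorical_crossentropy", optimizer="adam",
            metrics=["accuracy"])
cnn.fit(train_X, train_Y, batch_size=128, nb_epoch=10, verbose=1)
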
Example 10
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 filters=None,
                 kernel_size=None,
                 strides=None,
                 pool_size=None,
                 denoising=None):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        # INPUTS
        self.input_data = Input(shape=self.input_shape)

        # ENCODER
        self.encoded = BatchNormalization()(self.input_data)
        for i in range(self.encoder_layers):
            self.encoded = BatchNormalization()(self.encoded)
            self.encoded = convolutional.Conv2D(filters=self.filters,
                                                kernel_size=self.kernel_size,
                                                strides=self.strides,
                                                activation="elu",
                                                padding="same")(self.encoded)
            self.encoded = Dropout(rate=0.5)(self.encoded)
            self.encoded = pooling.MaxPooling2D(strides=self.pool_size,
                                                padding="same")(self.encoded)

        # DECODER
        self.decoded = BatchNormalization()(self.encoded)
        for i in range(self.decoder_layers):
            self.decoded = BatchNormalization()(self.decoded)
            self.decoded = convolutional.Conv2D(filters=self.filters,
                                                kernel_size=self.kernel_size,
                                                strides=self.strides,
                                                activation="elu",
                                                padding="same")(self.decoded)
            self.decoded = Dropout(rate=0.5)(self.decoded)
            self.decoded = convolutional.UpSampling2D(size=self.pool_size)(
                self.decoded)

        # ACTIVATION
        self.decoded = convolutional.Conv2D(filters=self.input_shape[2],
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="sigmoid",
                                            padding="same")(self.decoded)
        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
Example 11
def _cnn():

    train_data, train_target, test_data = load_data()  # load data from utility
    train_data, validation_data, train_target, validation_target = train_test_split(
        train_data, train_target, test_size=0.2,
        random_state=42)  #randomly split for validation

    test_data, input_shape = _reshape(test_data)  # see docstring
    train_data, input_shape = _reshape(train_data)  # see docstring
    validation_data, input_shape = _reshape(validation_data)  # see docstring

    model = Sequential()  # sequential model

    model.add(
        convolutional.Conv2D(  # first convolutional layer
            filters=32,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            input_shape=input_shape,
            activation='relu'))

    model.add(convolutional.Conv2D(64, (3, 3),
                                   padding='same'))  # 2nd convo layer
    model.add(Activation('relu'))  #using relu
    model.add(pooling.MaxPooling2D(pool_size=(2, 2),
                                   padding='same'))  # 1st pooling

    model.add(convolutional.Conv2D(64, (3, 3),
                                   padding='same'))  # 3rd convo layer
    model.add(Activation('relu'))  # using relu
    model.add(pooling.MaxPooling2D(pool_size=(2, 2),
                                   padding='same'))  # 2nd pooling

    model.add(convolutional.Conv2D(64, (3, 3),
                                   padding='same'))  # 4th convo layer
    model.add(Activation('relu'))  # using relu
    model.add(pooling.MaxPooling2D(pool_size=(2, 2),
                                   padding='same'))  # 3rd pooling

    model.add(Dropout(0.25))  # prevent overfit w/dropout 1
    model.add(Flatten())  # flatten for dnn
    model.add(Dense(1024))  # 1st dnn layer
    model.add(Activation('relu'))  # using relu

    model.add(Dropout(0.5))  # prevent overfit w/dropout 2
    model.add(Dense(3))  # output dnn layer
    model.add(Activation('softmax'))  # using softmax

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                decay=0.0)  # adam optimizer

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])  # compile

    model.fit(train_data, train_target, epochs=12, batch_size=128,
              verbose=1)  # fit using training data

    loss, accuracy = model.evaluate(
        validation_data, validation_target,
        verbose=0)  # evaluate using validation data

    print "accuracy: {}".format(accuracy)
    output = model.predict_classes(test_data)  # predict on test_data

    return output
Example 12
    def learn(self, train_data, train_target, cn_drp_rate=0.2, d_drp_rate=0.5,
              epochs=150, batch_size=40, num_class=2):


        train_target = np_utils.to_categorical(train_target, num_class)
        train_data = train_data.reshape(train_data.shape[0], 1, train_data.shape[1], 1)

        self.model = Sequential()


        #Adding first convolution layer with relu activation and max pooling
        self.model.add(
            convolutional.Conv2D(filters=32, kernel_size=(3,3),strides=(1, 1),padding='same',activation='relu'
                                 ,input_shape=(1,193,1)))
        #model.add(Dropout(cn_drp_rate))
        self.model.add(pooling.MaxPooling2D(pool_size=(2, 2),padding='same'))

        #Adding second convolution layer with activation relu
        self.model.add(convolutional.Conv2D(48,(5,5),padding='same'))
        self.model.add(Activation('relu'))


        #Adding third convolution layer with activation relu
        self.model.add(convolutional.Conv2D(48,(5,5),padding='same'))
        self.model.add(Activation('relu'))
        #model.add(Dropout(cn_drp_rate))


        #Adding fourth convolution layer with activation relu
        self.model.add(convolutional.Conv2D(64,(5,5),padding='same'))
        self.model.add(Activation('relu'))
        #model.add(Dropout(cn_drp_rate))


        #Adding fifth convolution layer with activation relu
        self.model.add(convolutional.Conv2D(64,(5,5),padding='same'))
        self.model.add(Activation('relu'))
        #model.add(Dropout(cn_drp_rate))


        #model.add(pooling.MaxPooling2D(pool_size=(2,2),padding='same'))
        #model.add(convolutional.Conv2D(48,(5,5),padding='same'))
        #model.add(Activation('relu'))
        #model.add(Dropout(cn_drp_rate))
        #model.add(pooling.MaxPooling2D(pool_size=(2,2),padding='same'))

        # Add a flatten layer followed by the fully connected (dense) layers
        self.model.add(Flatten())
        #Adding first dense layer with activation relu and dropout
        self.model.add(Dense(150))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(d_drp_rate))
        #Adding second dense layer with activation relu and dropout
        self.model.add(Dense(150))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(d_drp_rate))

        self.model.add(Dense(150))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(d_drp_rate))



        #Adding final output layer with softmax
        self.model.add(Dense(units=num_class))
        self.model.add(Activation('softmax'))

        self.model.compile(loss='categorical_crossentropy', optimizer=Adamax(), metrics=['accuracy'])
        self.model.fit(train_data, train_target, batch_size = batch_size, epochs=epochs)
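
A hypothetical call site (clf stands for an instance of the enclosing class, which is not shown; test_data is reshaped the same way as train_data):

clf.learn(train_data, train_target, epochs=150, batch_size=40, num_class=2)
test_x = test_data.reshape(test_data.shape[0], 1, test_data.shape[1], 1)
predictions = clf.model.predict_classes(test_x)
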
Example 13
    camcol = int(pathfile.split('/')[-1][-11:-10])
    field = int(pathfile.split('/')[-1][-9:-5])
    objid = int(pathfile.split('/')[-1].replace("new", "")[0:-26])
    d = list(
        set(sp.where(GZ2["run"] == run)[0]).intersection(
            sp.where(GZ2["camcol"] == camcol)[0],
            sp.where(GZ2["field"] == field)[0],
            sp.where(GZ2["obj"] == objid)[0]))[0]
    y_train.append(prob[d])
x_train = sp.array(x_train)  #.transpose()
y_train = sp.asarray(y_train)

model = Sequential()
model.add(
    Convolution2D(16, 10, 10, border_mode='same', input_shape=(207, 207, 1)))
model.add(pooling.MaxPooling2D(pool_size=(2, 2), border_mode='same'))  #98x98

model.add(Convolution2D(32, 9, 9, border_mode='same'))
model.add(pooling.MaxPooling2D(pool_size=(2, 2), border_mode='same'))  # 45x45

model.add(Convolution2D(64, 6, 6, border_mode='same'))
model.add(pooling.MaxPooling2D(pool_size=(2, 2), border_mode='same'))  #20x20

model.add(Convolution2D(128, 5, 5, border_mode='same'))
model.add(pooling.MaxPooling2D(pool_size=(2, 2), border_mode='same'))  #8x8

model.add(Convolution2D(256, 3, 3, border_mode='same'))
#6x6

model.add(Convolution2D(256, 3, 3, border_mode='same'))  #4x4
model.add(MaxPooling2D(pool_size=(2, 2), border_mode='same'))  #2x2
Example 14
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 filters=None,
                 kernel_size=None,
                 strides=None,
                 pool_size=None,
                 denoising=None):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")

        for arg, val in values.items():
            setattr(self, arg, val)

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        for i in range(self.encoder_layers):
            if i == 0:
                self.input_data = Input(shape=self.input_shape)
                self.encoded = BatchNormalization()(self.input_data)
                self.encoded = convolutional.Conv2D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif 0 < i < self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = convolutional.Conv2D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i == self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = convolutional.Conv2D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.encoded)

        self.encoded = pooling.MaxPooling2D(strides=self.pool_size,
                                            padding="same")(self.encoded)
        self.decoded = BatchNormalization()(self.encoded)
        self.decoded = convolutional.Conv2D(filters=self.filters,
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="elu",
                                            padding="same")(self.decoded)
        self.decoded = convolutional.UpSampling2D(size=self.pool_size)(
            self.decoded)

        for i in range(self.decoder_layers):
            if i < self.decoder_layers - 1:
                self.decoded = BatchNormalization()(self.decoded)
                self.decoded = convolutional.Conv2D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.decoded)
                self.decoded = Dropout(rate=0.5)(self.decoded)
            else:
                self.decoded = BatchNormalization()(self.decoded)
                self.decoded = convolutional.Conv2D(
                    filters=self.filters,
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    activation="elu",
                    padding="same")(self.decoded)

        # 4D tensor with shape: (samples, new_rows, new_cols, filters).
        # Remember think of this as a 2D-Lattice across potentially multiple channels per observation.
        # Rows represent time and columns represent some quantities of interest that evolve over time.
        # Channels might represent different sources of information.
        self.decoded = BatchNormalization()(self.decoded)
        self.decoded = convolutional.Conv2D(filters=self.input_shape[2],
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="sigmoid",
                                            padding="same")(self.decoded)

        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
Example 15
def main():
    parser = argparse.ArgumentParser(description="Convolutional Neural Net")
    parser.add_argument("mode",
                        help="Specify whether to train or test the model")
    args = parser.parse_args()

    #Define CNN architecture

    #Load model if it has a savefile, otherwise reinitialize
    try:
        print("Loading model")
        model = keras.models.load_model("./CNN.save")
    except (IOError, OSError):
        print("Save file not found, reinstantiating...")
        #Initialize a model with five convolutional layers and one hidden layer
        model = Sequential([
            convolutional.Conv2D(16, 15, input_shape=(512, 512, 1)),
            pooling.MaxPooling2D(pool_size=(4)),
            convolutional.Conv2D(16, 7),
            Activation('tanh'),
            BatchNormalization(),
            convolutional.Conv2D(16, 7),
            Activation('tanh'),
            BatchNormalization(),
            convolutional.Conv2D(16, 7),
            Activation('tanh'),
            BatchNormalization(),
            pooling.MaxPooling2D(pool_size=(4)),
            convolutional.Conv2D(16, 3),
            Activation('tanh'),
            BatchNormalization(),
            pooling.MaxPooling2D(pool_size=(4)),
            Flatten(),
            Dense(16, activation="tanh"),
            BatchNormalization(),
            Dense(1, activation="tanh")
        ])

    #Compile the model and use stochastic gradient descent to minimize error
    model.compile(loss='mean_squared_error',
                  optimizer=optimizers.SGD(lr=.01),
                  metrics=['acc'])

    #Create a callback function that logs the results of training
    log = keras.callbacks.CSVLogger("KerasLog.csv", append=True)
    #END OF ARCHITECTURE

    if args.mode.lower() == "test":
        print("Beginning test")
        testImages, testLabels = readTest()
        print(model.evaluate(testImages.reshape(40, 512, 512, 1), testLabels))

    elif args.mode.lower() == "train":
        print("Reading validation images")

        batchSize = 16
        valImages, valLabels = readValidationImages()

        bestLoss = float('inf')
        for i in range(1000):
            batch, labels = readBatch(batchSize)

            history = model.fit(x=batch.reshape(batchSize, 512, 512, 1),
                                y=labels,
                                epochs=5,
                                validation_data=(valImages.reshape(
                                    16, 512, 512, 1), valLabels),
                                callbacks=[log])

            #Check to see whether the most recent round of training led to an
            #improved accuracy rate
            currLoss = history.history['val_loss'][-1]
            if currLoss < bestLoss:
                print("Saving model with loss of " + str(currLoss))
                model.save("./CNN.save")
Example 16

# create a convolutional layer for 2 dimensions
model.add(
    convolutional.Conv2D(filters=32,
                         kernel_size=(2, 2),
                         padding='same',
                         strides=(1, 1),
                         activation='relu'))

model.add(BatchNormalization())
model.add(Dropout(.1))

# create a max pooling layer for 2 dimensions
model.add(pooling.MaxPooling2D(
    pool_size=(2, 2),
    padding='same',
))
# create a convolutional layer for 2 dimensions
model.add(
    convolutional.Conv2D(filters=64,
                         kernel_size=(2, 2),
                         padding='same',
                         strides=(1, 1),
                         activation='relu'))
# create a convolutional layer for 2 dimensions
model.add(
    convolutional.Conv2D(filters=64,
                         kernel_size=(2, 2),
                         padding='same',
                         strides=(1, 1),
                         activation='relu'))