def model_definition(self):
    self.model = models.Sequential()
    self.model.add(
        layers.Conv2D(64, (5, 5), activation='relu',
                      input_shape=self.input_shape))
    self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
    self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    self.model.add(layers.Dropout(0.25))
    self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
    self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
    self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    self.model.add(layers.Dropout(0.25))
    self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    self.model.add(layers.AveragePooling2D())
    self.model.add(layers.Conv2D(128, (1, 1), activation='relu'))
    self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    self.model.add(layers.Dropout(0.25))
    self.model.add(layers.Flatten())
    self.model.add(layers.Dense(3072, activation='relu'))
    self.model.add(layers.Dropout(0.5))
    self.model.add(layers.Dense(128, activation='relu'))
    self.model.add(layers.Dropout(0.5))
    self.model.add(layers.Dense(3, activation='softmax'))
    optimizer = optimizers.Adamax()
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['acc'])

def olliNetwork(self):
    self.model = models.Sequential()
    self.model.add(
        layers.Conv2D(64, (5, 5), activation='relu', input_shape=(48, 48, 1)))
    self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
    self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    self.model.add(layers.Dropout(0.25))
    self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
    self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
    self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    self.model.add(layers.Dropout(0.25))
    self.model.add(layers.Conv2D(128, (4, 4), activation='relu'))
    self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    self.model.add(layers.Dropout(0.25))
    self.model.add(layers.Flatten())
    self.model.add(layers.Dense(3072, activation='relu'))
    self.model.add(layers.Dropout(0.5))
    self.model.add(layers.Dense(128, activation='relu'))
    self.model.add(layers.Dropout(0.5))
    self.model.add(layers.Dense(3, activation='softmax'))

def double_conv_layer(x, filter_size, size, dropout, batch_norm=False):
    '''
    Construct a double convolutional block with SAME padding and ReLU
    activations, plus a 1x1 convolutional shortcut that is added residually.

    :param x: input tensor
    :param filter_size: spatial size of the convolutional filters
    :param size: number of filters
    :param dropout: dropout rate; values <= 0 disable dropout
    :param batch_norm: if True, apply batch normalization after each convolution
    :return: output of the residual double convolutional block
    '''
    axis = 3
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(x)
    if batch_norm is True:
        conv = layers.BatchNormalization(axis=axis)(conv)
    conv = layers.Activation('relu')(conv)
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(conv)
    if batch_norm is True:
        conv = layers.BatchNormalization(axis=axis)(conv)
    conv = layers.Activation('relu')(conv)
    if dropout > 0:
        conv = layers.Dropout(dropout)(conv)

    shortcut = layers.Conv2D(size, kernel_size=(1, 1), padding='same')(x)
    if batch_norm is True:
        shortcut = layers.BatchNormalization(axis=axis)(shortcut)

    res_path = layers.add([shortcut, conv])
    return res_path

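# A minimal usage sketch of double_conv_layer (hypothetical shapes and names,
# not part of the original source), assuming `layers` and `models` come from
# keras (or tensorflow.keras): two residual blocks stacked into a small encoder.
inputs = layers.Input(shape=(128, 128, 3))
block1 = double_conv_layer(inputs, filter_size=3, size=32, dropout=0.1,
                           batch_norm=True)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(block1)
block2 = double_conv_layer(pool1, filter_size=3, size=64, dropout=0.1,
                           batch_norm=True)
encoder = models.Model(inputs=inputs, outputs=block2)
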
def _bi_gru(units, x):
    x = layers.Dropout(0.2)(x)
    # forward-direction GRU
    y1 = layers.GRU(units,
                    return_sequences=True,
                    kernel_initializer='he_normal',
                    recurrent_initializer='orthogonal')(x)
    # backward-direction GRU (go_backwards=True returns its output sequence
    # in reversed time order)
    y2 = layers.GRU(units,
                    return_sequences=True,
                    go_backwards=True,
                    kernel_initializer='he_normal',
                    recurrent_initializer='orthogonal')(x)
    # merge the two directions by element-wise addition
    y = layers.add([y1, y2])
    return y

def build_model(self):
    states = layers.Input(shape=(self.state_size,), name='inputStates')

    # Hidden layers
    model = layers.Dense(units=128, activation='linear')(states)
    model = layers.BatchNormalization()(model)
    model = layers.LeakyReLU(0.01)(model)
    model = layers.Dropout(0.3)(model)

    model = layers.Dense(units=256, activation='linear')(model)
    model = layers.BatchNormalization()(model)
    model = layers.LeakyReLU(0.01)(model)
    model = layers.Dropout(0.3)(model)

    model = layers.Dense(units=512, activation='linear')(model)
    model = layers.BatchNormalization()(model)
    model = layers.LeakyReLU(0.01)(model)
    model = layers.Dropout(0.3)(model)

    model = layers.Dense(units=128, activation='linear')(model)
    model = layers.BatchNormalization()(model)
    model = layers.LeakyReLU(0.01)(model)
    model = layers.Dropout(0.3)(model)

    output = layers.Dense(units=self.action_size,
                          activation='tanh',
                          kernel_regularizer=regularizers.l2(0.01),
                          name='outputActions')(model)

    # Keras model
    self.model = models.Model(inputs=states, outputs=output)

    # Define the optimizer and a custom training function driven by the
    # action gradients supplied by the critic
    actionGradients = layers.Input(shape=(self.action_size,))
    loss = K.mean(-actionGradients * output)
    optimizer = optimizers.Adam()
    update_operation = optimizer.get_updates(params=self.model.trainable_weights,
                                             loss=loss)
    self.train_fn = K.function(
        inputs=[self.model.input, actionGradients, K.learning_phase()],
        outputs=[],
        updates=update_operation)

def build_model(self):
    # Define input layers
    inputStates = layers.Input(shape=(self.state_size,), name='inputStates')
    inputActions = layers.Input(shape=(self.action_size,), name='inputActions')

    # Hidden layers for states
    modelS = layers.Dense(units=128, activation='linear')(inputStates)
    modelS = layers.BatchNormalization()(modelS)
    modelS = layers.LeakyReLU(0.01)(modelS)
    modelS = layers.Dropout(0.3)(modelS)

    modelS = layers.Dense(units=256, activation='linear')(modelS)
    modelS = layers.BatchNormalization()(modelS)
    modelS = layers.LeakyReLU(0.01)(modelS)
    modelS = layers.Dropout(0.3)(modelS)

    # Hidden layers for actions
    modelA = layers.Dense(units=256, activation='linear')(inputActions)
    modelA = layers.LeakyReLU(0.01)(modelA)
    modelA = layers.BatchNormalization()(modelA)
    modelA = layers.Dropout(0.5)(modelA)

    # Merge the two branches
    model = layers.add([modelS, modelA])
    model = layers.Dense(units=256, activation='linear')(model)
    model = layers.BatchNormalization()(model)
    model = layers.LeakyReLU(0.01)(model)

    # Q-value layer
    Qvalues = layers.Dense(units=1, activation=None, name='outputQvalues')(model)

    # Keras model
    self.model = models.Model(inputs=[inputStates, inputActions], outputs=Qvalues)
    optimizer = optimizers.Adam()
    self.model.compile(optimizer=optimizer, loss='mse')

    # Gradients of the Q-values with respect to the actions, used for the actor update
    actionGradients = K.gradients(Qvalues, inputActions)
    self.get_action_gradients = K.function(
        inputs=[*self.model.input, K.learning_phase()],
        outputs=actionGradients)

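# Hypothetical sketch (not part of the original source) of how the critic's
# get_action_gradients and the actor's train_fn built above could be wired
# together in a DDPG-style policy update. `actor`, `critic`, `states` and
# `actions` are assumed to exist with matching shapes.
import numpy as np

def ddpg_actor_update(actor, critic, states, actions):
    # dQ/da at the current (state, action) pairs, evaluated in test phase (0)
    action_gradients = np.reshape(
        critic.get_action_gradients([states, actions, 0]),
        (-1, actor.action_size))
    # The actor's training function was built to minimize
    # -actionGradients * output, so this ascends the policy gradient
    # (learning phase = 1 so Dropout/BatchNorm run in training mode).
    actor.train_fn([states, action_gradients, 1])
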
def create_model(dropout_rate):
    model = models.Sequential()
    conv_base = applications.VGG16(include_top=False,
                                   input_shape=(150, 150, 3),
                                   weights='imagenet')
    conv_base.trainable = False
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model

def create_model(dropout_rate):
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model

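# A minimal usage sketch (hypothetical, not part of the original source):
# either create_model variant above returns an uncompiled binary classifier
# for 150x150 RGB images, so it still needs a compile step before training.
model = create_model(dropout_rate=0.5)
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])
# With image generators set up elsewhere, training would look roughly like:
# model.fit_generator(train_generator, steps_per_epoch=100, epochs=30,
#                     validation_data=validation_generator, validation_steps=50)
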
def generate_model():
    conv_base = tf.contrib.keras.applications.VGG16(
        include_top=False,
        weights='imagenet',
        input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
    conv_base.trainable = True
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(
        layers.Dense(HIDDEN_SIZE,
                     name='dense',
                     kernel_regularizer=regularizers.l2(L2_LAMBDA)))
    model.add(layers.Dropout(rate=0.3, name='dropout'))
    model.add(layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
    model = multi_gpu_model(model, gpus=NUM_GPUS)
    print(model.summary())
    return model

def _create_discriminator(self):
    inputs = layers.Input(shape=(HEIGHT, WIDTH, CHANNELS))
    x = layers.Conv2D(128, kernel_size=3)(inputs)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(self.args.dropout)(x)
    outputs = layers.Dense(1, activation='sigmoid')(x)
    discriminator = models.Model(inputs, outputs)
    return discriminator

def _cnn_ctc_init(self):
    # Acoustic model: stacked Conv2D blocks over the audio feature map
    self.input_data = layers.Input(name='the_input',
                                   shape=(self.AUDIO_LENGTH,
                                          self.AUDIO_FEATURE_LENGTH, 1))
    layers_h1 = layers.Conv2D(filters=32, kernel_size=(3, 3), use_bias=False,
                              activation='relu', padding='same',
                              kernel_initializer='he_normal')(self.input_data)
    layers_h1 = layers.Dropout(rate=0.05)(layers_h1)
    layers_h2 = layers.Conv2D(filters=32, kernel_size=(3, 3), use_bias=True,
                              activation='relu', padding='same',
                              kernel_initializer='he_normal')(layers_h1)
    layers_h3 = layers.MaxPooling2D(pool_size=2, strides=None,
                                    padding='valid')(layers_h2)
    layers_h3 = layers.Dropout(rate=0.05)(layers_h3)
    layers_h4 = layers.Conv2D(filters=64, kernel_size=(3, 3), use_bias=True,
                              activation='relu', padding='same',
                              kernel_initializer='he_normal')(layers_h3)
    layers_h4 = layers.Dropout(rate=0.1)(layers_h4)
    layers_h5 = layers.Conv2D(filters=64, kernel_size=(3, 3), use_bias=True,
                              activation='relu', padding='same',
                              kernel_initializer='he_normal')(layers_h4)
    layers_h6 = layers.MaxPooling2D(pool_size=2, strides=None,
                                    padding='valid')(layers_h5)
    layers_h6 = layers.Dropout(rate=0.1)(layers_h6)
    layers_h7 = layers.Conv2D(filters=128, kernel_size=(3, 3), use_bias=True,
                              activation='relu', padding='same',
                              kernel_initializer='he_normal')(layers_h6)
    layers_h7 = layers.Dropout(rate=0.15)(layers_h7)
    layers_h8 = layers.Conv2D(filters=128, kernel_size=(3, 3), use_bias=True,
                              activation='relu', padding='same',
                              kernel_initializer='he_normal')(layers_h7)
    layers_h9 = layers.MaxPooling2D(pool_size=2, strides=None,
                                    padding='valid')(layers_h8)
    layers_h9 = layers.Dropout(rate=0.15)(layers_h9)
    layers_h10 = layers.Conv2D(filters=128, kernel_size=(3, 3), use_bias=True,
                               activation='relu', padding='same',
                               kernel_initializer='he_normal')(layers_h9)
    layers_h10 = layers.Dropout(rate=0.2)(layers_h10)
    layers_h11 = layers.Conv2D(filters=128, kernel_size=(3, 3), use_bias=True,
                               activation='relu', padding='same',
                               kernel_initializer='he_normal')(layers_h10)
    layers_h12 = layers.MaxPooling2D(pool_size=1, strides=None,
                                     padding='valid')(layers_h11)
    layers_h12 = layers.Dropout(rate=0.2)(layers_h12)
    layers_h13 = layers.Conv2D(filters=128, kernel_size=(3, 3), use_bias=True,
                               activation='relu', padding='same',
                               kernel_initializer='he_normal')(layers_h12)
    layers_h13 = layers.Dropout(rate=0.2)(layers_h13)
    layers_h14 = layers.Conv2D(filters=128, kernel_size=(3, 3), use_bias=True,
                               activation='relu', padding='same',
                               kernel_initializer='he_normal')(layers_h13)
    layers_h15 = layers.MaxPooling2D(pool_size=1, strides=None,
                                     padding='valid')(layers_h14)

    # Flatten the feature map into a per-timestep representation and predict
    # the output symbol distribution for each step
    layers_h16 = layers.Reshape(
        (self.AUDIO_FEATURE_LENGTH, self.AUDIO_LENGTH * 2))(layers_h15)
    layers_h16 = layers.Dropout(rate=0.3)(layers_h16)
    layers_h17 = layers.Dense(units=128, use_bias=True, activation='relu',
                              kernel_initializer='he_normal')(layers_h16)
    layers_h17 = layers.Dropout(rate=0.3)(layers_h17)
    layers_h18 = layers.Dense(units=self.OUTPUT_SIZE, use_bias=True,
                              kernel_initializer='he_normal')(layers_h17)
    y_pred = layers.Activation('softmax', name='activation_0')(layers_h18)
    self.cnn_model = models.Model(inputs=self.input_data, outputs=y_pred)

    # CTC loss wiring: the loss itself is computed inside a Lambda layer, so
    # the compiled loss below simply passes that value through
    self.labels = layers.Input(name='the_label',
                               shape=[self.LABEL_SEQUENCE_LENGTH],
                               dtype='float32')
    self.input_length = layers.Input(name='input_length', shape=[1], dtype='int64')
    self.label_length = layers.Input(name='label_length', shape=[1], dtype='int64')
    self.loss = layers.Lambda(function=self._ctc_lambda_func,
                              output_shape=(1,),
                              name='ctc')([y_pred, self.labels,
                                           self.input_length, self.label_length])
    self.ctc_model = models.Model(
        inputs=[self.input_data, self.labels,
                self.input_length, self.label_length],
        outputs=self.loss)
    optimizer = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                decay=0.0, epsilon=10e-8)
    self.ctc_model.compile(optimizer=optimizer,
                           loss={'ctc': lambda y_true, y_pred: y_pred})
    print('[*Info] Model created and compiled successfully.')
    return self.cnn_model, self.ctc_model

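# The CTC model above routes y_pred, labels, input_length and label_length
# through self._ctc_lambda_func, which is not shown in this snippet. A typical
# implementation (an assumption, not the original author's code) simply wraps
# K.ctc_batch_cost:
def _ctc_lambda_func(self, args):
    y_pred, labels, input_length, label_length = args
    # K.ctc_batch_cost expects (labels, y_pred, input_length, label_length)
    # and returns a (batch, 1) tensor of per-sample CTC losses
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
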
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.keras import backend as K
from tensorflow.contrib.keras import layers

# Create the TF session first and register it with Keras so that Keras layers
# (Dropout in particular) run against it
sess = tf.Session()
K.set_session(sess)

img = tf.placeholder(tf.float32, shape=(None, 784))
dense_1 = layers.Dense(128, activation='relu')
x = dense_1(img)
print(dense_1.trainable_weights)
x = layers.Dropout(0.5)(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.5)(x)
logits = layers.Dense(10)(x)
preds = layers.Activation('softmax')(logits)

labels = tf.placeholder(tf.int32)
# sparse_softmax_cross_entropy_with_logits expects raw logits, not softmax outputs
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))

mnist_data = input_data.read_data_sets("MNIST_data", one_hot=False)
optimize = tf.train.RMSPropOptimizer(0.001).minimize(loss)
acc = tf.reduce_mean(
    tf.cast(tf.equal(tf.cast(tf.argmax(preds, 1), tf.int32), labels), tf.float32))
init_op = tf.global_variables_initializer()

with sess.as_default():
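    # Hypothetical continuation of the truncated training loop (not in the
    # original source): initialize variables, run training steps, then
    # evaluate, feeding K.learning_phase() so Dropout behaves correctly in
    # each phase.
    sess.run(init_op)
    for step in range(1000):
        batch = mnist_data.train.next_batch(50)
        sess.run(optimize, feed_dict={img: batch[0],
                                      labels: batch[1],
                                      K.learning_phase(): 1})
    print(sess.run(acc, feed_dict={img: mnist_data.test.images,
                                   labels: mnist_data.test.labels,
                                   K.learning_phase(): 0}))
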
def _dense(units, x, activation="relu"):
    x = layers.Dropout(0.2)(x)
    y = layers.Dense(units,
                     activation=activation,
                     use_bias=True,
                     kernel_initializer='he_normal')(x)
    return y

def create_model(self, img_shape, num_class):
    concat_axis = 3
    inputs = layers.Input(shape=img_shape)
    scale = layers.Lambda(lambda x: x / 255)(inputs)

    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                          name='conv1_1')(scale)
    conv1 = layers.Dropout(0.1)(conv1)
    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = layers.Dropout(0.1)(conv2)
    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = layers.Dropout(0.1)(conv3)
    conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = layers.Dropout(0.1)(conv4)
    conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = layers.Dropout(0.1)(conv5)
    conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

    up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
    ch, cw = self.get_crop_shape(conv4, up_conv5)
    crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
    up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
    conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = layers.Dropout(0.1)(conv6)
    conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
    ch, cw = self.get_crop_shape(conv3, up_conv6)
    crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
    up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = layers.Dropout(0.1)(conv7)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
    ch, cw = self.get_crop_shape(conv2, up_conv7)
    crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
    up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = layers.Dropout(0.1)(conv8)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
    ch, cw = self.get_crop_shape(conv1, up_conv8)
    crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
    up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = layers.Dropout(0.1)(conv9)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    ch, cw = self.get_crop_shape(inputs, conv9)
    conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
    conv10 = layers.Conv2D(num_class, (1, 1), activation='sigmoid')(conv9)

    model = models.Model(inputs=inputs, outputs=conv10)
    return model

def optimizedNework(self):
    self.model.add(
        layers.Conv2D(filters=16, kernel_size=(7, 7), padding='same',
                      name='image_array', input_shape=(SIZE_FACE, SIZE_FACE, 1)))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Conv2D(filters=16, kernel_size=(7, 7), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Activation('relu'))
    self.model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    self.model.add(layers.Dropout(.5))

    self.model.add(layers.Conv2D(filters=32, kernel_size=(5, 5), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Conv2D(filters=32, kernel_size=(5, 5), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Activation('relu'))
    self.model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    self.model.add(layers.Dropout(.5))

    self.model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Activation('relu'))
    self.model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    self.model.add(layers.Dropout(.5))

    self.model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Activation('relu'))
    self.model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    self.model.add(layers.Dropout(.5))

    self.model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
    self.model.add(layers.BatchNormalization())
    self.model.add(layers.Conv2D(filters=3, kernel_size=(3, 3), padding='same'))
    self.model.add(layers.GlobalAveragePooling2D())
    self.model.add(layers.Activation('softmax', name='predictions'))

conv_base = applications.VGG16(include_top=False,
                               weights='imagenet',
                               input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))  # 3 for the RGB channels
conv_base.summary()  # print info about the convolutional base

# NETWORK
# Final layers on top of the VGG base
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(
    layers.Dense(512,
                 name='dense_1',
                 kernel_regularizer=regularizers.l2(L2_LAMBDA)))
model.add(layers.Dropout(rate=0.3, name='dropout_1'))
model.add(layers.Activation(activation='relu', name='activation_1'))
model.add(layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
model.summary()

conv_base.trainable = False
model.summary()


def load_batch(file_list):
    img_array = []
    idx_array = []
    label_array = []
    for file_ in file_list:
data = DataSet(
    '/home/manoolia/code/python/kaggle/titanic-challange/input/train.csv',
    '/home/manoolia/code/python/kaggle/titanic-challange/input/test.csv')
data.load_data()

model = models.Sequential()
model.add(layers.Dense(units=240,
                       activation=activations.sigmoid,
                       input_shape=[5, ]))
model.add(layers.Dropout(rate=0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(units=160,
                       activation=activations.relu))
model.add(layers.Dropout(rate=0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(units=80,
                       activation=activations.sigmoid))
model.add(layers.Dropout(rate=0.2))
model.add(layers.BatchNormalization())

model = models.Sequential()
model.add(
    layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                  activation='relu', input_shape=(58, 106, 1)))
model.add(
    layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                  activation='relu'))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(
    layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                  activation='relu'))
model.add(
    layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                  activation='relu'))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(