def buildClassifier(input_shape=(100, 100, 3)):
    # Initialising the CNN
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                          input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(4, 4), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before

    # Step 3 - Flattening
    classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer

    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png',
               show_shapes=True, show_layer_names=True)
    return classifier
def create_posla_net(self, raw=120, column=320, channel=1):
    # model setting
    inputShape = (raw, column, channel)
    activation = 'relu'
    keep_prob_conv = 0.25
    keep_prob_dense = 0.5
    # init = 'glorot_normal'
    # init = 'he_normal'
    init = 'he_uniform'
    chanDim = -1
    classes = 3

    model = Sequential()

    # CONV => RELU => POOL
    model.add(Conv2D(3, (3, 3), padding="valid", input_shape=inputShape,
                     kernel_initializer=init, activation=activation))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(9, (3, 3), padding="valid",
                     kernel_initializer=init, activation=activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(18, (3, 3), padding="valid",
                     kernel_initializer=init, activation=activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3), padding="valid",
                     kernel_initializer=init, activation=activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(80, kernel_initializer=init, activation=activation))
    model.add(Dropout(keep_prob_dense))
    model.add(Dense(15, kernel_initializer=init, activation=activation))
    model.add(Dropout(keep_prob_dense))

    # softmax classifier
    model.add(Dense(classes, activation='softmax'))
    self.model = model
def create_keras_model(inputShape, nClasses, output_activation='linear'):
    """
    SegNet model
    ----------
    inputShape : tuple
        Tuple with the dimensions of the input data (ny, nx, nBands).
    nClasses : int
        Number of classes.
    """
    filter_size = 64
    kernel = (3, 3)
    pad = (1, 1)
    pool_size = (2, 2)

    inputs = Input(shape=inputShape, name='image')

    # Encoder
    x = Conv2D(64, kernel, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Decoder
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(64, kernel, padding='same')(x)
    x = BatchNormalization()(x)

    x = Conv2D(nClasses, (1, 1), padding='valid')(x)
    outputs = Activation(output_activation, name='output')(x)

    model = Model(inputs=inputs, outputs=outputs, name='segnet')
    return model
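A minimal usage sketch for the SegNet builder above. The 256x256x3 input, the class count, the softmax output activation, and the compile settings are illustrative assumptions, not part of the original snippet:

# Hypothetical usage of create_keras_model (shapes, loss, and optimizer are assumed).
segnet = create_keras_model(inputShape=(256, 256, 3), nClasses=5,
                            output_activation='softmax')
segnet.compile(optimizer='adam', loss='categorical_crossentropy',
               metrics=['accuracy'])
segnet.summary()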
def myModel():
    no_Of_Filters = 60
    size_of_Filter = (5, 5)
    size_of_Filter_2 = (3, 3)
    size_of_pool = (2, 2)
    no_Of_Nodes = 500

    model = Sequential()
    model.add(Conv2D(no_Of_Filters, size_of_Filter,
                     input_shape=(imageDimesions[0], imageDimesions[1], 1),
                     activation='relu'))
    model.add(Conv2D(no_Of_Filters, size_of_Filter, activation='relu'))
    model.add(MaxPooling2D(pool_size=size_of_pool))
    model.add(Conv2D(no_Of_Filters // 2, size_of_Filter_2, activation='relu'))
    model.add(Conv2D(no_Of_Filters // 2, size_of_Filter_2, activation='relu'))
    model.add(MaxPooling2D(pool_size=size_of_pool))
    model.add(Dropout(0.5))  # rate assumed; the original call did not instantiate the layer
    model.add(Flatten())
    model.add(Dense(no_Of_Nodes, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(noOfClasses, activation='softmax'))
    model.compile(Adam(lr=0.001), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def buildClassifier(input_shape=(100, 100, 3)):
    """
    This creates the CNN algorithm.

    Args:
        input_shape(tuple): This is the image shape of (100,100,3)

    Returns:
        classifier(sequential): This is the sequential model.
    """
    # Initialising the CNN
    opt = Adam(lr=0.0002)  # lr = learning rate
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                          input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Flatten())  # a Flatten is needed before the dense layers
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before

    # Step 3 - Flattening
    # classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer

    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer=opt,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png',
               show_shapes=True, show_layer_names=True)
    return classifier
def create_model(epochs=25):
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=(3, 32, 32), padding='same',
                     activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same',
                     kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                     kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                     kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same',
                     kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same',
                     kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))

    lrate = 0.01
    decay = lrate / epochs
    sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd,
                  metrics=['accuracy'])
    return model
def LeNet_model():
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(32, 32, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def initialize_model():
    model = Sequential()
    model.add(Conv2D(40, 11, strides=1, padding='same',
                     input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 64)))
    model.add(Flatten())
    model.add(Dense(units=500))
    model.add(Dense(units=640))
    model.add(Reshape((1, 16, 40)))
    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')
    return model
def __init__(self, filter_sizes):
    super(DownsampleBlock, self).__init__()
    self.conv1 = Conv2D(
        filters=filter_sizes[0],
        kernel_size=(3, 3),
        strides=1,
        padding="same",
    )
    self.bn1 = BatchNormalization()
    self.act1 = Activation("relu")
    self.conv2 = Conv2D(
        filters=filter_sizes[1],
        kernel_size=(3, 3),
        strides=1,
        padding="same",
    )
    self.bn2 = BatchNormalization()
    self.act2 = Activation("relu")
    self.mp = MaxPooling2D(pool_size=(2, 2))
def initialize_model():
    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(Conv2D(filters=40, kernel_size=(1, 11),
                                      padding="same",
                                      input_shape=(1, 1500, 5),
                                      kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))
    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))
    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))
    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])
    return one_filter_keras_model
def MaxPooling(ndim=2, *args, **kwargs):
    if ndim == 2:
        return MaxPooling2D(*args, **kwargs)
    elif ndim == 3:
        return MaxPooling3D(*args, **kwargs)
    else:
        raise ValueError("ndim must be 2 or 3")
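A short usage sketch for the dimension-dispatching helper above; the pool sizes are illustrative examples, not values from the original source:

# Illustrative calls: extra keyword arguments are forwarded to the chosen pooling layer.
pool2d = MaxPooling(ndim=2, pool_size=(2, 2))     # returns a MaxPooling2D layer
pool3d = MaxPooling(ndim=3, pool_size=(2, 2, 2))  # returns a MaxPooling3D layer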
def modelEncode(cae, filterSize, poolSize, sampSize, gpus):
    if gpus > 1:
        cae = cae.layers[-2]

    # initialize encoder
    encode = Sequential()
    encode.add(Convolution2D(8, (filterSize, filterSize),
                             input_shape=(3, sampSize, sampSize),
                             padding='same',
                             weights=cae.layers[0].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Convolution2D(16, (filterSize, filterSize), padding='same',
                             weights=cae.layers[3].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Convolution2D(32, (filterSize, filterSize), padding='same',
                             weights=cae.layers[6].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Convolution2D(64, (filterSize, filterSize), padding='same',
                             weights=cae.layers[9].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Convolution2D(128, (filterSize, filterSize), padding='same',
                             weights=cae.layers[12].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Flatten())
    encode.add(Dense(1024, weights=cae.layers[16].get_weights()))
    encode.add(Activation('relu'))

    if gpus > 1:
        encode = multi_gpu_model(encode, gpus=gpus)
    encode.compile(loss='mse', optimizer='adam')
    return encode
def build(width, height, depth, classes):
    # initialize the model along with the input shape to be
    # "channels last" and the channels dimension itself
    model = Sequential()
    inputShape = (height, width, depth)
    chanDim = -1

    # if we are using "channels first", update the input shape
    # and channels dimension
    if K.image_data_format() == "channels_first":
        inputShape = (depth, height, width)
        chanDim = 1

    # first CONV => RELU => CONV => RELU => POOL layer set
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(32, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # second CONV => RELU => CONV => RELU => POOL layer set
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # first (and only) set of FC => RELU layers
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    # softmax classifier
    model.add(Dense(classes))
    model.add(Activation("softmax"))

    # return the constructed network architecture
    return model
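A minimal call sketch for the builder above; the 64x64 RGB input, the 10-class output, and the compile settings are assumed values for illustration only:

# Hypothetical usage (image size, depth, and class count are assumptions).
model = build(width=64, height=64, depth=3, classes=10)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])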
def build(width, height, depth, classes):
    # depth refers to RGB image
    # initialize the model
    model = Sequential()
    inputShape = (height, width, depth)

    # if we are using "channels first", update the input shape
    if K.image_data_format() == "channels_first":
        inputShape = (depth, height, width)

    # first set of CONV => RELU => POOL layers
    model.add(Conv2D(20, (3, 3), padding="same", input_shape=inputShape))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.23))

    # second set of CONV => RELU => POOL layers
    model.add(Conv2D(50, (3, 3), padding="same"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.23))

    # 3
    model.add(Conv2D(80, (3, 3), padding="same"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.23))

    # 4
    model.add(Conv2D(128, (3, 3), padding="same"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.23))

    # first (and only) set of FC => RELU layers
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation("relu"))

    # softmax classifier
    model.add(Dense(classes))
    model.add(Activation("softmax"))

    # return the constructed network architecture
    return model
def build(size, seq_len, learning_rate, optimizer_class,
          initial_weights,
          cnn_class,
          pre_weights,
          lstm_conf,
          cnn_train_type, classes=1, dropout=0.0):
    input_layer = Input(shape=(seq_len, size, size, 3))

    if cnn_train_type != 'train':
        if cnn_class.__name__ == "ResNet50":
            cnn = cnn_class(weights=pre_weights, include_top=False,
                            input_shape=(size, size, 3))
        else:
            cnn = cnn_class(weights=pre_weights, include_top=False)
    else:
        cnn = cnn_class(include_top=False)

    # control trainability of the CNN
    if cnn_train_type == 'static':
        for layer in cnn.layers:
            layer.trainable = False
    if cnn_train_type == 'retrain':
        for layer in cnn.layers:
            layer.trainable = True

    cnn = TimeDistributed(cnn)(input_layer)

    # the ResNet output shape is 1,1,2048 and would need to be reshaped for the ConvLSTM filters
    # if cnn_class.__name__ == "ResNet50":
    #     cnn = Reshape((seq_len, 4, 4, 128), input_shape=(seq_len, 1, 1, 2048))(cnn)

    lstm = lstm_conf[0](**lstm_conf[1])(cnn)
    lstm = MaxPooling2D(pool_size=(2, 2))(lstm)

    flat = Flatten()(lstm)
    flat = BatchNormalization()(flat)
    flat = Dropout(dropout)(flat)

    linear = Dense(1000)(flat)
    relu = Activation('relu')(linear)
    linear = Dense(256)(relu)
    linear = Dropout(dropout)(linear)
    relu = Activation('relu')(linear)
    linear = Dense(10)(relu)
    linear = Dropout(dropout)(linear)
    relu = Activation('relu')(linear)

    activation = 'sigmoid'
    loss_func = 'binary_crossentropy'
    if classes > 1:
        activation = 'softmax'
        loss_func = 'categorical_crossentropy'

    predictions = Dense(classes, activation=activation)(relu)

    model = Model(inputs=input_layer, outputs=predictions)
    optimizer = optimizer_class[0](lr=learning_rate, **optimizer_class[1])
    model.compile(optimizer=optimizer, loss=loss_func, metrics=['acc'])
    print(model.summary())
    return model
def build_model():
    inp = Input(shape=(FRAME_H, FRAME_W, 3))

    x = Conv2D(filters=8, kernel_size=(5, 5), activation='relu')(inp)
    x = MaxPooling2D((2, 2))(x)
    x = Conv2D(filters=16, kernel_size=(5, 5), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)
    x = Conv2D(filters=32, kernel_size=(5, 5), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation='tanh')(x)

    return Model(inputs=[inp], outputs=[x])
def model6(X_train, X_test, X_valid, y_train, y_test, y_valid):
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    y_valid = np.array(y_valid)

    keras.backend.clear_session()
    np.random.seed(42)
    tf.random.set_seed(42)

    model = Sequential()
    model.add(Conv2D(filters=16, kernel_size=3, activation='relu',
                     input_shape=(128, 128, 3)))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Flatten())
    model.add(Dense(units=128, activation='relu'))
    model.add(Dense(5, activation='softmax'))
    model.summary()

    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer="Adam",
                  metrics=["accuracy"])
    history = model.fit(X_train, y_train, epochs=30,
                        validation_data=(X_valid, y_valid))

    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()

    y_pred = model.predict_classes(X_test)
    accuracy = metrics.accuracy_score(y_test, y_pred)
    print(accuracy)
    return model, history
def build_model(self):
    self.inp = Input(shape=self.input_shape)

    # Encoder path
    self.down1 = DoubleConvBlock([64, 64])(self.inp)
    self.mp1 = MaxPooling2D()(self.down1)
    self.down2 = DoubleConvBlock([128, 128])(self.mp1)
    self.mp2 = MaxPooling2D()(self.down2)
    self.down3 = DoubleConvBlock([256, 256])(self.mp2)
    self.mp3 = MaxPooling2D()(self.down3)
    self.down4 = DoubleConvBlock([512, 512])(self.mp3)
    self.mp4 = MaxPooling2D()(self.down4)
    self.down5 = DoubleConvBlock([1024, 1024])(self.mp4)

    # Decoder path: strides=(2, 2) upsamples each transposed convolution so its
    # output matches the corresponding encoder feature map before concatenation
    self.deconv4 = Conv2DTranspose(filters=512, kernel_size=(2, 2),
                                   strides=(2, 2), padding="same")(self.down5)
    self.concat4 = Concatenate()([self.deconv4, self.down4])
    self.up4 = DoubleConvBlock([512, 512])(self.concat4)
    self.deconv3 = Conv2DTranspose(filters=512, kernel_size=(2, 2),
                                   strides=(2, 2), padding="same")(self.up4)
    self.concat3 = Concatenate()([self.deconv3, self.down3])
    self.up3 = DoubleConvBlock([512, 512])(self.concat3)
    self.deconv2 = Conv2DTranspose(filters=512, kernel_size=(2, 2),
                                   strides=(2, 2), padding="same")(self.up3)
    self.concat2 = Concatenate()([self.deconv2, self.down2])
    self.up2 = DoubleConvBlock([512, 512])(self.concat2)
    self.deconv1 = Conv2DTranspose(filters=512, kernel_size=(2, 2),
                                   strides=(2, 2), padding="same")(self.up2)
    self.concat1 = Concatenate()([self.deconv1, self.down1])
    self.up1 = DoubleConvBlock([512, 512])(self.concat1)

    self.out = Conv2D(1, kernel_size=(1, 1), activation="sigmoid")(self.up1)
    self.model = Model(inputs=[self.inp], outputs=[self.out])
def build(input_shape_width, input_shape_height, classes,
          weight_path='', input_shape_depth=3):
    '''
    weight_path: a .hdf5 file. If it exists, the model weights can be loaded from it.
    '''
    # initialize the model
    model = Sequential()
    input_shape = (input_shape_height, input_shape_width, input_shape_depth)

    # if we are using "channels first", update the input shape
    if K.image_data_format() == 'channels_first':
        input_shape = (input_shape_depth, input_shape_height, input_shape_width)

    # first convolution + relu + pooling layer
    model.add(Conv2D(filters=20, kernel_size=(5, 5), padding='same',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # second convolutional layer
    model.add(Conv2D(filters=50, kernel_size=(5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Flattening
    model.add(Flatten())

    # Full connection
    model.add(Dense(units=500))
    model.add(Activation('relu'))

    # output layer
    model.add(Dense(units=classes))
    model.add(Activation('softmax'))

    if weight_path:
        model.load_weights(weight_path)

    # return the constructed network architecture
    return model
def model4(X_train, X_test, X_valid, y_train, y_test, y_valid):
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    y_valid = np.array(y_valid)

    keras.backend.clear_session()
    np.random.seed(42)
    tf.random.set_seed(42)

    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), padding="same"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(16, (2, 2), padding="same"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(5, activation="softmax"))
    model.summary()

    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer="Adam",
                  metrics=["accuracy"])
    history = model.fit(X_train, y_train, epochs=30,
                        validation_data=(X_valid, y_valid))

    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()

    y_pred = model.predict_classes(X_test)
    accuracy = metrics.accuracy_score(y_test, y_pred)
    print(accuracy)
    return model, history
def build(width, height, depth, classes):
    # Model initialization
    model = Sequential()
    input_shape = (height, width, depth)
    chan_dim = -1

    # Data formatting: update the input shape and channel axis for "channels first"
    if k.image_data_format() == "channels_first":
        input_shape = (depth, height, width)
        chan_dim = 1

    # First layer set
    model.add(Conv2D(16, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chan_dim))
    model.add(Conv2D(16, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Second layer set
    model.add(Conv2D(32, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chan_dim))
    model.add(Conv2D(32, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Third layer set
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    # Softmax classification layer set
    model.add(Dense(classes))
    model.add(Activation("softmax"))

    return model
def model():
    model = Sequential()
    model.add(Conv2D(60, (5, 5), input_shape=(28, 28, 1), activation='relu'))
    model.add(Conv2D(60, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(30, (3, 3), activation='relu'))
    model.add(Conv2D(30, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(Adam(lr=0.001), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def createModel(train_data):
    classes = [
        'battery', 'disc', 'glass', 'metals', 'paper', 'plastic_jug_bottle',
        'plastic_packaging', 'styrofoam'
    ]

    model = Sequential()

    # Add layers
    model.add(Conv2D(32, (3, 3), padding='same',
                     input_shape=train_data.shape[1:], activation='relu',
                     name='conv_1'))
    model.add(Conv2D(32, (3, 3), activation='relu', name='conv_2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_1'))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu',
                     name='conv_3'))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv_4'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_2'))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu',
                     name='conv_5'))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv_6'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_3'))

    model.add(Flatten())
    model.add(Dense(512, activation='relu', name='dense_1'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', name='dense_2'))
    model.add(Dense(len(classes), name='output'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])  # optimizer=RMSprop(lr=0.001)
    return model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def reduction_A(input, k=192, l=224, m=256, n=384):
    channel_axis = -1

    r1 = MaxPooling2D((3, 3), strides=(2, 2))(input)

    r2 = Conv2D(n, (3, 3), activation='relu', strides=(2, 2))(input)

    r3 = Conv2D(k, (1, 1), activation='relu', padding='same')(input)
    r3 = Conv2D(l, (3, 3), activation='relu', padding='same')(r3)
    r3 = Conv2D(m, (3, 3), activation='relu', strides=(2, 2))(r3)

    m = merge.concatenate([r1, r2, r3], axis=channel_axis)
    m = BatchNormalization(axis=channel_axis)(m)
    m = Activation('relu')(m)
    return m
def build(input_shape, num_outputs, block_fn, repetitions):
    """Builds a custom ResNet-like architecture.

    Args:
        input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
        num_outputs: The number of outputs at the final softmax layer
        block_fn: The block function to use. This is either `basic_block` or
            `bottleneck`. The original paper used basic_block for layers < 50.
        repetitions: Number of repetitions of various block units.
            At each block unit, the number of filters is doubled and the
            input size is halved.

    Returns:
        The keras `Model`.
    """
    _handle_dim_ordering()
    if len(input_shape) != 3:
        raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")

    # Permute dimension order if necessary
    if K.image_dim_ordering() == 'tf':
        input_shape = (input_shape[1], input_shape[2], input_shape[0])

    # Load function from str if needed.
    block_fn = _get_block(block_fn)

    input = Input(shape=input_shape)
    conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)

    block = pool1
    filters = 64
    for i, r in enumerate(repetitions):
        block = _residual_block(block_fn, filters=filters, repetitions=r,
                                is_first_layer=(i == 0))(block)
        filters *= 2

    # Last activation
    block = _bn_relu(block)

    # Classifier block
    block_shape = K.int_shape(block)
    pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS],
                                        block_shape[COL_AXIS]),
                             strides=(1, 1))(block)
    flatten1 = Flatten()(pool2)
    dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                  activation="softmax")(flatten1)

    model = Model(inputs=input, outputs=dense)
    return model
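A hedged call sketch for the ResNet builder above. The channels-first input shape, the class count, the `basic_block` string (resolved by `_get_block`), and the [2, 2, 2, 2] repetitions (a ResNet-18-style layout) are illustrative assumptions:

# Hypothetical usage of build(); shape, block function, and repetitions are assumed.
resnet18_like = build(input_shape=(3, 224, 224), num_outputs=1000,
                      block_fn='basic_block', repetitions=[2, 2, 2, 2])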
def reduction_resnet_v2_B(input):
    channel_axis = -1

    r1 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)

    r2 = Conv2D(256, (1, 1), activation='relu', padding='same')(input)
    r2 = Conv2D(384, (3, 3), activation='relu', strides=(2, 2))(r2)

    r3 = Conv2D(256, (1, 1), activation='relu', padding='same')(input)
    r3 = Conv2D(288, (3, 3), activation='relu', strides=(2, 2))(r3)

    r4 = Conv2D(256, (1, 1), activation='relu', padding='same')(input)
    r4 = Conv2D(288, (3, 3), activation='relu', padding='same')(r4)
    r4 = Conv2D(320, (3, 3), activation='relu', strides=(2, 2))(r4)

    m = merge.concatenate([r1, r2, r3, r4], axis=channel_axis)
    m = BatchNormalization(axis=channel_axis)(m)
    m = Activation('relu')(m)
    return m
def primaryModel():
    print('-----primary model training-----')

    # Build the model
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     input_shape=(24, 24, 3)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(4, activation='softmax'))

    # Configure the training process
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    modelFitting(model)
# y_test = y.reshape(-1, 4)
x_test = np.load("gcnn_test_x.npy")
river_index = 0
if river_index == 0:
    print("Model for F1")
    lr = 0.0005

    inputs = Input(shape=(num_history, num_history, num_rivers))
    x1 = Conv2D(4, kernel_size=(2, 2),
                input_shape=(num_history, num_history, num_rivers),
                padding="same", activation='relu')(inputs)
    x2 = Conv2D(4, kernel_size=(2, 2), padding="valid", activation='relu')(x1)
    x3 = Conv2D(4, kernel_size=(2, 2), padding="valid", activation='relu')(x2)
    m1 = MaxPooling2D(pool_size=(2, 2), padding="valid", strides=1)(x3)
    x10 = Reshape((1, 4))(m1)
    x12 = Dense(1)(x10)
    model = Model(inputs=inputs, outputs=x12)

    fname_param = os.path.join('F1.h5')
    true_value_station, pred_value_station = "true_value_station_G-CNN_1001.txt", "pred_value_station_G-CNN_1001.txt"
    figure_name = "Farm1_CNN_4-1_MSE.pdf"
    adam = Adam(lr=lr)
    # ------------
    model.compile(loss='mean_squared_error', optimizer=adam,
                  metrics=['mean_squared_error'])
    # print(model.summary())
    early_stopping = EarlyStopping(monitor="mean_squared_error", patience=15,
                                   mode='min')
    model_checkpoint = ModelCheckpoint(fname_param,
# ds_test = ds_test.cache()
# ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)

# Initialize the model.
# with tpu_strategy.scope():
# creating the model in the TPUStrategy scope means we will train the model on the TPU
model = Sequential()

# 1st Convolutional Layer
model.add(Conv2D(filters=96, input_shape=(224, 224, 3), kernel_size=(11, 11),
                 strides=(4, 4), padding='valid'))
model.add(Activation('relu'))
# Batch Normalisation before passing it to the next layer
model.add(BatchNormalization())
# Pooling Layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))

# 2nd Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1),
                 padding='same'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# Pooling Layer
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))

# 3rd Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                 padding='same'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# Dropout