def build_embedding_dims_CNN():
    InputImg = []
    files = listdir('img')  # note: the loop below iterates over a fixed range instead of `files`
    for file in range(1, 8985):
        InputImg.append(img_to_array(load_img('img/' + '{}.jpg'.format(file))))
    InputImg = numpy.reshape(InputImg, [-1, 3, 36, 36]) / 255

    input_img = Input(shape=(3, 36, 36))
    conv1 = Conv2D(32, (3, 3), data_format='channels_first', activation='relu')(input_img)
    max_pool1 = MaxPool2D((2, 2), data_format='channels_first')(conv1)
    conv2 = Conv2D(32, (3, 3), data_format='channels_first', activation='relu')(max_pool1)
    max_pool2 = MaxPool2D((2, 2), data_format='channels_first')(conv2)
    conv3 = Conv2D(32, (3, 3), data_format='channels_first', activation='relu')(max_pool2)
    reshape = Reshape((32 * 5 * 5,))(conv3)  # (32, 5, 5) feature map flattened to 800
    fc1 = Dense(128, activation='relu')(reshape)
    fc2 = Dense(128, activation='relu')(fc1)

    CNN = Model(inputs=input_img, outputs=fc2)
    CNN.compile(optimizer='adadelta', loss='binary_crossentropy')
    CNN.fit(InputImg, InputImg, epochs=25, batch_size=32, shuffle=True)

    print('saving weights and config')
    with open('net_config.json', 'w') as f:
        f.write(CNN.to_json())
    numpy.save('CNN_weights.npy', CNN.get_weights())
def build_model(dropout=0.8, output_class=2, learning_rate=1e-4):
    # Build the model with Keras
    model = Sequential()
    model.add(Conv2D(32, 3, input_shape=(128, 128, 3)))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(3, 3)))
    model.add(Conv2D(64, 3))  # arguments: number of filters (64) and kernel size (3x3)
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(3, 3)))
    model.add(Conv2D(128, 3))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(3, 3)))
    model.add(Flatten())  # flatten the feature maps, (None, 3, 3, 128) -> (None, 1152)
    model.add(Dense(128))  # fully connected layer
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(output_class, activation='softmax'))  # output layer
    adam = Adam(lr=learning_rate)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=["accuracy"])
    return model
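# A minimal usage sketch for build_model (not part of the original code). The x_train /
# y_train arrays are assumptions: x_train is expected to be (N, 128, 128, 3) images and
# y_train one-hot labels matching the categorical_crossentropy loss.
model = build_model()
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)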
def get_model(drop1=0.25, drop2=0.5, lr=0.001):
    # leftover from a functional-API / BatchNormalization variant; `inp` and `bn_axis`
    # are not used by the Sequential model below
    if K.image_dim_ordering() == 'tf':
        inp = Input(shape=(28, 28, 1))
        bn_axis = 3
    else:
        inp = Input(shape=(1, 28, 28))
        bn_axis = 1

    model = Sequential()
    model.add(Conv2D(32, (5, 5), padding='same', input_shape=(28, 28, 1)))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (5, 5), padding='same'))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPool2D((2, 2)))
    model.add(Dropout(drop1))
    model.add(Conv2D(64, (3, 3), padding='same'))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPool2D((2, 2), (2, 2)))
    model.add(Dropout(drop1))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(drop2))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer=RMSprop(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def cnn(self):
    self.model.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3)))
    self.model.add(Activation('relu'))
    self.model.add(Conv2D(32, (3, 3), padding='same'))
    self.model.add(Activation('relu'))
    self.model.add(MaxPool2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Conv2D(64, (3, 3), padding='same'))
    self.model.add(Activation('relu'))
    self.model.add(Conv2D(64, (3, 3), padding='same'))
    self.model.add(Activation('relu'))
    self.model.add(MaxPool2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Flatten())
    self.model.add(Dense(512))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Dense(10, activation='softmax'))
    # Because the wrapper object keeps the Keras model as a child attribute,
    # calling fit() etc. through it feels awkward.
    return self.model
def get_model():
    # w and h are expected to be defined at module level (input image width and height)
    model = Sequential()
    model.add(Conv2D(20, (4, 4), strides=(1, 1), input_shape=(w, h, 1),
                     activation='relu', name='first_conv_layer'))
    model.add(Conv2D(20, (4, 4), strides=(1, 1), activation='relu'))
    model.add(MaxPool2D())
    model.add(Conv2D(10, (4, 4), strides=(1, 1), activation='relu'))
    model.add(Conv2D(5, (3, 3), strides=(1, 1), activation='relu', name='last_conv_layer'))
    model.add(MaxPool2D())
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(6, activation='softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def main(x, y, batch_size=10, epochs=10):
    model = Sequential()
    # 1 block
    model.add(Conv2D(20, (5, 5), strides=(1, 1), padding='valid',
                     kernel_initializer='he_normal', input_shape=(60, 60, 3)))
    model.add(Activation('relu'))
    # model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=(2, 2)))
    # 2 block
    model.add(Conv2D(40, (7, 7), strides=(1, 1), padding='valid',
                     kernel_initializer='he_normal'))
    model.add(Activation('relu'))
    # model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=(2, 2)))
    # 3 block
    model.add(Conv2D(80, (11, 11), strides=(1, 1), padding='valid',
                     kernel_initializer='he_normal'))
    model.add(Activation('relu'))
    # model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(80))
    model.add(Activation('sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(x, y, batch_size=batch_size, epochs=epochs)
def yolo():
    ip = Input(shape=(416, 416, 3))
    h = Conv2D(16, (3, 3), strides=(1, 1), padding='same', use_bias=False)(ip)
    h = BatchNormalization()(h)
    h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2))(h)

    for i in range(0, 4):
        h = Conv2D(32 * (2 ** i), (3, 3), strides=(1, 1), padding='same', use_bias=False)(h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)
        h = MaxPool2D(pool_size=(2, 2))(h)

    h = Conv2D(512, (3, 3), strides=(1, 1), padding='same', use_bias=False)(h)
    h = BatchNormalization()(h)
    h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')(h)

    for _ in range(0, 2):
        h = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', use_bias=False)(h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)

    h = Conv2D(125, (1, 1), strides=(1, 1), kernel_initializer='he_normal')(h)
    h = Activation('linear')(h)
    h = Reshape((13, 13, 5, 25))(h)
    return Model(ip, h)
def LeNet(width, height, channels, output):
    model = Sequential()
    # Convolution
    model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(2, 2),
                     input_shape=(width, height, channels)))
    # ReLU activation
    model.add(Activation('relu'))
    # Pooling
    model.add(MaxPool2D(pool_size=(2, 2)))
    # Convolution
    model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2)))
    # ReLU activation
    model.add(Activation('relu'))
    # Pooling
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    # Hidden layer
    model.add(Dense(100))
    model.add(Activation('relu'))
    model.add(Dense(output))
    model.add(Activation('softmax'))
    return model
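# A minimal sketch of compiling and training the LeNet model above (LeNet() itself
# returns an uncompiled model). The optimizer, loss, and the x_train / y_train arrays
# are assumptions for illustration.
model = LeNet(width=32, height=32, channels=3, output=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# x_train: (N, 32, 32, 3) images; y_train: (N, 10) one-hot labels
model.fit(x_train, y_train, batch_size=32, epochs=10)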
def k_cnn2_mlp(yao_indices_dim, face_image_shape, with_compile=True):
    '''
    The 'k_' prefix means the model is built from Keras layers.
    '''
    # cnn layer parameters
    _nb_filters_1 = 80
    _kernel_size_1 = (3, 3)
    _cnn_activation_1 = 'relu'
    _pool_size_1 = (2, 2)
    _cnn_dropout_1 = 0.0
    _nb_filters_2 = 64
    _kernel_size_2 = (5, 5)
    _cnn_activation_2 = 'relu'
    _pool_size_2 = (2, 2)
    _cnn_dropout_2 = 0.0
    # mlp layer parameters
    _mlp_units = 40
    _mlp_activation = 'sigmoid'
    _mlp_dropout = 0.0
    _output_units = yao_indices_dim
    _output_activation = 'sigmoid'

    print('Build 2 * CNN + MLP model...')
    cnn2_mlp_model = Sequential()
    cnn2_mlp_model.add(Conv2D(filters=_nb_filters_1, kernel_size=_kernel_size_1,
                              input_shape=face_image_shape))
    cnn2_mlp_model.add(Activation(activation=_cnn_activation_1))
    cnn2_mlp_model.add(MaxPool2D(pool_size=_pool_size_1))
    cnn2_mlp_model.add(Dropout(rate=_cnn_dropout_1))
    cnn2_mlp_model.add(Conv2D(filters=_nb_filters_2, kernel_size=_kernel_size_2))
    cnn2_mlp_model.add(Activation(activation=_cnn_activation_2))
    cnn2_mlp_model.add(MaxPool2D(pool_size=_pool_size_2, name='conv1_2'))
    cnn2_mlp_model.add(Dropout(rate=_cnn_dropout_2))
    cnn2_mlp_model.add(BatchNormalization())
    cnn2_mlp_model.add(Flatten())
    cnn2_mlp_model.add(Dense(units=_mlp_units, activation=_mlp_activation, name='dense2_1'))
    cnn2_mlp_model.add(Dropout(rate=_mlp_dropout))
    cnn2_mlp_model.add(BatchNormalization())
    # cnn2_mlp_model.add(Dropout(rate=_mlp_dropout))
    cnn2_mlp_model.add(Dense(units=_output_units))
    cnn2_mlp_model.add(Activation(activation=_output_activation))

    # print the layer structure
    cnn2_mlp_model.summary()

    if with_compile:
        return compiler(cnn2_mlp_model)
    else:
        # ready to plug into other frameworks such as TensorFlow
        return cnn2_mlp_model
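# The compiler() helper used by k_cnn2_mlp when with_compile=True is not defined in this
# snippet. A minimal sketch of what such a helper might do; the optimizer and the
# multi-label-style binary cross-entropy loss (matching the sigmoid output) are assumptions.
def compiler(model):
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model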
def create_model():
    """
    Creates the Keras model; feel free to alter the architecture.
    :return: Keras model
    """
    model = Sequential()
    model.add(Conv2D(8, (3, 3), padding='same', input_shape=(48, 48, 1),
                     kernel_regularizer=l2(), activation='relu'))
    model.add(Conv2D(16, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(MaxPool2D(padding='same'))
    model.add(Dropout(0.25))
    model.add(Conv2D(16, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(Conv2D(16, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(MaxPool2D(padding='same'))
    model.add(Dropout(0.25))
    model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=l2(), activation='relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024, kernel_regularizer=l2(), activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(7, activation='softmax'))
    return model
def unet():
    img_size = 240
    num_filters = 64
    X = Input((img_size, img_size, 3))
    # Encode level 1
    encode1 = conv2d_bn(X, num_filters)
    encode1 = conv2d_bn(encode1, num_filters)
    # Encode level 2
    encode2 = MaxPool2D((2, 2))(encode1)
    encode2 = conv2d_bn(encode2, num_filters * 2)
    encode2 = conv2d_bn(encode2, num_filters * 2)
    # Encode level 3
    encode3 = MaxPool2D((2, 2))(encode2)
    encode3 = conv2d_bn(encode3, num_filters * 4)
    encode3 = conv2d_bn(encode3, num_filters * 4)
    # Encode level 4
    encode4 = MaxPool2D((2, 2))(encode3)
    encode4 = conv2d_bn(encode4, num_filters * 8)
    encode4 = conv2d_bn(encode4, num_filters * 8)
    # Encode level 5
    encode5 = MaxPool2D((2, 2))(encode4)
    encode5 = conv2d_bn(encode5, num_filters * 16)
    encode5 = conv2d_bn(encode5, num_filters * 8)
    # Decode level 4
    decode4 = conv2d_bn(encode5, num_filters * 8, transpose=True)
    decode4 = Concatenate()([encode4, decode4])
    decode4 = conv2d_bn(decode4, num_filters * 8)
    decode4 = conv2d_bn(decode4, num_filters * 8)
    # Decode level 3
    decode3 = conv2d_bn(decode4, num_filters * 4, transpose=True)
    decode3 = Concatenate()([encode3, decode3])
    decode3 = conv2d_bn(decode3, num_filters * 4)
    decode3 = conv2d_bn(decode3, num_filters * 4)
    # Decode level 2
    decode2 = conv2d_bn(decode3, num_filters * 2, transpose=True)
    decode2 = Concatenate()([encode2, decode2])
    decode2 = conv2d_bn(decode2, num_filters * 2)
    decode2 = conv2d_bn(decode2, num_filters * 2)
    # Decode level 1
    decode1 = conv2d_bn(decode2, num_filters, transpose=True)
    decode1 = Concatenate()([encode1, decode1])
    decode1 = conv2d_bn(decode1, num_filters)
    decode1 = conv2d_bn(decode1, num_filters // 2)  # integer filter count
    decode1 = Conv2D(1, kernel_size=(1, 1))(decode1)
    decode1 = Activation('sigmoid')(decode1)
    mdl = Model(X, decode1)
    return mdl
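# conv2d_bn is not defined in this snippet. A minimal sketch of what such a helper
# typically looks like (a Conv2D or Conv2DTranspose followed by batch normalization and
# ReLU); the kernel size, stride, and padding below are assumptions, not the original code.
def conv2d_bn(x, filters, transpose=False):
    if transpose:
        # decoder path: upsample by 2 to undo the MaxPool2D((2, 2)) of the encoder
        x = Conv2DTranspose(filters, (3, 3), strides=(2, 2), padding='same')(x)
    else:
        x = Conv2D(filters, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    return Activation('relu')(x)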
def CNN_model02B(nz, nx, channels, ng):
    # CNN to concatenate global features
    # ng is the number of global features

    # shared CNN weights
    conv_1 = Conv2D(10, kernel_size=1, activation='relu')
    conv_2 = Conv2D(5, kernel_size=2, activation='relu')
    batch_norm_layer = BatchNormalization()
    # conv_1 = Conv2D(4, kernel_size=2, activation='relu', kernel_initializer='glorot_normal')
    # conv_2 = Conv2D(4, kernel_size=2, activation='relu', kernel_initializer='glorot_normal')

    # first input model
    input_1 = Input(shape=(nz, nx, channels))
    conv_11 = conv_1(input_1)
    pool_11 = MaxPool2D(pool_size=(2, 2))(conv_11)
    conv_12 = conv_2(pool_11)
    # pool_12 = MaxPool2D(pool_size=(1, 1))(conv_12)
    # f1 = Flatten()(pool_12)
    f1 = Flatten()(conv_12)

    # second input model
    input_2 = Input(shape=(nz, nx, channels))
    conv_21 = conv_1(input_2)
    pool_21 = MaxPool2D(pool_size=(2, 2))(conv_21)
    conv_22 = conv_2(pool_21)
    pool_22 = MaxPool2D(pool_size=(2, 1))(conv_22)
    f2 = Flatten()(pool_22)
    # f2 = Flatten()(conv_22)

    # add global features
    input_3 = Input(shape=(ng,))

    # merge the branches and add dense layers
    merge_layer = concatenate([f1, f2, input_3])
    # add batch norm here
    # batch_norm = batch_norm_layer(merge_layer)
    dense_1 = Dense(20, activation='relu')(merge_layer)
    # dense_1 = Dense(20, activation='relu')(batch_norm)
    out_1 = Dense(1, activation='linear')(dense_1)

    model_cnn = Model(inputs=[input_1, input_2, input_3], outputs=out_1)
    print(model_cnn.summary())
    return model_cnn
def model_fn(learning_rate, lam, dropout):
    """Create a Keras functional model with layers."""
    input = Input(shape=(60, 60, 3))
    # 1 block
    model = Dropout(0.2, input_shape=(60, 60, 3))(input)
    model = Conv2D(20, (5, 5), strides=(1, 1), padding='valid',
                   kernel_initializer='he_normal',
                   kernel_regularizer=keras.regularizers.l2(lam))(model)
    model = BatchNormalization()(model)
    model = Activation('relu')(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Dropout(dropout)(model)
    # 2 block
    model = Conv2D(40, (7, 7), strides=(1, 1), padding='valid',
                   kernel_initializer='he_normal',
                   kernel_regularizer=keras.regularizers.l2(lam))(model)
    model = BatchNormalization()(model)
    model = Activation('relu')(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Dropout(dropout)(model)
    # 3 block
    model = Conv2D(80, (11, 11), strides=(1, 1), padding='valid',
                   kernel_initializer='he_normal',
                   kernel_regularizer=keras.regularizers.l2(lam))(model)
    model = BatchNormalization()(model)
    model = Activation('relu')(model)
    model = Dropout(dropout)(model)
    model = Flatten()(model)
    model = Dropout(dropout)(model)
    model = Dense(80, kernel_regularizer=keras.regularizers.l2(lam))(model)
    model = Activation('relu')(model)

    # one 2-way softmax head per output
    outputs = []
    for i in range(80):
        out = Dropout(dropout, name='out_dropout%s' % i)(model)
        out = Dense(2, kernel_regularizer=keras.regularizers.l2(lam),
                    name='dense_out%s' % i)(out)
        out = Activation('softmax', name='out%s' % i)(out)
        outputs.append(out)

    model = Model(inputs=input, outputs=outputs)
    compile_model(model, learning_rate)
    return model
def vgg_yolo():
    vgg19 = VGG19(include_top=False, weights='imagenet', input_tensor=None,
                  input_shape=(416, 416, 3))
    ip = Input(shape=(416, 416, 3))
    # Block1
    h = vgg19.layers[1](ip)
    h = BatchNormalization()(h)
    h = LeakyReLU(alpha=0.1)(h)
    h = vgg19.layers[2](h)
    h = BatchNormalization()(h)
    h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2))(h)
    # Block2
    for i in range(4, 6):
        h = vgg19.layers[i](h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2))(h)
    # Block3
    for i in range(7, 11):
        h = vgg19.layers[i](h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2))(h)
    # Block4
    for i in range(12, 16):
        h = vgg19.layers[i](h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2))(h)
    # Block5
    for i in range(17, 21):
        h = vgg19.layers[i](h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)
    h = MaxPool2D(pool_size=(2, 2))(h)
    # Block6
    for _ in range(0, 2):
        h = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', use_bias=False)(h)
        h = BatchNormalization()(h)
        h = LeakyReLU(alpha=0.1)(h)
    # Block7
    h = Conv2D(125, (1, 1), strides=(1, 1), kernel_initializer='he_normal')(h)
    h = Activation('linear')(h)
    h = Reshape((13, 13, 5, 25))(h)
    return Model(ip, h)
def build_LeNet(width, height, channels, output):
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(5, 5), strides=(2, 2),
                     input_shape=(width, height, channels)))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), strides=(2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Activation('relu'))
    model.add(Dense(output))
    return model
def network(self, img_shape=(160, 320, 3)):
    # normalize
    self.model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=img_shape))
    self.model.add(Cropping2D(cropping=((25, 10), (0, 0))))
    # after cropping: 125x320x3 to 123x318x6
    self.model.add(Convolution2D(6, kernel_size=3, strides=1, padding="valid"))
    self.model.add(Activation('relu'))
    # to 119x314x16
    self.model.add(Convolution2D(16, kernel_size=5, strides=1, padding="valid"))
    self.model.add(Activation('relu'))
    # to 60x157x16
    self.model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same"))
    self.model.add(Dropout(0.5))
    # to 59x156x26
    self.model.add(Convolution2D(26, kernel_size=2, strides=1, padding="valid"))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Convolution2D(52, kernel_size=3, strides=1, padding="valid"))
    self.model.add(Activation('relu'))
    self.model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same"))
    self.model.add(Dropout(0.5))
    self.model.add(Flatten())
    self.model.add(Dense(400))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.6))
    self.model.add(Dense(120))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.6))
    self.model.add(Dense(60))
    self.model.add(Activation('relu'))
    # linear regression output layer
    self.model.add(Dense(1))
    return self.model
def build_network(self):
    # print('[+] Building CNN')
    # self.model = Sequential()
    # # input: 100x100 images with 3 channels -> (3, 100, 100) tensors.
    # # this applies 32 convolution filters of size 3x3 each.
    # self.model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=(48, 48, 1)))
    # self.model.add(Activation('relu'))
    # self.model.add(Convolution2D(32, 3, 3))
    # self.model.add(Activation('relu'))
    # self.model.add(MaxPooling2D(pool_size=(2, 2)))
    # self.model.add(Dropout(0.25))
    #
    # self.model.add(Convolution2D(64, 3, 3, border_mode='same'))
    # self.model.add(Activation('relu'))
    # self.model.add(Convolution2D(64, 3, 3))
    # self.model.add(Activation('relu'))
    # self.model.add(MaxPooling2D(pool_size=(2, 2)))
    # self.model.add(Dropout(0.25))
    #
    # self.model.add(Flatten())
    # # Note: Keras does automatic shape inference.
    # self.model.add(Dense(256))
    # self.model.add(Activation('relu'))
    # self.model.add(Dropout(0.5))
    #
    # self.model.add(Dense(7))
    # self.model.add(Activation('softmax'))
    # sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    # self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['acc'])

    self.mnist_input = Input(shape=(128, 128, 1), name='input')
    self.conv1 = Conv2D(64, kernel_size=3, activation='relu', name='conv1')(self.mnist_input)
    self.pool1 = MaxPool2D(pool_size=(5, 5), strides=2, name='pool1')(self.conv1)
    self.conv2 = Conv2D(128, kernel_size=2, activation='relu', name='conv2')(self.pool1)
    self.pool2 = MaxPool2D(pool_size=(10, 10), strides=1, name='pool2')(self.conv2)
    self.conv3 = Conv2D(254, kernel_size=6, activation='relu', name='conv3')(self.pool2)
    # self.conv1_1 = Conv2D(128, kernel_size=5, strides=3, activation='relu', name='conv1_1')(self.mnist_input)
    # self.pool1_1 = MaxPool2D(pool_size=(5, 5), strides=2, name='pool1_1')(self.conv1_1)
    # self.concat1 = merge([self.conv3, self.pool1_1], mode='concat', concat_axis=-1)
    # self.conv4 = Conv2D(192, kernel_size=3, activation='relu', name='conv4')(self.concat1)
    self.conv4 = Conv2D(192, kernel_size=2, activation='relu', name='conv4')(self.conv3)
    self.conv5 = Conv2D(256, kernel_size=1, activation='relu', name='conv5')(self.conv4)
    # self.Dropout = Dropout(0.25)
    self.flat1 = Flatten()(self.conv5)
    self.dense1 = Dense(128, activation='relu', name='dense1')(self.flat1)
    self.Dropout = Dropout(0.5)  # created but not wired into the graph
    self.output = Dense(7, activation='softmax', name='output')(self.dense1)
    self.model = Model(inputs=self.mnist_input, outputs=self.output)
    # sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    adam = Adam(lr=0.00008, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    # pass the configured Adam instance so the custom learning rate takes effect
    self.model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
def build_convnet(shape=(256, 256, 3)):
    # momentum = .9
    model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=shape, padding='same', activation='relu'))
    # model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPool2D())
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    # model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    # model.add(MaxPool2D(pool_size=(2, 2)))
    # model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    # model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    # model.add(BatchNormalization())
    # model.add(MaxPool2D(pool_size=(2, 2)))
    #
    # model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    # model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    # model.add(BatchNormalization())
    # flatten...
    model.add(Flatten())
    return model
def Inception(x, d_out):
    d1 = math.floor(d_out / 4)
    d2 = math.floor(d_out / 2)
    # Branch 1: 1x1 convolution
    conv1x1a = Conv2D(d_out, 1, padding='same', activation='relu')(x)
    conv1x1a = Dropout(0.5)(conv1x1a)
    # Branch 2: 1x1 convolution -> 3x3 convolution -> 3x3 convolution
    conv1x1b = Conv2D(d1, 1, padding='same', activation='relu')(x)
    conv3x3 = Conv2D(d2, 3, padding='same', activation='relu')(conv1x1b)
    conv3x3 = Conv2D(d_out, 3, padding='same', activation='relu')(conv3x3)
    conv3x3 = Dropout(0.5)(conv3x3)
    # Branch 3: 1x1 convolution -> 5x5 convolution -> 5x5 convolution
    conv1x1c = Conv2D(d1, 1, padding='same', activation='relu')(x)
    conv5x5 = Conv2D(d2, 5, padding='same', activation='relu')(conv1x1c)
    conv5x5 = Conv2D(d_out, 5, padding='same', activation='relu')(conv5x5)
    conv5x5 = Dropout(0.5)(conv5x5)
    # Branch 4: max pooling -> 1x1 convolution -> 1x1 convolution -> 1x1 convolution
    maxpool = MaxPool2D(pool_size=(3, 3), padding='same', strides=(1, 1))(x)
    conv1x1d = Conv2D(d1, 1, padding='same', activation='relu')(maxpool)
    conv1x1d = Conv2D(d2, 1, padding='same', activation='relu')(conv1x1d)
    conv1x1d = Conv2D(d_out, 1, padding='same', activation='relu')(conv1x1d)
    conv1x1d = Dropout(0.5)(conv1x1d)
    # concatenate the branches along the channel axis
    output = Concatenate(axis=-1)([conv1x1a, conv3x3, conv5x5, conv1x1d])
    return output
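# A minimal sketch of how this Inception block might be used inside a classifier.
# The input size (64x64x3), the stem convolution, and the 10-class head are assumptions
# for illustration, not part of the original code.
inputs = Input(shape=(64, 64, 3))
x = Conv2D(32, 3, padding='same', activation='relu')(inputs)
x = Inception(x, 64)          # concatenated branches give 4 * 64 = 256 channels
x = MaxPool2D((2, 2))(x)
x = Inception(x, 128)
x = Flatten()(x)
outputs = Dense(10, activation='softmax')(x)
model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])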
def lenet_model():
    model = Sequential()
    model.add(Cropping2D(cropping=((20, 12), (0, 0)), input_shape=(64, 64, 3)))
    model.add(Lambda(lambda x: (x / 255.0) - 0.5))
    model.add(Conv2D(6, (5, 5), strides=(1, 1), padding='valid', activation='relu'))
    model.add(Dropout(0.2))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Conv2D(16, (5, 5), strides=(1, 1), padding='valid', activation='relu'))
    model.add(Dropout(0.2))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Flatten())
    model.add(Dense(120))
    model.add(Dense(84))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model
def create_model(self, rho=0.9, decay=0.0):
    inputs = Input(shape=(self.max_length, self.max_index))
    char_embedding = TimeDistributed(
        Dense(self.char_embedding_size, use_bias=False, activation='tanh'))(inputs)
    char_embedding = Reshape(
        (self.max_length, self.char_embedding_size, 1))(char_embedding)

    masked_embedding = MaskConv(0.0)(char_embedding)
    masked_seq = MaskToSeq(layer=MaskConv(0.0), time_axis=1)(char_embedding)

    char_feature = MaskConvNet(
        Conv2D(self.channel_size, (2, self.conv_size), strides=(1, self.conv_size),
               activation='tanh', padding='same', use_bias=False))(masked_embedding)
    mask_feature = MaskPooling(MaxPool2D((self.max_length, 1), padding='same'),
                               pool_mode='max')(char_feature)

    encoded_feature = ConvEncoder()([mask_feature, masked_seq])

    dense_input = RNNDecoder(
        RNNCell(
            LSTM(units=self.latent_size, return_sequences=True, implementation=2,
                 unroll=False, dropout=0., recurrent_dropout=0.),
            Dense(units=self.encoding_size, activation='tanh'),
            dense_dropout=0.))(encoded_feature)

    outputs = TimeDistributed(
        Dense(self.word_embedding_size, activation='tanh'))(dense_input)

    model = Model(inputs, outputs)
    picked = Pick()(encoded_feature)
    encoder = Model(inputs, picked)

    optimizer = RMSprop(lr=self.learning_rate, rho=rho, decay=decay)
    model.compile(loss='cosine_proximity', optimizer=optimizer)
    return model, encoder
def maxpool2d(x,
              kernel_size=config.MAXPOOL2D_KERNEL_SIZE,
              stride=config.MAXPOOL2D_STRIDES,
              padding=config.MAXPOOL2D_PADDING):
    output = MaxPool2D(pool_size=kernel_size, strides=stride, padding=padding)(x)
    return output
def create_model(self):
    inputs = Input(shape=(self.x, self.y, self.channel_size))
    masked_inputs = MaskConv(self.mask_value)(inputs)
    outputs = MaskPooling(MaxPool2D(self.pool_size, self.strides, self.padding),
                          pool_mode='max')(masked_inputs)
    model = Model(inputs, outputs)
    model.compile('sgd', 'mean_squared_error')
    return model
def autoencoder(self, x):
    # encoder
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1',
               data_format='channels_last')(x)
    x = MaxPool2D((2, 2), strides=(2, 2), name='max1')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='conv2')(x)
    x = MaxPool2D((2, 2), strides=(2, 2), name='max2')(x)
    x = Flatten()(x)
    x = Dense(20)(x)  # bottleneck
    # decoder
    x = Dense(75 * 120 * 32)(x)
    x = Reshape((75, 120, 32))(x)
    x = UpSampling2D()(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same', name='conv_t11')(x)
    x = UpSampling2D()(x)
    x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='conv_t12')(x)
    return x
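# A minimal sketch of wrapping the autoencoder graph in a trainable Model. The 300x480x1
# input shape is inferred from the decoder (75*4 x 120*4 x 1), `builder` stands for
# whatever object defines autoencoder(), and the optimizer/loss choice is an assumption.
inp = Input(shape=(300, 480, 1))
out = builder.autoencoder(inp)
ae_model = Model(inp, out)
ae_model.compile(optimizer='adam', loss='binary_crossentropy')
# ae_model.fit(images, images, epochs=10, batch_size=16)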
def max_pooling2d(self, pool_size=(2, 2), strides=(2, 2), padding='same'):
    self.stream = MaxPool2D(pool_size=pool_size, strides=strides,
                            padding=padding)(self.stream)
    return self
def simple_model(input_shape=(1, 256, 256)):
    # note: the (channels, height, width) shape assumes image_data_format is 'channels_first'
    inp = Input(shape=input_shape)
    network = Conv2D(32, 3, kernel_initializer='glorot_uniform', activation='relu')(inp)
    network = Conv2D(64, 3, kernel_initializer='glorot_uniform', activation='relu')(network)
    network = MaxPool2D((2, 2))(network)
    network = Conv2D(64, 3, kernel_initializer='glorot_uniform', activation='relu')(network)
    network = Conv2D(64, 3, kernel_initializer='glorot_uniform', activation='relu')(network)
    network = MaxPool2D((2, 2))(network)
    network = Conv2D(64, 3, kernel_initializer='glorot_uniform', activation='relu')(network)
    network = MaxPool2D((2, 2))(network)
    network = Conv2D(128, 3, kernel_initializer='glorot_uniform', activation='relu')(network)
    network = MaxPool2D((2, 2))(network)
    network = Conv2D(256, 3, kernel_initializer='glorot_uniform', activation='relu')(network)
    network = Flatten()(network)
    network = Dense(1024, activation='relu')(network)
    network = Dropout(0.25)(network)
    network = Dense(512, activation='relu')(network)
    network = Dropout(0.25)(network)
    network = Dense(2, activation='softmax')(network)
    return inp, network
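# simple_model returns the input and output tensors rather than a compiled model; a
# minimal sketch of turning them into a trainable model (the optimizer and loss are
# assumptions).
inp, out = simple_model()
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])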
def build_model():
    model = Sequential()
    model.add(Conv2D(32, 3, input_shape=X_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, 3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, 3))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # categorical cross-entropy matches the multi-class softmax output
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
def model_fn(learning_rate=0.1):
    """Create a Keras Sequential model with layers."""
    model = models.Sequential()
    # 1 block
    model.add(Conv2D(20, (5, 5), strides=(1, 1), padding='valid',
                     kernel_initializer='he_normal',
                     batch_input_shape=(None, 60, 60, 3)))
    model.add(Activation('relu'))
    # model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=(2, 2)))
    # 2 block
    model.add(Conv2D(40, (7, 7), strides=(1, 1), padding='valid',
                     kernel_initializer='he_normal'))
    model.add(Activation('relu'))
    # model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=(2, 2)))
    # 3 block
    model.add(Conv2D(80, (11, 11), strides=(1, 1), padding='valid',
                     kernel_initializer='he_normal'))
    model.add(Activation('relu'))
    # model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(80))
    model.add(Activation('sigmoid', name='last'))
    compile_model(model, learning_rate)
    return model
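# compile_model is called by both model_fn variants above but is not defined in these
# snippets. A minimal sketch of what such a helper might look like; the optimizer and
# the binary cross-entropy loss (matching the sigmoid output) are assumptions.
def compile_model(model, learning_rate):
    model.compile(optimizer=Adam(lr=learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model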
def build(height, width, depth, classes):
    model = Sequential()
    input_shape = (height, width, depth)
    chanDim = -1
    if k.image_data_format() == "channels_first":
        input_shape = (depth, height, width)
        chanDim = 1
    # first set
    model.add(Conv2D(32, (3, 3), input_shape=input_shape, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    # second set
    model.add(Conv2D(32, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    # pooling layer
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # third set
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    # fourth set
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    # pooling layer
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dense(classes))
    model.add(Activation("softmax"))
    return model
def cnn_generate(data_train_2d, target_train):
    """
    Build and train the CNN model.
    """
    target_train_oh = np_utils.to_categorical(target_train, NUM_CLASS)

    cnn_model = Sequential()
    cnn_model.add(Conv2D(20, 2, input_shape=(data_train_2d.shape[1],
                                             data_train_2d.shape[2], 1)))
    cnn_model.add(Activation('relu'))
    cnn_model.add(MaxPool2D(pool_size=(2, 2)))
    cnn_model.add(Conv2D(10, 2))
    cnn_model.add(Activation('relu'))
    cnn_model.add(MaxPool2D(pool_size=(2, 2)))
    cnn_model.add(Flatten())
    cnn_model.add(Dense(200))
    cnn_model.add(Activation('relu'))
    cnn_model.add(Dense(target_train_oh.shape[1], activation='softmax'))

    adam = Adam(lr=1e-3)
    cnn_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    history = cnn_model.fit(data_train_2d, target_train_oh, batch_size=20,
                            epochs=CNN_EPOCH, verbose=1, validation_split=0.2)
    return cnn_model