def create_simple_model(num_classes, layer1_filters=32, layer2_filters=64,
                        epochs=5):
    """Build and compile a small two-block CNN for image classification.

    Args:
        num_classes: number of output classes (width of the softmax layer).
        layer1_filters: number of filters in the first convolution block.
        layer2_filters: number of filters in the second convolution block.
        epochs: suggested training epoch count returned to the caller.
            Previously hard-coded to 5; the default preserves old behavior.

    Returns:
        Tuple of (compiled Keras Sequential model, epochs).
    """
    n_conv = 2  # 2x2 convolution kernels

    model = models.Sequential()

    # First block: pad -> conv -> pool.
    # NOTE(review): input_shape=(1, IMG_COLS, IMG_ROWS) assumes a
    # channels-first single-channel image — confirm the backend's
    # image ordering matches.
    model.add(conv.ZeroPadding2D((1, 1),
                                 input_shape=(1, IMG_COLS, IMG_ROWS)))
    model.add(conv.Convolution2D(layer1_filters, n_conv, n_conv,
                                 activation="relu"))
    model.add(conv.MaxPooling2D(strides=(2, 2)))

    # Second block: pad -> conv -> pool.
    model.add(conv.ZeroPadding2D((1, 1)))
    model.add(conv.Convolution2D(layer2_filters, n_conv, n_conv,
                                 activation="relu"))
    model.add(conv.MaxPooling2D(strides=(2, 2)))

    # Classifier head.
    model.add(core.Flatten())
    model.add(core.Dropout(0.2))
    model.add(core.Dense(128, activation="relu"))
    model.add(core.Dense(num_classes, activation="softmax"))

    model.summary()
    model.compile(loss="categorical_crossentropy", optimizer="adadelta",
                  metrics=["accuracy"])
    return model, epochs
def Alex_Net(IMG_SIZE, class_num=16):
    """Build an AlexNet-style CNN with L2 weight decay on every layer.

    Args:
        IMG_SIZE: (height, width) of the RGB input images.
        class_num: number of output classes.

    Returns:
        An uncompiled Keras Sequential model ending in a softmax.
    """
    reg = 0.0002  # single L2 weight-decay factor shared by all layers

    model = Sequential()

    # Block 1: 96 filters, 11x11, stride 4.
    model.add(Conv2D(96, (11, 11), strides=(4, 4), padding="valid",
                     input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3),
                     kernel_regularizer=l2(reg)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=-1))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Block 2: 256 filters, 5x5.
    # Fix: the original passed dim_ordering='default' (a Keras 1 keyword)
    # to ZeroPadding2D while using the Keras 2 API everywhere else; the
    # default data_format is what 'default' meant, so it is simply dropped.
    model.add(convolutional.ZeroPadding2D(padding=(2, 2)))
    model.add(Conv2D(256, (5, 5), padding="valid",
                     kernel_regularizer=l2(reg)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=-1))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Blocks 3-5: three 3x3 convolutions.
    model.add(convolutional.ZeroPadding2D(padding=(1, 1)))
    model.add(Conv2D(384, (3, 3), padding="valid",
                     kernel_regularizer=l2(reg)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=-1))

    model.add(convolutional.ZeroPadding2D(padding=(1, 1)))
    model.add(Conv2D(384, (3, 3), padding="valid",
                     kernel_regularizer=l2(reg)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=-1))

    model.add(convolutional.ZeroPadding2D(padding=(1, 1)))
    model.add(Conv2D(256, (3, 3), padding="valid",
                     kernel_regularizer=l2(reg)))
    # NOTE(review): the original applies no activation after this fifth
    # convolution — kept as-is to preserve behavior.
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Fully connected head.
    model.add(Flatten())
    model.add(Dense(4096, kernel_regularizer=l2(reg)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=-1))
    model.add(Dropout(0.25))
    model.add(Dense(4096, kernel_regularizer=l2(reg)))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=-1))
    model.add(Dropout(0.25))
    model.add(Dense(class_num, kernel_regularizer=l2(reg)))
    model.add(Activation("softmax"))
    return model
def test_zero_padding_2d():
    """ZeroPadding2D: shape tests plus zero-border / one-interior checks
    in both data formats."""
    num_samples = 2
    stack_size = 2
    input_num_row = 4
    input_num_col = 5

    for data_format in ['channels_first', 'channels_last']:
        # Fix: the original assigned both layouts unconditionally, so the
        # channels_first array always overwrote the channels_last one and
        # the channels_last layout was never actually built.
        if data_format == 'channels_last':
            inputs = np.ones((num_samples, input_num_row, input_num_col,
                              stack_size))
        else:
            inputs = np.ones((num_samples, stack_size, input_num_row,
                              input_num_col))

        # basic test
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': (2, 2), 'data_format': data_format},
                   input_shape=inputs.shape)
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': ((1, 2), (3, 4)),
                           'data_format': data_format},
                   input_shape=inputs.shape)

        # correctness test: symmetric (2, 2) padding
        layer = convolutional.ZeroPadding2D(padding=(2, 2),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, offset, :, :], 0.)
                assert_allclose(np_output[:, :, offset, :], 0.)
            assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, :, offset, :], 0.)
                assert_allclose(np_output[:, :, :, offset], 0.)
            # Fix: in channels_first layout rows/cols come AFTER the
            # channel axis; the original sliced [:, 2:-2, 2:-2, :], which
            # takes 2:-2 of the 2-wide channel axis (an empty array) and
            # asserted nothing.
            assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)

        # correctness test: asymmetric ((1, 2), (3, 4)) padding
        layer = convolutional.ZeroPadding2D(padding=((1, 2), (3, 4)),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for top_offset in [0]:
                assert_allclose(np_output[:, top_offset, :, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, bottom_offset, :, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, left_offset, :], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, right_offset, :], 0.)
            assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
        elif data_format == 'channels_first':
            for top_offset in [0]:
                assert_allclose(np_output[:, :, top_offset, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, :, bottom_offset, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, :, left_offset], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, :, right_offset], 0.)
            assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
def test_zero_padding_2d():
    """ZeroPadding2D (legacy get_output API): the two-cell border is zero
    and the interior is untouched, in both train and test phases."""
    batch, channels, rows, cols = 9, 7, 11, 12
    ones = np.ones((batch, channels, rows, cols))

    layer = convolutional.ZeroPadding2D(padding=(2, 2))
    layer.input = K.variable(ones)

    for train in (True, False):
        out = K.eval(layer.get_output(train))
        # Two padded rows/cols at each border must be exactly zero.
        for offset in (0, 1, -1, -2):
            assert_allclose(out[:, :, offset, :], 0.)
            assert_allclose(out[:, :, :, offset], 0.)
        # The original data survives unchanged in the interior.
        assert_allclose(out[:, :, 2:-2, 2:-2], 1.)

    # Config serialization must not raise.
    layer.get_config()
def test_zero_padding_2d(self):
    """Theano-era ZeroPadding2D (`pad=` keyword): borders are zeroed,
    interior kept, and the config is retrievable."""
    shape = (9, 7, 11, 12)  # (samples, channels, rows, cols)
    data = np.ones(shape)

    layer = convolutional.ZeroPadding2D(pad=(2,2))
    layer.input = theano.shared(value=data)

    for train in (True, False):
        out = layer.get_output(train).eval()
        for offset in (0, 1, -1, -2):
            # zero padding, two cells deep on every side
            assert_allclose(out[:, :, offset, :], 0.)
            assert_allclose(out[:, :, :, offset], 0.)
        # untouched interior
        assert_allclose(out[:, :, 2:-2, 2:-2], 1.)

    config = layer.get_config()
def test_mixing_preprocessing_and_regular_layers(self):
    """A FunctionalPreprocessingStage that mixes preprocessing and regular
    layers can adapt, run, evaluate and predict — but refuses fit()."""
    img_shape = (10, 10, 3)
    in0, in1, in2 = (Input(shape=img_shape) for _ in range(3))

    summed = merge.Add()([in0, in1])
    cropped = image_preprocessing.CenterCrop(8, 8)(in2)
    cropped = convolutional.ZeroPadding2D(padding=1)(cropped)
    out = merge.Add()([summed, cropped])
    out = normalization.Normalization()(out)
    out = convolutional.Conv2D(4, 3)(out)

    stage = preprocessing_stage.FunctionalPreprocessingStage(
        [in0, in1, in2], out)

    data = [np.ones((12,) + img_shape, dtype='float32') for _ in range(3)]
    stage.adapt(data)
    _ = stage(data)
    stage.compile('rmsprop', 'mse')

    # fit() on in-memory arrays must be rejected for preprocessing stages.
    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
        stage.fit(data, np.ones((12, 8, 8, 4)))

    # The same rejection applies when inputs arrive as a tf.data pipeline.
    slices = [
        tf.data.Dataset.from_tensor_slices(np.ones((12, 10, 10, 3)))
        for _ in range(3)
    ]
    ds_x = tf.data.Dataset.zip(tuple(slices))
    ds_y = tf.data.Dataset.from_tensor_slices(np.ones((12, 8, 8, 4)))
    dataset = tf.data.Dataset.zip((ds_x, ds_y)).batch(4)
    with self.assertRaisesRegex(ValueError, 'Preprocessing stage'):
        stage.fit(dataset)

    # evaluate() and predict() remain supported.
    _ = stage.evaluate(data, np.ones((12, 8, 8, 4)))
    _ = stage.predict(data)
def test_zero_padding_2d():
    """ZeroPadding2D via set_input: generic layer test plus explicit
    border/interior correctness checks."""
    shape = (2, 2, 11, 12)  # (samples, channels, rows, cols)
    data = np.ones(shape)

    # basic test
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': (2, 2)},
               input_shape=shape)

    # correctness test
    layer = convolutional.ZeroPadding2D(padding=(2, 2))
    layer.set_input(K.variable(data), shape=shape)
    out = K.eval(layer.output)
    for offset in (0, 1, -1, -2):
        # two zero rows/cols on every border
        assert_allclose(out[:, :, offset, :], 0.)
        assert_allclose(out[:, :, :, offset], 0.)
    # the interior keeps the original ones
    assert_allclose(out[:, :, 2:-2, 2:-2], 1.)

    layer.get_config()
def test_zero_padding_2d():
    """ZeroPadding2D under the legacy dim_ordering API: tuple, 4-tuple and
    dict padding forms, with border/interior correctness checks."""
    nb_samples = 2
    stack_size = 2
    input_nb_row = 4
    input_nb_col = 5
    dim_ordering = K.image_dim_ordering()
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    if dim_ordering == 'tf':
        input = np.ones((nb_samples, input_nb_row, input_nb_col, stack_size))
    elif dim_ordering == 'th':
        input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))

    # basic test: the three accepted padding spellings
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': (2, 2)},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': (1, 2, 3, 4)},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding2D,
               kwargs={'padding': {'top_pad': 1, 'bottom_pad': 2,
                                   'left_pad': 3, 'right_pad': 4}},
               input_shape=input.shape)

    # correctness test: symmetric (2, 2) padding
    layer = convolutional.ZeroPadding2D(padding=(2, 2))
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    if dim_ordering == 'tf':
        for offset in [0, 1, -1, -2]:
            assert_allclose(np_output[:, offset, :, :], 0.)
            assert_allclose(np_output[:, :, offset, :], 0.)
        assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
    elif dim_ordering == 'th':
        for offset in [0, 1, -1, -2]:
            assert_allclose(np_output[:, :, offset, :], 0.)
            assert_allclose(np_output[:, :, :, offset], 0.)
        # Fix: in 'th' ordering rows/cols come AFTER the channel axis; the
        # original sliced [:, 2:-2, 2:-2, :], which takes 2:-2 of the
        # 2-wide channel axis — an empty array, so the assert was vacuous.
        assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)

    # correctness test: asymmetric (1, 2, 3, 4) padding
    layer = convolutional.ZeroPadding2D(padding=(1, 2, 3, 4))
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    if dim_ordering == 'tf':
        for top_offset in [0]:
            assert_allclose(np_output[:, top_offset, :, :], 0.)
        for bottom_offset in [-1, -2]:
            assert_allclose(np_output[:, bottom_offset, :, :], 0.)
        for left_offset in [0, 1, 2]:
            assert_allclose(np_output[:, :, left_offset, :], 0.)
        for right_offset in [-1, -2, -3, -4]:
            assert_allclose(np_output[:, :, right_offset, :], 0.)
        assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
    elif dim_ordering == 'th':
        for top_offset in [0]:
            assert_allclose(np_output[:, :, top_offset, :], 0.)
        for bottom_offset in [-1, -2]:
            assert_allclose(np_output[:, :, bottom_offset, :], 0.)
        for left_offset in [0, 1, 2]:
            assert_allclose(np_output[:, :, :, left_offset], 0.)
        for right_offset in [-1, -2, -3, -4]:
            assert_allclose(np_output[:, :, :, right_offset], 0.)
        assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
    layer.get_config()
import keras.optimizers as kopt # read data from hard drive train_data_raw = pd.read_csv("./input/train.csv").values test_data_raw = pd.read_csv("./input/test.csv").values img_cols = 28 img_rows = 28 train_X = train_data_raw[:, 1:].reshape(train_data_raw.shape[0], 1, img_rows, img_cols) train_Y = kutils.to_categorical(train_data_raw[:, 0]) num_class = train_Y.shape[1] num_filters_1 = 64 conv_dim = 3 cnn = kmodels.Sequential() cnn.add(kconv.ZeroPadding2D((1,1), input_shape=(1, 28, 28),)) cnn.add(kconv.Convolution2D(num_filters_1, conv_dim, conv_dim, activation="relu")) cnn.add(kpool.MaxPooling2D(strides=(2, 2))) num_filters_2 = 128 cnn.add(kconv.ZeroPadding2D((1, 1))) cnn.add(kconv.Convolution2D(num_filters_2, conv_dim, conv_dim, activation="relu")) cnn.add(kpool.MaxPooling2D(strides=(2, 2))) conv_dim_2 = 3 cnn.add(kconv.ZeroPadding2D((1, 1))) cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu")) cnn.add(kpool.MaxPooling2D(strides=(2, 2))) cnn.add(kconv.ZeroPadding2D((1, 1))) cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu"))
nb_filters_1 = 32  # 64
nb_filters_2 = 64  # 128
nb_filters_3 = 128  # 256
nb_conv = 3  # 3x3 kernels throughout

# Flatten pixels into channels-first (samples, 1, rows, cols) and scale
# to [0, 1].
trainX = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
trainX = trainX.astype(float)
trainX /= 255.0  # preprocess the data

# One-hot encode the label column.
trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()

# Block 1: two padded 3x3 convolutions then a 2x2 max-pool.
# NOTE(review): input_shape=(1, 48, 48) is hard-coded while the reshape
# above uses img_rows/img_cols — confirm both are 48.
cnn.add(conv.ZeroPadding2D((1, 1), input_shape=(1, 48, 48)))
# Consistency fix: the constants nb_filters_1/nb_filters_2/nb_conv were
# defined above but the literals 32/64/3 were repeated in every layer;
# the values are identical, so behavior is unchanged.
cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                           activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                           activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# Block 2: same structure with nb_filters_2 filters.
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                           activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                           activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# Optional third block, kept disabled as in the original:
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
filters = [64, 128]  # conv filters per block
kernel = 3           # 3x3 kernels
pool = 2             # 2x2 pooling stride

# Reshape pixels to channels-first (samples, 1, rows, cols) and scale
# to [0, 1].
trainX = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
trainX = trainX.astype(float)
trainX /= 255.0

# One-hot encode the label column.
trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()

# Block 1: pad -> 3x3 conv (64 filters) -> relu -> 2x2 max-pool.
cnn.add(conv.ZeroPadding2D((1, 1),
                           input_shape=(1, img_rows, img_cols),))
cnn.add(conv.Convolution2D(filters[0], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

# Block 2: pad -> 3x3 conv (128 filters) -> relu -> 2x2 max-pool.
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(filters[1], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

# NOTE(review): zero-padding immediately before Flatten only enlarges the
# flattened vector with zeros — presumably intentional, but verify.
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(core.Flatten())
#build CCPM #使用max pooling而不是k-max pooling,实验的结果证明,max pooling的效果略好于k-max pooling print ('Build model...') convs = [] main_input = Input(shape=(maxlen,), dtype='int32') embedding_map = Embedding(output_dim=embedding_dims, input_dim=max_features, input_length=maxlen,W_regularizer=l2(reg_conf[0]))(main_input) for index in range(embedding_dims): print ("i:",index) t = Lambda(slice,output_shape=(maxlen,1),arguments={'index':index}, name='slice_'+str(index+1))(embedding_map) x = Reshape((maxlen,1,1))(t) #第一层conv and pooling x = convolutional.ZeroPadding2D(padding=(w1-1,0))(x) #卷积1后的尺寸conv1 conv1 = maxlen+w1-1 x = Convolution2D(m1,w1,1,border_mode='valid',subsample=(1,1), activation='linear',dim_ordering='tf',W_regularizer=l2(reg_conf[1]), b_regularizer=l2(reg_conf[1]))(x) #池化1的尺寸pool1 pool1 = 2#conv1+1-L1 print ("第一层pool size:",pool1) x = MaxPooling2D(pool_size=(pool1,1),strides=(2,1), border_mode='valid',dim_ordering='tf')(x) #x = Activation('tanh')(x) x = Activation('sigmoid')(x) #第二层conv and pooling x = convolutional.ZeroPadding2D(padding=(w2-2,0))(x)
def convnet_alexnet_lion_keras(image_dims, nr_classes=6):
    """Build an AlexNet-style Keras functional model for classification.

    Args:
        image_dims: input image shape tuple, e.g. (rows, cols, channels).
        nr_classes: number of softmax output classes. Previously a
            hard-coded constant NR_CLASSES = 6; the default preserves
            old behavior.

    Returns:
        A keras Model mapping the image input to class probabilities.
    """
    # model = Sequential()
    # model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))

    # Renamed from `input` to avoid shadowing the builtin.
    image_input = layers.Input(shape=image_dims, name="Input")

    conv_1 = convolutional.Convolution2D(
        96, 11, 11, border_mode='valid', name="conv_1",
        activation='relu', init='glorot_uniform')(image_input)
    pool_1 = convolutional.MaxPooling2D(pool_size=(3, 3),
                                        name="pool_1")(conv_1)

    zero_padding_1 = convolutional.ZeroPadding2D(
        padding=(1, 1), name="zero_padding_1")(pool_1)
    conv_2 = convolutional.Convolution2D(
        256, 3, 3, border_mode='valid', name="conv_2",
        activation='relu', init='glorot_uniform')(zero_padding_1)
    pool_2 = convolutional.MaxPooling2D(pool_size=(3, 3),
                                        name="pool_2")(conv_2)

    # Consistency: use the same `convolutional` alias as every other layer
    # instead of the fully qualified keras.layers.convolutional path.
    zero_padding_2 = convolutional.ZeroPadding2D(
        padding=(1, 1), name="zero_padding_2")(pool_2)
    conv_3 = convolutional.Convolution2D(
        384, 3, 3, border_mode='valid', name="conv_3",
        activation='relu', init='glorot_uniform')(zero_padding_2)
    conv_4 = convolutional.Convolution2D(
        384, 3, 3, border_mode='valid', name="conv_4",
        activation='relu', init='glorot_uniform')(conv_3)
    conv_5 = convolutional.Convolution2D(
        256, 3, 3, border_mode='valid', name="conv_5",
        activation='relu', init='glorot_uniform')(conv_4)
    pool_3 = convolutional.MaxPooling2D(pool_size=(3, 3),
                                        name="pool_3")(conv_5)

    # Fully connected head: two 4096-unit relu layers with dropout, then
    # the softmax classifier.
    flatten = core.Flatten(name="flatten")(pool_3)
    fc_1 = core.Dense(4096, name="fc_1", activation='relu',
                      init='glorot_uniform')(flatten)
    fc_1 = core.Dropout(0.5, name="fc_1_dropout")(fc_1)
    output = core.Dense(4096, name="Output", activation='relu',
                        init='glorot_uniform')(fc_1)
    output = core.Dropout(0.5, name="Output_dropout")(output)
    fc_2 = core.Dense(nr_classes, name="fc_2", activation='softmax',
                      init='glorot_uniform')(output)

    return models.Model([image_input], [fc_2])