def rpn(base_layers, num_anchors):
    # Shared 3x3 convolutional stack on top of the backbone feature map.
    # (Arguments updated to the Keras 2 Conv2D signature: kernel_size tuple and
    # padding= instead of the old Keras 1 "rows, cols, border_mode" form.)
    x = convolutional.Conv2D(512, (3, 3), padding='same', activation='relu',
                             name='rpn_conv1')(base_layers)
    x = convolutional.Conv2D(1024, (3, 3), padding='same', activation='relu',
                             name='rpn_conv2')(x)
    x = convolutional.Conv2D(512, (3, 3), padding='same', activation='relu',
                             name='rpn_conv3')(x)
    # 1x1 heads: one objectness score and four box-regression targets per anchor.
    x_class = convolutional.Conv2D(num_anchors, (1, 1), activation='sigmoid',
                                   padding='same', name='rpn_out_class')(x)
    x_regr = convolutional.Conv2D(num_anchors * 4, (1, 1), activation=None,
                                  name='rpn_out_regress')(x)
    return [x_class, x_regr, base_layers]
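# Hypothetical usage sketch (not part of the original snippet): wiring the RPN
# heads onto a stand-in backbone. A real detector would pass the feature map of
# e.g. a VGG or ResNet backbone as `base_layers`; the single Conv2D below is only
# a placeholder, and num_anchors=9 is an assumption.
from keras.layers import Input, convolutional
from keras.models import Model

num_anchors = 9
img_input = Input(shape=(None, None, 3))
base_layers = convolutional.Conv2D(64, (3, 3), padding='same',
                                   activation='relu')(img_input)
x_class, x_regr, _ = rpn(base_layers, num_anchors)
rpn_model = Model(inputs=img_input, outputs=[x_class, x_regr])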
def cnn_model_1():
    model = Sequential()
    model.add(convolutional.Conv2D(filters=16, kernel_size=3,
                                   input_shape=(img_channels, img_rows, img_cols),
                                   activation='relu', strides=1))
    model.add(convolutional.Conv2D(filters=16, kernel_size=3,
                                   activation='relu', strides=1))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    # flatten to 1 dim
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    return model
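# Hypothetical usage sketch (not part of the original snippet): the
# (img_channels, img_rows, img_cols) input shape implies the 'channels_first'
# image data format, so the module-level globals might be set up like this
# before building and compiling the model. The MNIST-sized values are assumptions.
from keras import backend as K
K.set_image_data_format('channels_first')
img_channels, img_rows, img_cols = 1, 28, 28

model = cnn_model_1()
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])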
def test_convolution_2d():
    num_samples = 2
    filters = 2
    stack_size = 3
    kernel_size = (3, 2)
    num_row = 7
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue

            layer_test(convolutional.Conv2D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_first'},
                       input_shape=(num_samples, stack_size, num_row, num_col))

            layer_test(convolutional.Conv2D,
                       kwargs={'filters': filters,
                               'kernel_size': 3,
                               'padding': padding,
                               'data_format': 'channels_last',
                               'activation': None,
                               'kernel_regularizer': 'l2',
                               'bias_regularizer': 'l2',
                               'activity_regularizer': 'l2',
                               'kernel_constraint': 'max_norm',
                               'bias_constraint': 'max_norm',
                               'strides': strides},
                       input_shape=(num_samples, num_row, num_col, stack_size))

        # Test dilation
        if K.backend() != 'cntk':
            # CNTK only supports dilated conv on GPU
            layer_test(convolutional.Conv2D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'dilation_rate': (2, 2)},
                       input_shape=(num_samples, num_row, num_col, stack_size))

    # Test invalid use case
    with pytest.raises(ValueError):
        model = Sequential([convolutional.Conv2D(filters=filters,
                                                 kernel_size=kernel_size,
                                                 padding=padding,
                                                 batch_input_shape=(None, None, 5, None))])
def _cnn():
    train_data, train_target, test_data = load_data()  # load data from utility
    # randomly split data into training and validation sets
    train_data, validation_data, train_target, validation_target = train_test_split(
        train_data, train_target, test_size=0.2, random_state=42)

    test_data, input_shape = _reshape(test_data)              # see docstring
    train_data, input_shape = _reshape(train_data)            # see docstring
    validation_data, input_shape = _reshape(validation_data)  # see docstring

    model = Sequential()  # sequential model
    model.add(convolutional.Conv2D(  # first convolutional layer
        filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(convolutional.Conv2D(
        64, (3, 3), activation='relu'))  # 2nd conv layer, using relu for activation
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))  # 1st pooling
    model.add(Dropout(0.25))  # prevent overfitting w/ dropout 1
    model.add(Flatten())  # flatten for dense layers
    model.add(Dense(128, activation='relu'))  # 1st dense layer
    model.add(Dropout(0.5))  # prevent overfitting w/ dropout 2
    model.add(Dense(3, activation='softmax'))  # softmax output over 3 classes

    model.compile(  # compile using Adadelta as the optimizer
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'])
    model.fit(train_data, train_target, batch_size=128, epochs=12,
              verbose=1)  # fit using training data
    loss, accuracy = model.evaluate(
        validation_data, validation_target, verbose=0)  # evaluate using validation data
    print("accuracy: {}".format(accuracy))

    class_output = model.predict_classes(test_data)  # predict on test_data
    return class_output
def mnist_model(input_shape):
    """Creates a MNIST model."""
    model = sequential_model_lib.Sequential()

    # Adding custom pass-through layer to visualize input images.
    model.add(LayerForImageSummary())

    model.add(conv_layer_lib.Conv2D(32, kernel_size=(3, 3), activation='relu',
                                    input_shape=input_shape))
    model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))
    model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
    model.add(layer_lib.Dropout(0.25))
    model.add(layer_lib.Flatten())
    model.add(layer_lib.Dense(128, activation='relu'))
    model.add(layer_lib.Dropout(0.5))
    model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))

    # Adding custom pass-through layer for summary recording.
    model.add(LayerForHistogramSummary())
    return model
def test_mixing_preprocessing_and_regular_layers(self):
    stage = preprocessing_stage.PreprocessingStage([
        image_preprocessing.CenterCrop(16, 16),
        normalization.Normalization(),
        convolutional.Conv2D(4, 3)
    ])
    data = np.ones((16, 20, 20, 3), dtype='float32')
    stage.adapt(data)
    _ = stage(data)
    stage.compile('rmsprop', 'mse')
    stage.fit(data, np.ones((16, 14, 14, 4)))
    _ = stage.evaluate(data, np.ones((16, 14, 14, 4)))
    _ = stage.predict(data)
def add_resnet_unit(path, K, **params):
    """Add a resnet unit to path starting at layer 'K',
    adding as many (BatchNorm + ReLU + Conv2D) modules as specified by n_skip_K.

    Returns the new path and the next layer index, i.e. K + n_skip_K, as a tuple.
    """
    # loosely based on https://github.com/keunwoochoi/residual_block_keras
    # see also the keras docs here:
    # http://keras.io/getting-started/functional-api-guide/#all-models-are-callable-just-like-layers

    block_input = path
    # use n_skip_K if it is there, default to 1
    skip_key = "n_skip_{:d}".format(K)
    n_skip = params.get(skip_key, 1)
    for i in range(n_skip):
        layer = K + i
        # add BatchNorm
        path = BatchNormalization()(path)
        # add ReLU
        path = Activation('relu')(path)
        # use filter_width_K if it is there, otherwise use 3
        filter_key = "filter_width_{:d}".format(layer)
        filter_width = params.get(filter_key, 3)
        # add Conv2D
        path = convolutional.Conv2D(
            filters=params["filters_per_layer"],
            kernel_size=(filter_width, filter_width),
            kernel_initializer='uniform',
            activation='linear',
            padding='same',
            kernel_constraint=None,
            activity_regularizer=None,
            trainable=True,
            strides=(1, 1),
            use_bias=True,
            bias_regularizer=None,
            bias_constraint=None,
            data_format="channels_first",
            kernel_regularizer=None)(path)
    # Merge 'input layer' with the path
    # path = merge([block_input, path], mode='sum')
    path = add([block_input, path])
    return path, K + n_skip
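# Hypothetical usage sketch (not part of the original snippet): stacking two
# residual units after an initial channels_first convolution. The `params` keys
# follow the names read inside add_resnet_unit; the input planes, board size,
# and filter count are illustrative assumptions.
from keras.layers import Input, convolutional
from keras.models import Model

params = {"filters_per_layer": 64, "n_skip_1": 2}
board_input = Input(shape=(8, 19, 19))  # (input planes, board, board)
path = convolutional.Conv2D(params["filters_per_layer"], (3, 3), padding='same',
                            data_format='channels_first')(board_input)
path, layer = add_resnet_unit(path, 1, **params)      # unit 1: two BN+ReLU+Conv blocks
path, layer = add_resnet_unit(path, layer, **params)  # unit 2: default single block
model = Model(inputs=board_input, outputs=path)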
def create_network(**kwargs):
    """construct a fast rollout neural network.

    Keyword Arguments:
    - input_dim: depth of features to be processed by first layer (no default)
    - board:     width of the go board to be processed (default 19)
    """
    defaults = {"board": 19}
    # copy defaults, but override with anything in kwargs
    params = defaults
    params.update(kwargs)

    # create the network:
    network = Sequential()

    # create one convolutional layer
    network.add(convolutional.Conv2D(
        input_shape=(params["input_dim"], params["board"], params["board"]),
        filters=1,
        kernel_size=(1, 1),
        kernel_initializer='uniform',
        activation='relu',
        padding='same',
        kernel_constraint=None,
        activity_regularizer=None,
        trainable=True,
        strides=[1, 1],
        use_bias=True,
        bias_regularizer=None,
        bias_constraint=None,
        data_format="channels_first",
        kernel_regularizer=None))

    # reshape output to be board x board
    network.add(Flatten())
    # add a bias to each board location
    network.add(Bias())
    # softmax makes it into a probability distribution
    network.add(Activation('softmax'))

    return network
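# Hypothetical usage sketch (not part of the original snippet): building and
# compiling the rollout network for the default 19x19 board with an assumed
# 8 input feature planes. Bias is the custom layer referenced by create_network.
network = create_network(input_dim=8)
network.compile(loss='categorical_crossentropy', optimizer='sgd')
network.summary()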