# Shared imports for the snippets below (Keras 2.x, TensorFlow backend).
# Module-level constants such as LOGITS, IMG_DIM, BATCH_SIZE, IMAGE_SHAPE and
# the helper module come from each snippet's source project.
import os
import pickle
import time
from typing import Any

from keras import backend as K
from keras import metrics
from keras.layers import (Activation, Add, BatchNormalization, Conv2D,
                          Conv2DTranspose, Dense, Dropout, Flatten, Input,
                          Lambda, MaxPooling2D, Reshape, UpSampling2D,
                          ZeroPadding2D, concatenate)
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.regularizers import l2
from sklearn.preprocessing import LabelBinarizer


def cnn(keep_prob=0.5, input_shape=(200, 400, 3)):
    # keras.regularizers.l2 replaces the original tf.contrib.layers
    # regularizer, which is gone in TensorFlow 2.
    reg = l2(1e-3)
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape))
    model.add(Conv2D(filters=15, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    model.add(Conv2D(filters=25, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    model.add(Conv2D(filters=35, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    model.add(Flatten())
    model.add(Dense(50))
    if keep_prob < 1:
        # Keras Dropout takes the fraction to drop, so convert from the
        # keep probability the argument expresses.
        model.add(Dropout(1 - keep_prob))
    model.add(Dense(10))
    if keep_prob < 1:
        model.add(Dropout(1 - keep_prob))
    model.add(Dense(6, activation='softmax', name=LOGITS))
    return model

def __init__(self):
    reg = l2(1e-3)  # replaces the original tf.contrib regularizer
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(114, 52, 3)))
    model.add(Conv2D(filters=24, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    model.add(Conv2D(filters=36, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    model.add(Conv2D(filters=64, kernel_size=3, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    model.add(Flatten())
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(6, activation='softmax', name=LOGITS))
    model.load_weights('./light_classification/weights.h5')
    self.model = model

def build_read_tensor_2d_model(args):
    '''Build Read Tensor 2d CNN model for classifying variants.

    2d Convolutions followed by dense connection.
    Dynamically sets input channels based on args via
    defines.total_input_channels_from_args(args).
    Uses the functional API. Supports theano or tensorflow channel ordering.
    Prints out model summary.

    Arguments
        args.window_size: Length in base-pairs of sequence centered at the
            variant to use as input.
        args.labels: The output labels (e.g. SNP, NOT_SNP, INDEL, NOT_INDEL)
        args.channels_last: Theano->False or Tensorflow->True channel
            ordering flag

    Returns
        The keras model
    '''
    if args.channels_last:
        in_shape = (args.read_limit, args.window_size, args.channels_in)
    else:
        in_shape = (args.channels_in, args.read_limit, args.window_size)

    read_tensor = Input(shape=in_shape, name="read_tensor")
    read_conv_width = 16
    x = Conv2D(128, (read_conv_width, 1), padding='valid', activation="relu",
               kernel_initializer="he_normal")(read_tensor)
    x = Conv2D(64, (1, read_conv_width), padding='valid', activation="relu",
               kernel_initializer="he_normal")(x)
    x = MaxPooling2D((3, 1))(x)
    x = Conv2D(64, (1, read_conv_width), padding='valid', activation="relu",
               kernel_initializer="he_normal")(x)
    x = MaxPooling2D((3, 3))(x)
    x = Flatten()(x)
    x = Dense(units=32, kernel_initializer='normal', activation='relu')(x)
    prob_output = Dense(units=len(args.labels), kernel_initializer='normal',
                        activation='softmax')(x)

    model = Model(inputs=[read_tensor], outputs=[prob_output])

    adamo = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    my_metrics = [metrics.categorical_accuracy]
    model.compile(loss='categorical_crossentropy', optimizer=adamo,
                  metrics=my_metrics)
    model.summary()

    if os.path.exists(args.weights_hd5):
        model.load_weights(args.weights_hd5, by_name=True)
        print('Loaded model weights from:', args.weights_hd5)

    return model
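
# Usage sketch for build_read_tensor_2d_model. The attribute names follow the
# docstring above; the concrete values and the SimpleNamespace container are
# illustrative assumptions, not part of the original code.
from types import SimpleNamespace

args = SimpleNamespace(
    window_size=128,     # base-pairs of sequence around the variant
    read_limit=128,      # rows of reads in the tensor (assumed)
    channels_in=15,      # set via defines.total_input_channels_from_args in the original
    channels_last=True,  # TensorFlow channel ordering
    labels=['SNP', 'NOT_SNP', 'INDEL', 'NOT_INDEL'],
    weights_hd5='',      # no pre-trained weights to load
)
model = build_read_tensor_2d_model(args)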

def shared_dq_model():
    img_input = Input(shape=IMG_DIM, name="discriminator_input")
    # The Input layer already fixes the shape, so the redundant
    # input_shape=(28, 28, 1) argument on the first Conv2D is dropped.
    h = Conv2D(64, (5, 5), padding='same')(img_input)
    h = Activation('tanh')(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = Conv2D(128, (5, 5))(h)
    h = Activation('tanh')(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = Flatten()(h)
    h = Dense(1024)(h)
    h = Activation('tanh')(h)
    return Model(inputs=img_input, outputs=[h], name="shared_dq")
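
# shared_dq_model() expects the module-level constant IMG_DIM; for the
# MNIST-sized inputs the dropped kwarg implied, that would be:
IMG_DIM = (28, 28, 1)

# In the InfoGAN pattern the name suggests, this trunk is shared between the
# discriminator head and the auxiliary Q head. A minimal sketch of that
# wiring; the head sizes and names are assumptions, not from the original:
shared = shared_dq_model()
d_out = Dense(1, activation='sigmoid', name='d_head')(shared.output)
q_out = Dense(10, activation='softmax', name='q_disc_head')(shared.output)
discriminator = Model(shared.input, d_out, name='discriminator')
q_network = Model(shared.input, q_out, name='q_network')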

def build_network(row, col, channels, num_class):
    model = Sequential()
    model.add(Conv2D(32, (8, 8), strides=(4, 4), activation='relu',
                     input_shape=(row, col, channels)))
    model.add(Conv2D(64, (4, 4), strides=(2, 2), activation='relu'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    # A softmax is required here: categorical_crossentropy (as compiled
    # below) expects class probabilities, not raw logits.
    model.add(Dense(num_class, activation='softmax'))
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

def add_context(model: Sequential) -> Sequential:
    """
    Append the context layers to the frontend.
    """
    model.add(ZeroPadding2D(padding=(33, 33)))
    model.add(Conv2D(42, 3, activation='relu', name='ct_conv1_1'))
    model.add(Conv2D(42, 3, activation='relu', name='ct_conv1_2'))
    # The stray positional argument after the kernel size (a leftover from
    # the old AtrousConvolution2D signature) would have been read as
    # strides=3, which Keras rejects alongside a dilation rate; it is
    # dropped here.
    model.add(Conv2D(84, 3, dilation_rate=(2, 2), activation='relu',
                     name='ct_conv2_1'))
    model.add(Conv2D(168, 3, dilation_rate=(4, 4), activation='relu',
                     name='ct_conv3_1'))
    model.add(Conv2D(336, 3, dilation_rate=(8, 8), activation='relu',
                     name='ct_conv4_1'))
    model.add(Conv2D(672, 3, dilation_rate=(16, 16), activation='relu',
                     name='ct_conv5_1'))
    model.add(Conv2D(672, 3, activation='relu', name='ct_fc1'))
    model.add(Conv2D(21, 1, name='ct_final'))
    return model

def layers(keep_prob: Any, input_shape=(114, 52, 3), add_softmax=False):
    reg = l2(1e-3)
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape))
    model.add(Conv2D(filters=24, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    # model.add(MaxPooling2D((2, 2), 2))
    model.add(Conv2D(filters=36, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    # model.add(MaxPooling2D((2, 2), 2))
    model.add(Conv2D(filters=64, kernel_size=5, strides=1, padding='same',
                     activation='relu', kernel_regularizer=reg))
    # model.add(MaxPooling2D((2, 2), 2))
    model.add(Flatten())
    model.add(Dense(50))
    if keep_prob < 1:
        # As in cnn() above: convert the keep probability to the drop
        # fraction Keras expects.
        model.add(Dropout(1 - keep_prob))
    model.add(Dense(10))
    if keep_prob < 1:
        model.add(Dropout(1 - keep_prob))
    if add_softmax:
        model.add(Dense(6, activation='softmax', name=LOGITS))
    else:
        model.add(Dense(6, name=LOGITS))
    return model

def generator_model():
    noise_input = Input(batch_shape=(BATCH_SIZE, Z_DIM), name='z_input')
    disc_input = Input(batch_shape=(BATCH_SIZE, DISC_DIM), name='disc_input')
    cont_input = Input(batch_shape=(BATCH_SIZE, CONT_DIM), name='cont_input')
    input_list = [noise_input, disc_input, cont_input]
    gen_input = concatenate(input_list, axis=1, name="generator_input")
    h = Dense(1024)(gen_input)
    h = Activation('tanh')(h)
    h = Dense(128 * 7 * 7)(h)
    h = BatchNormalization()(h)
    h = Activation('tanh')(h)
    h = Reshape((7, 7, 128))(h)  # input_shape is redundant in the functional API
    h = UpSampling2D(size=(2, 2))(h)
    h = Conv2D(64, (5, 5), padding='same')(h)
    h = Activation('tanh')(h)
    h = UpSampling2D(size=(2, 2))(h)
    h = Conv2D(1, (5, 5), padding='same')(h)
    h = Activation('tanh')(h)
    return Model(inputs=input_list, outputs=[h], name="generator")
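
# A sketch of the constants generator_model() expects. The 28x28x1 output
# follows from the two 2x upsamplings of the 7x7 feature map above; the
# latent sizes below are assumptions, not values from the original project.
BATCH_SIZE = 64
Z_DIM = 62      # unstructured noise (assumed)
DISC_DIM = 10   # one-hot categorical latent code (assumed)
CONT_DIM = 2    # continuous latent codes (assumed)

generator = generator_model()
generator.summary()  # final output shape: (64, 28, 28, 1)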

def run(model):
    # Download kitti dataset
    helper.maybe_download_training_img(DATA_DIRECTORY)

    x, y = helper.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # Block 1
        block1_conv1 = Conv2D(64, (3, 3), activation='relu', padding='same',
                              name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',
                              name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3), activation='relu', padding='same',
                              name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3), activation='relu', padding='same',
                              name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3), activation='relu', padding='same',
                              name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3), activation='relu', padding='same',
                              name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3), activation='relu', padding='same',
                              name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same',
                              name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same',
                              name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same',
                              name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same',
                              name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same',
                              name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same',
                              name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block5_pool')(block5_conv3)

        # FCN-8s style skip connections: 1x1 convolutions reduce each tap to
        # two channels, transposed convolutions upsample, and Add() merges
        # the streams.
        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2, kernel_size=(4, 4), strides=(2, 2),
                                     padding="same")(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2, kernel_size=(4, 4), strides=(2, 2),
                                     padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        upsample_3 = Conv2DTranspose(2, kernel_size=(16, 16), strides=(8, 8),
                                     padding="same")(add_2)
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='categorical_crossentropy', optimizer=adam,
                      metrics=['accuracy'])

    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model' + str(time.time()) + '.h5')

def get_frontend(input_width, input_height) -> Sequential:
    model = Sequential()
    # model.add(ZeroPadding2D((1, 1), input_shape=(input_width, input_height, 3)))
    model.add(Conv2D(64, 3, activation='relu', name='conv1_1',
                     input_shape=(input_width, input_height, 3)))
    model.add(Conv2D(64, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Conv2D(128, 3, activation='relu', name='conv2_1'))
    model.add(Conv2D(128, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_1'))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_2'))
    model.add(Conv2D(256, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_1'))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_2'))
    model.add(Conv2D(512, 3, activation='relu', name='conv4_3'))

    # Compared to the original VGG16, we skip the next 2 MaxPool layers,
    # and go ahead with dilated convolutional layers instead
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu',
                     name='conv5_1'))
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu',
                     name='conv5_2'))
    model.add(Conv2D(512, 3, dilation_rate=(2, 2), activation='relu',
                     name='conv5_3'))

    # Compared to the VGG16, we replace the FC layer with a convolution
    model.add(Conv2D(4096, 7, dilation_rate=(4, 4), activation='relu',
                     name='fc6'))
    model.add(Dropout(0.5))
    model.add(Conv2D(4096, 1, activation='relu', name='fc7'))
    model.add(Dropout(0.5))
    # Note: this layer has linear activations, not ReLU
    model.add(Conv2D(21, 1, activation='linear', name='fc-final'))
    # model.layers[-1].output_shape == (None, 16, 16, 21)
    return model
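
# get_frontend() and add_context() above chain together: the VGG-style
# frontend downsamples with valid convolutions, then the dilated context
# module refines the class maps. A minimal sketch; the 500x500 input is what
# produces the (None, 16, 16, 21) shape noted above, and the weights path is
# a hypothetical placeholder:
model = get_frontend(500, 500)
model = add_context(model)
model.summary()
# model.load_weights('path/to/pretrained_weights.h5')  # hypothetical path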

def fcn(num_classes=2, learning=True) -> Sequential:
    K.set_learning_phase(learning)
    reg = l2(1e-3)
    kernel_size = 3
    pad = 'same'
    act2 = 'relu'
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(200, 400, 3)))
    model.add(Conv2D(32, kernel_size, strides=1, padding=pad, activation=act2,
                     kernel_regularizer=reg))
    model.add(Conv2D(32, kernel_size, strides=1, padding=pad, activation=act2,
                     kernel_regularizer=reg))
    model.add(MaxPooling2D((2, 2), 2))
    model.add(Conv2D(64, kernel_size, strides=1, padding=pad, activation=act2,
                     kernel_regularizer=reg))
    model.add(Conv2D(64, kernel_size, strides=1, padding=pad, activation=act2,
                     kernel_regularizer=reg))
    model.add(MaxPooling2D((2, 2), 2))
    model.add(Conv2D(128, kernel_size, strides=1, padding=pad, activation=act2,
                     kernel_regularizer=reg))
    model.add(Conv2D(128, kernel_size, strides=1, padding=pad, activation=act2,
                     kernel_regularizer=reg))
    model.add(MaxPooling2D((2, 2), 2))
    # 1x1 convolution down to one channel per class
    model.add(Conv2D(num_classes, 1, strides=1, padding=pad,
                     kernel_regularizer=reg))
    # Three stride-2 transposed convolutions upsample back to the input size
    model.add(Conv2DTranspose(num_classes, kernel_size, strides=2,
                              activation=act2, padding=pad))
    model.add(BatchNormalization())
    model.add(Conv2DTranspose(num_classes, kernel_size, strides=2,
                              activation=act2, padding=pad))
    model.add(BatchNormalization())
    model.add(Conv2DTranspose(num_classes, kernel_size, strides=2,
                              activation=act2, padding=pad, name=LOGITS))
    model.add(Reshape((-1, num_classes)))
    return model

lb = LabelBinarizer().fit(Y_train)
Y_train = lb.transform(Y_train)
Y_test = lb.transform(Y_test)

# Save the mapping from labels to one-hot encodings.
# We'll need this later when we use the model to decode what its predictions mean.
with open(MODEL_LABELS_FILENAME, "wb") as f:
    pickle.dump(lb, f)

# Build the neural network!
model = Sequential()

# First convolutional layer with max pooling
model.add(Conv2D(20, (5, 5), padding="same", input_shape=(20, 20, 1),
                 activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))

# Output layer with 32 nodes (one for each possible letter/number we predict)
model.add(Dense(32, activation="softmax"))

# Ask Keras to build the TensorFlow model behind the scenes
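# The comment above implies a compile call follows. A plausible completion
# with the standard choices for a softmax multi-class classifier; the exact
# optimizer used in the original is an assumption:
model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy"])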