import keras
from keras import backend as K
from keras import layers
from keras.layers import (Input, Conv2D, MaxPooling2D, AveragePooling2D,
                          BatchNormalization, Dense, Flatten)
from keras.models import Model

# `kr` is assumed to be the keras-resnet builder module that provides the
# _handle_dim_ordering, _get_block, _conv_bn_relu, _residual_block and
# _bn_relu helpers and the ROW_AXIS/COL_AXIS constants used below.


def build_resnet(input_shape, block_fn, repetitions, plot_model_f):
    """Builds a custom ResNet-like architecture.

    Args:
        input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols).
        block_fn: The block function to use, either `basic_block` or
            `bottleneck`. The original paper used `basic_block` for networks
            with fewer than 50 layers.
        repetitions: Number of repetitions of the successive block units. At
            each block unit, the number of filters is doubled and the spatial
            size is halved.
        plot_model_f: Plotting function for the model; currently unused, since
            the plotting call below is commented out.

    Returns:
        The ResNet part of the model (no classifier head).
    """
    kr._handle_dim_ordering()
    if len(input_shape) != 3:
        raise ValueError(
            "Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")

    # Permute dimension order if necessary.
    if K.image_dim_ordering() == 'tf':
        input_shape = (input_shape[1], input_shape[2], input_shape[0])

    # Load the block function from its name if a string was passed.
    block_fn = kr._get_block(block_fn)

    input = Input(shape=input_shape)
    conv1 = kr._conv_bn_relu(filters=64, kernel_size=(7, 7),
                             strides=(2, 2))(input)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                         padding="same")(conv1)

    # Stack the residual stages, doubling the filter count at each stage.
    block = pool1
    filters = 64
    for i, r in enumerate(repetitions):
        block = kr._residual_block(block_fn, filters=filters, repetitions=r,
                                   is_first_layer=(i == 0))(block)
        filters *= 2

    # Last activation.
    block = kr._bn_relu(block)

    # Classifier block: average pooling over the remaining spatial dims.
    block_shape = K.int_shape(block)
    pool2 = AveragePooling2D(pool_size=(block_shape[kr.ROW_AXIS],
                                        block_shape[kr.COL_AXIS]),
                             strides=(1, 1))(block)

    model = Model(inputs=input, outputs=pool2)
    # plot_model(model, to_file='../model_resnet_inner.png', show_shapes=True)
    return model
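# A minimal usage sketch (not from the original source), assuming a
# ResNet-18-style layout; the input shape, repetitions, and the use of Keras's
# own plot_model for `plot_model_f` are illustrative, not the author's values:
#
#     from keras.utils import plot_model
#     backbone = build_resnet(input_shape=(3, 224, 224),
#                             block_fn='basic_block',
#                             repetitions=(2, 2, 2, 2),
#                             plot_model_f=plot_model)
#     backbone.summary()

# The two helpers referenced below, `squeeze_excite_block` and `wingLoss`, are
# not defined in this excerpt. The sketches that follow are the standard
# formulations (Hu et al. 2018; Feng et al. 2018) under a channels-last
# assumption, not the author's confirmed implementations.

def squeeze_excite_block(tensor, ratio=16):
    """Squeeze-and-Excitation block: a sketch of the helper used below."""
    filters = K.int_shape(tensor)[-1]
    # Squeeze: global average pool to one descriptor per channel.
    se = layers.GlobalAveragePooling2D()(tensor)
    se = layers.Reshape((1, 1, filters))(se)
    # Excite: bottleneck MLP that outputs per-channel gates in (0, 1).
    se = layers.Dense(filters // ratio, activation='relu',
                      kernel_initializer='he_normal', use_bias=False)(se)
    se = layers.Dense(filters, activation='sigmoid',
                      kernel_initializer='he_normal', use_bias=False)(se)
    # Rescale the input feature map channel-wise.
    return layers.multiply([tensor, se])


def wingLoss(y_true, y_pred, w=10.0, epsilon=2.0):
    """Wing loss for landmark regression: a sketch matching the name used below.

    Behaves like a scaled log for small errors and like L1 for large ones;
    w and epsilon are the paper's defaults, not values from this source.
    """
    x = K.abs(y_true - y_pred)
    # Offset so the two pieces of the loss meet at |x| = w.
    c = w - w * K.log(1.0 + w / epsilon)
    losses = K.switch(x < w,
                      w * K.log(1.0 + x / epsilon),
                      x - c)
    return K.mean(losses)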
# Continuation of stages 3 and 4 of the landmark model. Earlier tensors such
# as `input`, `conv6`, `block3conv2` and the CHANNEL_AXIS constant come from
# code above this excerpt.
block3conv2 = BatchNormalization(axis=CHANNEL_AXIS)(block3conv2)
block3conv2 = layers.LeakyReLU()(block3conv2)
block3b = _residual_block(basic_block, filters=256, repetitions=1,
                          is_first_layer=False)(block3conv2)

block4 = _residual_block(basic_block, filters=512, repetitions=1,
                         is_first_layer=True)(block3b)

# Concatenate with the earlier conv6 feature map, recalibrate channels with a
# squeeze-and-excite block, then apply two conv-BN-LeakyReLU layers.
block4concat = keras.layers.Concatenate()([block4, conv6])
block4se = squeeze_excite_block(block4concat)
block4conv1 = Conv2D(512, (3, 3), padding='same',
                     kernel_initializer='he_normal')(block4se)
block4conv1 = BatchNormalization(axis=CHANNEL_AXIS)(block4conv1)
block4conv1 = layers.LeakyReLU()(block4conv1)
block4conv2 = Conv2D(512, (3, 3), padding='same',
                     kernel_initializer='he_normal')(block4conv1)
block4conv2 = BatchNormalization(axis=CHANNEL_AXIS)(block4conv2)
block4conv2 = layers.LeakyReLU()(block4conv2)
block4b = _residual_block(basic_block, filters=512, repetitions=1,
                          is_first_layer=False)(block4conv2)

# Final BN-ReLU, average pooling over the full feature map, and a 20-unit
# linear head (presumably x/y coordinates for 10 landmarks, given the wing
# loss and the 'ldmk' checkpoint name).
blockact = _bn_relu(block4b)
block_shape = K.int_shape(blockact)
poolr = AveragePooling2D(pool_size=(block_shape[1], block_shape[2]),
                         strides=(1, 1))(blockact)
flatten = Flatten()(poolr)
dense = Dense(units=20, kernel_initializer="he_normal",
              activation="linear")(flatten)

model = Model(inputs=input, outputs=dense)
model.load_weights('resnet_val_loss_twoconvsafterse_ldmk_checkpoint.h5')
model.summary()
model.compile(loss=wingLoss, optimizer='Adam', metrics=['mean_absolute_error'])
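# Training sketch (not from the original source): the data names and
# hyperparameters below are assumptions; the checkpoint callback mirrors the
# filename loaded above.
#
#     model.fit(X_train, y_train,
#               batch_size=32, epochs=100,
#               validation_data=(X_val, y_val),
#               callbacks=[keras.callbacks.ModelCheckpoint(
#                   'resnet_val_loss_twoconvsafterse_ldmk_checkpoint.h5',
#                   monitor='val_loss', save_best_only=True)])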