def channel_attention(input):
    # Squeeze spatial information with both average and max pooling, then sum.
    avg_pool = GlobalAveragePooling2D()(input)
    max_pool = GlobalMaxPool2D()(input)
    pool = Add()([avg_pool, max_pool])
    # Two-layer MLP produces per-channel attention weights in [0, 1].
    net = Dense(pool.shape[-1], activation="relu")(pool)
    net = Dense(pool.shape[-1], activation="sigmoid")(net)
    net = Reshape((1, 1, pool.shape[-1]))(net)
    # output = Multiply()([input, net])
    output = Lambda(broadcast_multiply, arguments={'input2': net})(input)
    return output
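# `broadcast_multiply` is referenced above but not defined in this file. A
# minimal sketch of what it presumably does (the parameter names here are
# assumptions): multiply the (H, W, C) feature map by the (1, 1, C) attention
# weights and let TensorFlow broadcast across the spatial dimensions.
def broadcast_multiply(input1, input2):
    return input1 * input2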
def ResNet50(include_top=True, non_top_pooling=None, model_input=None,
             num_classes=1000, weights='imagenet', model_path=""):
    # Number of residual modules and output channel depth for each stage.
    layers = [3, 4, 6, 3]
    channel_depths = [256, 512, 1024, 2048]

    input_object = model_input

    # Stem: 7x7/2 convolution followed by 3x3/2 max pooling.
    output = Conv2D(64, kernel_size=7, strides=2, padding="same")(input_object)
    output = BatchNormalization()(output)
    output = Activation("relu")(output)
    output = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(output)

    output = resnet_first_block_first_module(output, channel_depths[0])

    for i in range(4):
        channel_depth = channel_depths[i]
        num_layers = layers[i]

        strided_pool_first = True
        if i == 0:
            # The first stage was started above, so skip its strided module.
            strided_pool_first = False
            num_layers = num_layers - 1

        output = resnet_block(output, channel_depth=channel_depth,
                              num_layers=num_layers,
                              strided_pool_first=strided_pool_first)

    if include_top:
        output = GlobalAvgPool2D(name="global_avg_pooling")(output)
        output = Dense(num_classes)(output)
        output = Activation("softmax")(output)
    else:
        if non_top_pooling == "Average":
            output = GlobalAvgPool2D()(output)
        elif non_top_pooling == "Maximum":
            output = GlobalMaxPool2D()(output)
        elif non_top_pooling is None:
            pass

    model = Model(inputs=input_object, outputs=output)

    # Both the "imagenet" and "trained" modes load weights from model_path.
    if weights in ("imagenet", "trained"):
        model.load_weights(model_path)

    return model
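# Example usage (a sketch; the weight file path is hypothetical). The caller
# supplies the input tensor, since the function does not create one:
resnet_in = Input(shape=(224, 224, 3))
resnet = ResNet50(include_top=True, model_input=resnet_in,
                  weights="imagenet", model_path="resnet50_imagenet.h5")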
def build_simpnet(input_shape, num_classes):
    from functools import partial

    inputs = Input(shape=input_shape)
    # Every layer shares the same construction; only the filter counts and
    # the pooling/dropout flags vary.
    layer_fn = partial(simpnet_layer, type='binary')

    # group #1
    x = layer_fn(inputs=inputs, num_filters=66)
    x = layer_fn(inputs=x, num_filters=128)
    x = layer_fn(inputs=x, num_filters=128)
    x = layer_fn(inputs=x, num_filters=128)
    x = layer_fn(inputs=x, num_filters=192, should_dropout=True, should_pool=True)

    # group #2
    x = layer_fn(inputs=x, num_filters=192)
    x = layer_fn(inputs=x, num_filters=192)
    x = layer_fn(inputs=x, num_filters=192)
    x = layer_fn(inputs=x, num_filters=192)
    x = layer_fn(inputs=x, num_filters=288, should_dropout=True, should_pool=True,
                 dropout_ratio=0.3)
    x = layer_fn(inputs=x, num_filters=288)
    x = layer_fn(inputs=x, num_filters=355)
    x = layer_fn(inputs=x, num_filters=432)

    x = GlobalMaxPool2D()(x)
    x = Dropout(rate=0.3)(x)
    y = Flatten()(x)  # no-op after global pooling; kept from the original
    outputs = Dense(
        num_classes,
        activation='softmax',
        kernel_initializer='glorot_normal',
        kernel_regularizer=l2(WEIGHT_DECAY),
    )(y)

    model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
    return model
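# Example usage (a sketch; simpnet_layer and WEIGHT_DECAY are assumed to be
# defined elsewhere in this module): build SimpNet for 32x32 RGB inputs.
simpnet = build_simpnet(input_shape=(32, 32, 3), num_classes=10)
simpnet.summary()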
def model_init(self, input_shape=(257, 98, 2)):
    x_in = Input(shape=input_shape)
    x = BatchNormalization()(x_in)
    # Four conv blocks with doubling filter counts: 16, 32, 64, 128.
    for i in range(4):
        x = Conv2D(16 * (2**i), (3, 3))(x)
        x = Activation('elu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D((2, 2))(x)
    x = Conv2D(128, (1, 1))(x)
    # Fuse global average and global max pooling before the dense head.
    x_branch_1 = GlobalAveragePooling2D()(x)
    x_branch_2 = GlobalMaxPool2D()(x)
    x = concatenate([x_branch_1, x_branch_2])
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(len(config.POSSIBLE_LABELS), activation='softmax')(x)
    model = Model(inputs=x_in, outputs=x)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    self.model = model
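# model_init is a method, so it is expected to live on a class that is not
# shown here; a sketch of how it might be called (the class name
# `SpeechModel` is hypothetical):
#
#     speech = SpeechModel()
#     speech.model_init()      # default input shape (257, 98, 2)
#     speech.model.summary()   # the compiled model is stored on self.model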
    y_batch = to_categorical(y_batch, num_classes=len(POSSIBLE_LABELS))
    yield x_batch, y_batch


# In[ ]:


x_in = Input(shape=(257, 98, 2))
x = BatchNormalization()(x_in)
for i in range(1):
    x = Conv2D(16 * (2**i), (3, 3))(x)
    x = Activation('elu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
x = Conv2D(128, (1, 1))(x)
# Fuse global average and global max pooling before the dense head.
x_branch_1 = GlobalAveragePooling2D()(x)
x_branch_2 = GlobalMaxPool2D()(x)
x = concatenate([x_branch_1, x_branch_2])
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
# softmax (not sigmoid) matches the one-hot labels and categorical_crossentropy.
x = Dense(len(POSSIBLE_LABELS), activation='softmax')(x)
model = Model(inputs=x_in, outputs=x)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()


# In[ ]:


from keras_tqdm import TQDMNotebookCallback
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
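# A sketch of how the imported callbacks might be wired up (the monitored
# metric, patience values, and checkpoint filename are assumptions):
callbacks = [
    TQDMNotebookCallback(),
    EarlyStopping(monitor='val_loss', patience=5),
    ModelCheckpoint('starter.h5', save_best_only=True),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3),
]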
def efficient_model(filters=48, input_shape=(32, 32, 3), regularizers=(0, 1e-5),
                    num_classes=43, prefix="", loss="categorical_crossentropy"):
    """
    This function is used to create the network.

    Args:
        filters (int): maximum number of filters used in the convolution layers of this network
        input_shape (tuple or list): the input shape of the network
        regularizers (tuple or list): L1 and L2 alpha values, respectively
        num_classes (int): the number of classes
        prefix (str): prefix added to every layer name so that names stay unique across model instances
        loss (str or loss function): the loss function to use

    Returns:
        model (tf.keras.Model): the network, already compiled and ready for training
    """

    def conv_batch_prelu(name, tensor, num_filters, kernel_size=(3, 3),
                         strides=(1, 1), padding="same"):
        """
        This function combines a conv2d layer, a batch normalization layer and a prelu activation.

        Args:
            name (str): layer's name ('conv_', 'batchnorm_' and 'prelu_' are added to the name)
            tensor (tf.Tensor): the input tensor
            num_filters (int): number of filters used in the convolution layer
            kernel_size (tuple or list): size of each kernel in the convolution
            strides (tuple or list): strides used in the convolution
            padding (str): one of 'same' or 'valid'

        Returns:
            tensor (tf.Tensor): the output tensor
        """
        tensor = Conv2D(filters=num_filters, kernel_size=kernel_size, strides=strides,
                        kernel_initializer="he_uniform", bias_initializer="zeros",
                        kernel_regularizer=L1L2(regularizers[0], regularizers[1]),
                        padding=padding, name=f"{prefix}_conv_{name}")(tensor)
        tensor = BatchNormalization(momentum=0.1, name=f"{prefix}_batchnorm_{name}")(tensor)
        tensor = PReLU(shared_axes=[1, 2], name=f"{prefix}_prelu_{name}")(tensor)
        return tensor

    def dense_batch_prelu(name, tensor, n_units):
        """
        This function combines a dense layer, a batch normalization layer and a prelu activation.

        Args:
            name (str): layer's name ('dense_', 'batchnorm_' and 'prelu_' are added to the name)
            tensor (tf.Tensor): the input tensor
            n_units (int): number of units in the dense layer

        Returns:
            tensor (tf.Tensor): the output tensor
        """
        tensor = Dense(n_units, name=f"{prefix}_dense_{name}",
                       kernel_initializer="he_uniform", bias_initializer="zeros")(tensor)
        tensor = BatchNormalization(momentum=0.1, name=f"{prefix}_batchnorm_{name}")(tensor)
        tensor = PReLU(name=f"{prefix}_prelu_{name}")(tensor)
        return tensor

    # input layer
    inp = Input(shape=input_shape, name=f"{prefix}_input")

    # 1st convolution block: aggressive 9x9/4 downsampling branch
    cbp1 = conv_batch_prelu("cbp1", inp, num_filters=filters, kernel_size=(9, 9), strides=(4, 4))

    # 2nd convolution block: 5x5/2 branch
    cbp2 = conv_batch_prelu("cbp2", inp, num_filters=filters // 2, kernel_size=(5, 5), strides=(2, 2))

    # 3rd convolution block: two stride-1 convolutions, then max pooling
    cbp3 = conv_batch_prelu("cbp3", inp, num_filters=filters // 2, kernel_size=(5, 5), padding="same")
    cbp4 = conv_batch_prelu("cbp4", cbp3, num_filters=filters // 2, kernel_size=(5, 5), padding="same")
    max_pool1 = MaxPool2D(pool_size=(2, 2), name=f"{prefix}_max_pool1")(cbp4)

    # 1st concatenation: merge the 5x5/2 branch with the pooled branch
    concatenate1 = Concatenate(name=f"{prefix}_concatenate1")([cbp2, max_pool1])

    # 4th convolution block
    cbp5 = conv_batch_prelu("cbp5", concatenate1, num_filters=filters, kernel_size=(5, 5), strides=(2, 2))

    # 5th convolution block
    cbp6 = conv_batch_prelu("cbp6", concatenate1, num_filters=filters, kernel_size=(5, 5), padding="same")
    cbp7 = conv_batch_prelu("cbp7", cbp6, num_filters=filters, kernel_size=(5, 5), padding="same")
    max_pool2 = MaxPool2D(pool_size=(2, 2), name=f"{prefix}_max_pool2")(cbp7)

    # 2nd concatenation: also re-inject the 9x9/4 branch
    concatenate2 = Concatenate(name=f"{prefix}_concatenate2")([cbp5, max_pool2, cbp1])

    # 1st fully connected: average-pooled branch
    avg_pool = GlobalAveragePooling2D(name=f"{prefix}_avg_pool")(concatenate2)
    fc1 = dense_batch_prelu("fc1", avg_pool, 1024)

    # 2nd fully connected: max-pooled branch
    global_pool = GlobalMaxPool2D(name=f"{prefix}_global_pool")(concatenate2)
    fc2 = dense_batch_prelu("fc2", global_pool, 1024)

    # combine the two pooled branches
    add = Add(name=f"{prefix}_add")([fc1, fc2])
    drop1 = Dropout(0.5, name=f"{prefix}_drop1")(add)

    # 3rd fully connected
    fc3 = dense_batch_prelu("fc3", drop1, 512)
    drop2 = Dropout(0.5, name=f"{prefix}_drop2")(fc3)

    # output
    out = Dense(num_classes, activation="softmax", name=f"{prefix}_output")(drop2)

    # compile model
    model = tf.keras.Model(inp, out)
    optimizer = RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-08)
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy", f1_score])
    return model
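# Example usage (a sketch; f1_score is assumed to be a custom metric defined
# elsewhere in this module): build and inspect the network with its defaults.
net = efficient_model(prefix="net1")
net.summary()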
def SqueezeNet(include_top=True, weights="imagenet", model_input=None,
               non_top_pooling=None, num_classes=1000, model_path=""):
    if weights == "imagenet" and num_classes != 1000:
        raise ValueError(
            "The 'imagenet' weights require num_classes=1000, since the "
            "SqueezeNet model was trained on the 1000-class ImageNet dataset.")

    image_input = model_input

    # Stem: 3x3/2 convolution followed by 3x3/2 max pooling.
    network = Conv2D(64, (3, 3), strides=(2, 2), padding="valid")(image_input)
    network = Activation("relu")(network)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)

    # Fire modules interleaved with max pooling.
    network = squeezenet_fire_module(input=network, input_channel_small=16, input_channel_large=64)
    network = squeezenet_fire_module(input=network, input_channel_small=16, input_channel_large=64)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)

    network = squeezenet_fire_module(input=network, input_channel_small=32, input_channel_large=128)
    network = squeezenet_fire_module(input=network, input_channel_small=32, input_channel_large=128)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)

    network = squeezenet_fire_module(input=network, input_channel_small=48, input_channel_large=192)
    network = squeezenet_fire_module(input=network, input_channel_small=48, input_channel_large=192)
    network = squeezenet_fire_module(input=network, input_channel_small=64, input_channel_large=256)
    network = squeezenet_fire_module(input=network, input_channel_small=64, input_channel_large=256)

    if include_top:
        # Classifier head: 1x1 convolution to num_classes, then global pooling.
        network = Dropout(0.5)(network)
        network = Conv2D(num_classes, kernel_size=(1, 1),
                         padding="valid", name="last_conv")(network)
        network = Activation("relu")(network)
        network = GlobalAvgPool2D()(network)
        network = Activation("softmax")(network)
    else:
        if non_top_pooling == "Average":
            network = GlobalAvgPool2D()(network)
        elif non_top_pooling == "Maximum":
            network = GlobalMaxPool2D()(network)
        elif non_top_pooling is None:
            pass

    model = Model(inputs=image_input, outputs=network)

    # Both the "imagenet" and "trained" modes load weights from model_path.
    if weights in ("imagenet", "trained"):
        model.load_weights(model_path)

    return model
def SqueezeNet(include_top=True, weights="imagenet", model_input=None,
               non_top_pooling=None, num_classes=1000, model_path="",
               initial_num_classes=None, transfer_with_full_training=True):
    if weights == "imagenet" and num_classes != 1000:
        raise ValueError(
            "The 'imagenet' weights require num_classes=1000, since the "
            "SqueezeNet model was trained on the 1000-class ImageNet dataset.")

    image_input = model_input

    # Stem: 3x3/2 convolution followed by 3x3/2 max pooling.
    network = Conv2D(64, (3, 3), strides=(2, 2), padding="valid")(image_input)
    network = Activation("relu")(network)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)

    # Fire modules interleaved with max pooling.
    network = squeezenet_fire_module(input=network, input_channel_small=16, input_channel_large=64)
    network = squeezenet_fire_module(input=network, input_channel_small=16, input_channel_large=64)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)

    network = squeezenet_fire_module(input=network, input_channel_small=32, input_channel_large=128)
    network = squeezenet_fire_module(input=network, input_channel_small=32, input_channel_large=128)
    network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)

    network = squeezenet_fire_module(input=network, input_channel_small=48, input_channel_large=192)
    network = squeezenet_fire_module(input=network, input_channel_small=48, input_channel_large=192)
    network = squeezenet_fire_module(input=network, input_channel_small=64, input_channel_large=256)
    network = squeezenet_fire_module(input=network, input_channel_small=64, input_channel_large=256)

    if include_top:
        network = Dropout(0.5)(network)
        # For transfer learning, the last conv must match the class count the
        # saved weights were trained with, not the new num_classes.
        if initial_num_classes is not None:
            network = Conv2D(initial_num_classes, kernel_size=(1, 1),
                             padding="valid", name="last_conv")(network)
        else:
            network = Conv2D(num_classes, kernel_size=(1, 1),
                             padding="valid", name="last_conv")(network)
        network = Activation("relu")(network)
        network = GlobalAvgPool2D()(network)
        network = Activation("softmax")(network)
    else:
        if non_top_pooling == "Average":
            network = GlobalAvgPool2D()(network)
        elif non_top_pooling == "Maximum":
            network = GlobalMaxPool2D()(network)
        elif non_top_pooling is None:
            pass

    model = Model(inputs=image_input, outputs=network)

    if weights in ("imagenet", "trained", "continued"):
        # These three modes simply load the weights at model_path.
        model.load_weights(model_path)
        return model
    elif weights == "transfer":
        model.load_weights(model_path)
        if not transfer_with_full_training:
            # Freeze the pretrained layers; only the new head will train.
            for layer in model.layers:
                layer.trainable = False
            print("Training with top layers of the Model")
        else:
            print("Training with all layers of the Model")
        # Rebuild the classifier head on top of the dropout layer.
        network2 = model.layers[-5].output
        network2 = Conv2D(num_classes, kernel_size=(1, 1),
                          padding="valid", name="last_conv")(network2)
        network2 = Activation("relu")(network2)
        network2 = GlobalAvgPool2D()(network2)
        network2 = Activation("softmax")(network2)
        new_model = Model(inputs=model.input, outputs=network2)
        return new_model
    elif weights == "custom":
        return model
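# A sketch of the "transfer" mode (the input size and weight file path are
# assumptions): load 1000-class pretrained weights, freeze the backbone, and
# attach a fresh 10-class head.
squeeze_in = Input(shape=(227, 227, 3))
squeeze_transfer = SqueezeNet(model_input=squeeze_in, num_classes=10,
                              initial_num_classes=1000, weights="transfer",
                              model_path="squeezenet_weights.h5",
                              transfer_with_full_training=False)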
def ResNet50(include_top=True, non_top_pooling=None, model_input=None,
             num_classes=1000, weights='imagenet', model_path="",
             initial_num_classes=None, transfer_with_full_training=True):
    # Number of residual modules and output channel depth for each stage.
    layers = [3, 4, 6, 3]
    channel_depths = [256, 512, 1024, 2048]

    input_object = model_input

    # Stem: 7x7/2 convolution followed by 3x3/2 max pooling.
    output = Conv2D(64, kernel_size=7, strides=2, padding="same")(input_object)
    output = BatchNormalization()(output)
    output = Activation("relu")(output)
    output = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(output)

    output = resnet_first_block_first_module(output, channel_depths[0])

    for i in range(4):
        channel_depth = channel_depths[i]
        num_layers = layers[i]

        strided_pool_first = True
        if i == 0:
            # The first stage was started above, so skip its strided module.
            strided_pool_first = False
            num_layers = num_layers - 1

        output = resnet_block(output, channel_depth=channel_depth,
                              num_layers=num_layers,
                              strided_pool_first=strided_pool_first)

    if include_top:
        output = GlobalAvgPool2D(name="global_avg_pooling")(output)
        # For transfer learning, the classifier must match the class count the
        # saved weights were trained with, not the new num_classes.
        if initial_num_classes is not None:
            output = Dense(initial_num_classes)(output)
        else:
            output = Dense(num_classes)(output)
        output = Activation("softmax")(output)
    else:
        if non_top_pooling == "Average":
            output = GlobalAvgPool2D()(output)
        elif non_top_pooling == "Maximum":
            output = GlobalMaxPool2D()(output)
        elif non_top_pooling is None:
            pass

    model = Model(inputs=input_object, outputs=output)

    if weights in ("imagenet", "trained", "continued"):
        # These three modes simply load the weights at model_path.
        model.load_weights(model_path)
        return model
    elif weights == "transfer":
        model.load_weights(model_path)
        if not transfer_with_full_training:
            # Freeze the pretrained layers; only the new head will train.
            for layer in model.layers:
                layer.trainable = False
            print("Training with top layers of the Model")
        else:
            print("Training with all layers of the Model")
        # Rebuild the classifier on top of the global pooling layer.
        output2 = model.layers[-3].output
        output2 = Dense(num_classes)(output2)
        output2 = Activation("softmax")(output2)
        new_model = Model(inputs=model.input, outputs=output2)
        return new_model
    elif weights == "custom":
        return model
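# The same "transfer" mode for ResNet50 (a sketch; the input size and weight
# file path are assumptions), this time fine-tuning all layers:
resnet_transfer_in = Input(shape=(224, 224, 3))
resnet_transfer = ResNet50(model_input=resnet_transfer_in, num_classes=10,
                           initial_num_classes=1000, weights="transfer",
                           model_path="resnet50_weights.h5",
                           transfer_with_full_training=True)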