import tensorflow as tf
from tensorflow.keras import layers, models


def resnet_v1(input_shape, depth, num_classes=2):
    """ResNet Version 1 model builder [a]

    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU.
    The last ReLU is after the shortcut connection.
    At the beginning of each stage, the feature map size is halved
    (downsampled) by a convolutional layer with strides=2, while the number
    of filters is doubled. Within each stage, the layers have the same
    number of filters and the same feature map size.
    Feature map sizes (for a 32x32 input, with num_filters starting at 32):
    stage 0: 32x32, 32
    stage 1: 16x16, 64
    stage 2: 8x8, 128
    With num_filters starting at 16, the number of parameters is
    approximately the same as in Table 6 of [a]:
    ResNet20  0.27M
    ResNet32  0.46M
    ResNet44  0.66M
    ResNet56  0.85M
    ResNet110 1.7M

    # Arguments
        input_shape (tensor): shape of the input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)

    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2')
    # Start model definition.
    num_filters = 32
    num_res_blocks = int((depth - 2) / 6)

    inputs = tf.keras.Input(shape=input_shape)
    # resnet_layer() is the Conv2D-BN-ReLU helper defined elsewhere in this file.
    x = resnet_layer(inputs, num_filters)
    # Instantiate the stack of residual units
    for stack in range(3):
        for res_block in range(num_res_blocks):
            strides = 1
            if stack > 0 and res_block == 0:  # first layer but not first stack
                strides = 2  # downsample
            y = resnet_layer(x, num_filters, strides=strides)
            y = resnet_layer(y, num_filters, activation=None)
            if stack > 0 and res_block == 0:  # first layer but not first stack
                # Linear projection residual shortcut connection to match
                # the changed dims.
                x = resnet_layer(x, num_filters, kernel_size=1,
                                 strides=strides, activation=None,
                                 batch_normalization=False)
            x = layers.add([x, y])
            x = layers.Activation('relu')(x)
        num_filters *= 2

    # Add the classifier on top.
    # v1 does not use BN after the last shortcut connection-ReLU.
    # Attention-style head: global pooling, bottleneck Dense layers, then a
    # per-channel weighting multiplied back onto x.
    ax = layers.GlobalAveragePooling2D()(x)
    # x = layers.AveragePooling2D()(x)
    ax = layers.Dense(num_filters // 8, activation='relu')(ax)
    # num_filters was doubled once more after the last stage, so
    # num_filters // 2 equals the channel count of x.
    ax = layers.Dense(num_filters // 2, activation='softmax')(ax)
    ax = layers.Reshape((1, 1, num_filters // 2))(ax)
    ax = layers.Multiply()([ax, x])
    y = layers.Flatten()(ax)
    outputs = layers.Dense(num_classes, activation='softmax',
                           kernel_initializer='he_normal')(y)

    # Instantiate the model
    model = models.Model(inputs=inputs, outputs=outputs)
    return model
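# Illustrative usage sketch (assumes the resnet_layer helper is defined, as in
# the standard Keras CIFAR ResNet example); depth must satisfy depth = 6n + 2:
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
model.summary()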
def hard_swish(x):
    # hard_swish(x) = x * hard_sigmoid(x), a cheap piecewise approximation of swish
    return layers.Multiply()([hard_sigmoid(x), x])
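# hard_sigmoid() is used above but not shown in this snippet. A minimal sketch,
# assuming the MobileNetV3-style definition hard_sigmoid(x) = ReLU6(x + 3) / 6
# (built from Keras layers so it can sit inside a functional model):
def hard_sigmoid(x):
    return layers.ReLU(6.0)(x + 3.0) * (1.0 / 6.0)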
import tensorflow as tf
from tensorflow.keras import layers, regularizers
from tensorflow.keras import backend as K

# emb1..emb7, y_fm_1d, Conc, and Scale are defined earlier in the model (not shown).
# Concatenate the per-feature embeddings along axis=1, so every feature becomes
# a row (row-wise, not column-wise). Output shape => (None, p, Embed_Size),
# where p is the number of features (7 here).
embed_2d = layers.Concatenate(axis=1)([emb1, emb2, emb3, emb4, emb5, emb6, emb7])
# Function that sums all features row-wise. Output shape => (None, Embed_Size)
tensor_sum = layers.Lambda(lambda x: K.sum(x, axis=1), name='sum_of_tensors')
# Function that squares every entry. Output shape => (None, p, Embed_Size)
tensor_square = layers.Lambda(lambda x: K.square(x), name='square_of_tensors')
# Sum of embeddings. Shape = (None, Embed_Size)
sum_of_embed = tensor_sum(embed_2d)
# Squares of embeddings. Shape = (None, p, Embed_Size)
square_of_embed = tensor_square(embed_2d)
# Square of the embedding sum. Shape = (None, Embed_Size)
square_of_sum = layers.Multiply()([sum_of_embed, sum_of_embed])
# Sum of the squared embeddings. Shape = (None, Embed_Size)
sum_of_square = tensor_sum(square_of_embed)
# Subtract the sum of squares from the square of the sum
sub = layers.Subtract()([square_of_sum, sum_of_square])
# The second-order term is half of sub
y_fm_2d = layers.Lambda(lambda x: x * 0.5)(sub)

# Concatenate the first- and second-order terms
y = layers.Concatenate()([y_fm_1d, y_fm_2d])
# Output layer
Output = layers.Dense(1, activation='linear',
                      kernel_regularizer=regularizers.l2(Scale))(y)

model = tf.keras.Model(inputs=[Conc], outputs=Output)
model.summary()
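# Why halving ((sum of embeddings)^2 - sum of squared embeddings) yields the FM
# second-order interactions: for rows v_1..v_p it equals sum_{i<j} v_i * v_j,
# elementwise over the embedding dimension. A quick standalone NumPy check
# (illustrative only, not part of the model):
import numpy as np

rng = np.random.default_rng(0)
v = rng.normal(size=(7, 4))  # p = 7 features, Embed_Size = 4

fm_trick = 0.5 * (v.sum(axis=0) ** 2 - (v ** 2).sum(axis=0))
pairwise = sum(v[i] * v[j] for i in range(7) for j in range(i + 1, 7))
assert np.allclose(fm_trick, pairwise)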
def call(self, inputs):
    # Compute a gate from the inputs and scale the inputs by it elementwise
    y = self.fc(inputs)
    output = layers.Multiply()([inputs, y])
    return output
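# The call() above belongs to a custom Keras layer whose remaining definition
# is not shown. A minimal sketch of one plausible enclosing class, assuming
# self.fc is a squeeze-and-excitation-style gating network (the class name
# "FeatureGate" and the reduction ratio are assumptions, not from the original):
class FeatureGate(tf.keras.layers.Layer):
    def __init__(self, units, reduction=4, **kwargs):
        super().__init__(**kwargs)
        # Bottleneck MLP that produces a per-feature gate in [0, 1]
        self.fc = tf.keras.Sequential([
            layers.Dense(units // reduction, activation='relu'),
            layers.Dense(units, activation='sigmoid'),
        ])

    def call(self, inputs):
        y = self.fc(inputs)
        return layers.Multiply()([inputs, y])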