def __init__(self, out_channel, kernel_size=3, stride=1, **kwargs):
    """Create the Conv2D -> BatchNorm -> ReLU6 sub-layers of a ConvBNReLU block.

    Args:
        out_channel: number of output feature maps of the convolution.
        kernel_size: square kernel side length (default 3).
        stride: convolution stride (default 1).
        **kwargs: forwarded to the parent layer constructor (e.g. `name`).
    """
    super(ConvBNReLU, self).__init__(**kwargs)
    # Bias is omitted because the following BatchNorm supplies its own offset (beta).
    self.conv = layers.Conv2D(filters=out_channel, kernel_size=kernel_size, strides=stride, padding='SAME', use_bias=False, name='Conv2d')
    self.bn = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name='BatchNorm')
    # max_value=6.0 makes this a ReLU6, as used in MobileNet-style blocks.
    self.activation = layers.ReLU(max_value=6.0)
def make_generator_model(label_dim, g_conv_dim):
    """Build the conditional image-to-image generator.

    The label vector is tiled to the full 128x128 spatial grid and concatenated
    with the input image; the network then follows a downsample / 6-residual /
    upsample layout and ends in a tanh-activated 3-channel image.

    label_dim: length of the conditioning label vector.
    g_conv_dim: base channel width of the generator.
    """
    img_in = layers.Input(shape=(128, 128, 3), name='gen_input_img')          # (None, 128, 128, 3)
    lbl_in = layers.Input(shape=(label_dim, ), name='gen_input_lbl')          # (None, label_dim)
    lbl = layers.Reshape((1, 1, label_dim))(lbl_in)                           # (None, 1, 1, label_dim)
    # Broadcast the label over every spatial position.
    lbl = layers.Lambda(lambda t: tf.tile(t, (1, 128, 128, 1)))(lbl)          # (None, 128, 128, label_dim)
    x = layers.Concatenate(name='gen_input_concat')([img_in, lbl])            # (None, 128, 128, 3+label_dim)

    # Downsampling: 7x7 stride-1 entry conv, then two 4x4 stride-2 convs.
    down_specs = [(g_conv_dim, 7, 1), (g_conv_dim * 2, 4, 2), (g_conv_dim * 4, 4, 2)]
    for idx, (filters, k, s) in enumerate(down_specs, start=1):
        x = layers.Conv2D(filters, (k, k), strides=(s, s), padding='same',
                          use_bias=False, name='gen_conv{}'.format(idx))(x)
        # LayerNorm over the spatial axes acts as an instance-norm substitute.
        x = layers.LayerNormalization(axis=(1, 2), epsilon=1e-5,
                                      name='gen_i_norm{}'.format(idx))(x)
        x = layers.ReLU()(x)
    # After downsampling: (None, 32, 32, g_conv_dim*4)

    # Bottleneck: six residual blocks at constant resolution.
    for idx in range(1, 7):
        x = ResidualBlock(name='gen_res_block{}'.format(idx))(x)

    # Upsampling: two 4x4 stride-2 transposed convs back to 128x128.
    for idx, filters in enumerate((g_conv_dim * 2, g_conv_dim), start=1):
        x = layers.Conv2DTranspose(filters, (4, 4), strides=(2, 2), padding='same',
                                   use_bias=False, name='gen_deconv{}'.format(idx))(x)
        x = layers.LayerNormalization(axis=(1, 2), epsilon=1e-5,
                                      name='gen_i_norm{}'.format(idx + 3))(x)
        x = layers.ReLU()(x)

    x = layers.Conv2D(3, (7, 7), strides=(1, 1), padding='same',
                      use_bias=False, name='gen_last_conv')(x)                # (None, 128, 128, 3)
    out = layers.Activation('tanh')(x)
    return tf.keras.Model([img_in, lbl_in], out)
def build_last(input_shape, classes):
    """Build a small residual CNN classifier.

    Args:
        input_shape: shape of a single input sample (H, W, C).
        classes: number of softmax output classes.

    Returns:
        tf.keras.Model mapping the input to class probabilities.

    NOTE(review): the residual wiring is asymmetric — in the first group the
    main path has 6 convs while the shortcut has 1; later shortcuts are taken
    from the pre-group tensor `x_`. Presumably intentional, but worth
    confirming against the original design.
    """
    inputs = layers.Input(shape=input_shape)
    # --- Group 1 main path: two runs of (conv-BN, conv-BN, conv-BN, ReLU) at 64 ch.
    x = layers.Conv2D(64, (3, 3), padding='same')(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(64, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(64, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(64, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(64, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(64, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # Group 1 shortcut: a single conv straight from the input.
    x__ = layers.Conv2D(64, (3, 3), padding='same')(inputs)
    x__ = layers.BatchNormalization()(x__)
    x__ = layers.ReLU()(x__)
    x_ = tf.add(x, x__)           # residual merge
    x_ = layers.Dropout(0.5)(x_)
    # --- Group 2: main path at 128 ch, first conv strided (padding 'valid').
    x = layers.Conv2D(128, (3, 3), strides=(2, 2))(x_)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(128, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(128, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(128, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # Group 2 shortcut: strided conv from the previous merge point.
    x_ = layers.Conv2D(128, (3, 3), strides=(2, 2))(x_)
    x_ = layers.BatchNormalization()(x_)
    x_ = layers.ReLU()(x_)
    x_ = tf.add(x, x_)
    x_ = layers.Dropout(0.5)(x_)
    # --- Group 3: 256 ch, strided entry; ZeroPadding keeps the second conv's size.
    x = layers.Conv2D(256, (3, 3), strides=(2, 2))(x_)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.ZeroPadding2D()(x)
    x = layers.Conv2D(256, (3, 3))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x_ = layers.Conv2D(256, (3, 3), strides=(2, 2))(x_)
    x_ = layers.BatchNormalization()(x_)
    x_ = layers.ReLU()(x_)
    x_ = tf.add(x, x_)
    # --- Group 4: same layout as group 3 (no dropout between 3 and 4).
    x = layers.Conv2D(256, (3, 3), strides=(2, 2))(x_)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.ZeroPadding2D()(x)
    x = layers.Conv2D(256, (3, 3))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x_ = layers.Conv2D(256, (3, 3), strides=(2, 2))(x_)
    x_ = layers.BatchNormalization()(x_)
    x_ = layers.ReLU()(x_)
    x_ = tf.add(x, x_)
    # Classification head.
    x = layers.GlobalAveragePooling2D()(x_)
    pred = layers.Dense(classes, activation="softmax")(x)
    model = tf.keras.Model(inputs=inputs, outputs=pred)
    return model
def unet(input_shape: Tuple[int, int, int], **kwargs) -> tf.keras.Model:
    """Build a 3-level U-Net with a sigmoid single-channel output.

    Encoder widths 32/64/128, bottleneck 256, mirrored decoder with skip
    concatenations; every level is two 3x3 he_normal convs with ReLU.
    """
    def double_conv(t, filters):
        # Two 3x3 conv + ReLU pairs — the basic unit of every level.
        for _ in range(2):
            t = layers.Conv2D(filters, 3, padding='same',
                              kernel_initializer='he_normal')(t)
            t = layers.ReLU()(t)
        return t

    input_layer = layers.Input(input_shape)
    x = input_layer

    # Encoder: keep each level's output as a skip, then halve the resolution.
    skips = []
    for filters in (32, 64, 128):
        x = double_conv(x, filters)
        skips.append(x)
        x = layers.MaxPool2D(2)(x)

    # Bottleneck
    x = double_conv(x, 256)

    # Decoder: upsample, concat the matching skip, convolve.
    for filters, skip in zip((128, 64, 32), reversed(skips)):
        x = layers.UpSampling2D(2)(x)
        x = layers.Concatenate(axis=-1)([x, skip])
        x = double_conv(x, filters)

    # 1x1 projection to a single sigmoid mask channel.
    x = layers.Conv2D(1, 1, activation='sigmoid', padding='same',
                      kernel_initializer='he_normal')(x)
    return tf.keras.Model(input_layer, x)
def __init__(self, channels, data_format="channels_last", **kwargs):
    """Pre-activation unit: BatchNorm followed by ReLU (no convolution).

    channels: number of input channels fed to the batch norm.
    data_format: 'channels_last' or 'channels_first'.
    """
    super(PreActivation, self).__init__(**kwargs)
    # dpn_batch_norm is a project helper; presumably the DPN-specific BN
    # variant — confirm against its definition.
    self.bn = dpn_batch_norm(channels=channels, data_format=data_format, name="bn")
    self.activ = nn.ReLU()
def __init__(self, num_classes, alpha=1, **kwargs):
    """Build the MnasNet layer stack.

    num_classes: size of the final dense classification layer.
    alpha: width multiplier applied to every channel count.
    """
    super(Mnasnet, self).__init__(**kwargs)
    self.blocks = []
    # Stem: strided 3x3 Conv+BN.
    self.conv_bn_initial = Conv_BN(filters=32 * alpha, kernel_size=3, strides=2)

    # First block (non-identity): depthwise conv + BN + ReLU6, then 1x1 Conv+BN.
    self.conv1_block1 = depthwiseConv(depth_multiplier=1, kernel_size=3, strides=1)
    self.bn1_block1 = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)
    self.relu1_block1 = layers.ReLU(max_value=6)
    self.conv_bn_block_1 = Conv_BN(filters=16 * alpha, kernel_size=1, strides=1)

    # MBConv stages as (input_filters, filters, kernel_size, strides, filters_multiplier).
    mbconv_specs = [
        # MBConv3 3x3
        (16, 24, 3, 2, 3), (24, 24, 3, 1, 3), (24, 24, 3, 1, 3),
        # MBConv3 5x5
        (24, 40, 5, 2, 3), (40, 40, 5, 1, 3), (40, 40, 5, 1, 3),
        # MBConv6 5x5
        (40, 80, 5, 2, 6), (80, 80, 5, 1, 6), (80, 80, 5, 1, 6),
        # MBConv6 3x3
        (80, 96, 3, 1, 6), (96, 96, 3, 1, 6),
        # MBConv6 5x5
        (96, 192, 5, 2, 6), (192, 192, 5, 1, 6), (192, 192, 5, 1, 6), (192, 192, 5, 1, 6),
        # MBConv6 3x3
        (192, 320, 3, 1, 6),
    ]
    for in_f, out_f, kernel, stride, mult in mbconv_specs:
        self.blocks.append(
            MBConv_idskip(
                input_filters=in_f * alpha,
                filters=out_f,
                kernel_size=kernel,
                strides=stride,
                filters_multiplier=mult,
                alpha=alpha))

    # Last 1x1 convolution, then pooling + classifier head.
    self.conv_bn_last = Conv_BN(filters=1152 * alpha, kernel_size=1, strides=1)
    self.avg_pool = layers.GlobalAveragePooling2D()
    self.fc = layers.Dense(num_classes)
def mobile_net_v2(input_shape=(224,224,3), num_classes=1000):
    """
    mobile net v2 based on https://arxiv.org/pdf/1801.04381.pdf

    Args:
        input_shape (tuple): input shape
        num_classes (int): number of categories

    Returns:
        mobile net v2 model
    """
    img = layers.Input(shape=input_shape)

    # Stem: strided 3x3 conv, BN, ReLU6.
    x = layers.Conv2D(32, 3, 2, padding='same', use_bias=False)(img)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6.0)(x)

    # First bottleneck has no expansion layer.
    x = conv_block(x, 16, 1, 1, expand=False)

    # Remaining inverted-residual bottlenecks as (filters, stride), expansion 6.
    bottleneck_specs = (
        (24, 2), (24, 1),
        (32, 2), (32, 1), (32, 1),
        (64, 2), (64, 1), (64, 1), (64, 1),
        (96, 1), (96, 1), (96, 1),
        (160, 2), (160, 1), (160, 1),
        (320, 1),
    )
    for filters, stride in bottleneck_specs:
        x = conv_block(x, filters, stride, 6)

    # Head: 1x1 conv to 1280 channels, pooling, softmax classifier.
    x = layers.Conv2D(1280, 1, 1, padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6.0)(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes)(x)
    x = layers.Activation('softmax')(x)

    model = Model(inputs=img, outputs=x)
    model.summary()
    return model
# Build train/val/test tf.data pipelines from the pokemon dataset.
images, labels, table = load_pokemon('pokemon', mode='train')
db_train = tf.data.Dataset.from_tensor_slices((images, labels))
db_train = db_train.shuffle(1000).map(preprocess).batch(batchsz)
# create validation db
images2, labels2, table = load_pokemon('pokemon', mode='val')
db_val = tf.data.Dataset.from_tensor_slices((images2, labels2))
db_val = db_val.map(preprocess).batch(batchsz)
# create test db
images3, labels3, table = load_pokemon('pokemon', mode='test')
db_test = tf.data.Dataset.from_tensor_slices((images3, labels3))
db_test = db_test.map(preprocess).batch(batchsz)
# NOTE(review): this Sequential model is immediately overwritten by ResNet(5)
# below and never used — dead code?
resnet = keras.Sequential([
    layers.Conv2D(16, 5, 3),
    layers.MaxPool2D(3, 3),
    layers.ReLU(),
    layers.Conv2D(64, 5, 3),
    layers.MaxPool2D(2, 2),
    layers.ReLU(),
    layers.Flatten(),
    layers.Dense(64),
    layers.ReLU(),
    layers.Dense(5)
])
resnet = ResNet(5)
# The batch dimension (4) here only affects the shapes printed by summary().
resnet.build(input_shape=(4, 224, 224, 3))
resnet.summary()
# Stop when val_accuracy plateaus (this call continues beyond this chunk).
early_stopping = EarlyStopping(monitor='val_accuracy', min_delta=0.001,
# (EN) heading: "Pooling — pooling does not change the channel count."
# NOTE(review): the original heading says 上采样 (upsampling), but max-pooling
# downsamples; the label looks mistaken.
""" 上采样:Pooling pooling并不会改变channel的大小 """
x = tf.random.normal([1, 14, 14, 4])
#MaxPooling
pool = layers.MaxPool2D(2,strides=2)  # 2 is the pool size; strides is the step; padding defaults to 'valid'
print(pool(x).shape)  # (1, 7, 7, 4): floor((14 - 2) / 2) + 1 = 7
#MaxPooling
pool = layers.MaxPool2D(3,strides=2)  # 3 is the pool size; strides is the step; padding defaults to 'valid'
print(pool(x).shape)  # (1, 6, 6, 4): floor((14 - 3) / 2) + 1 = 6
# (EN) heading: "Upsampling: enlarge the image."
''' 上采样 : 将图片放大 '''
x = tf.random.normal([1,7,7,4])
layer = layers.UpSampling2D(size=3)
print(layer(x).shape)  # (1, 21, 21, 4)
layer = layers.UpSampling2D(size=2)
print(layer(x).shape)  # (1, 14, 14, 4)
# (EN) heading: "ReLU: drop the negative values."
''' ReLU : 将图片负的去掉 '''
x = tf.random.normal([2,3])
print(tf.nn.relu(x))       # functional form
print(layers.ReLU()(x))    # same result: creates a ReLU layer instance and invokes its __call__()
def projection_block(x, filters_in, filters_out, cardinality=32, strides=(2, 2)):
    """ Construct a ResNeXT block with projection shortcut
        x          : input to the block
        filters_in : number of filters (channels) at the input convolution
        filters_out: number of filters (channels) at the output convolution
        cardinality: width of group convolution
        strides    : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))
    """
    # Construct the projection shortcut
    # Increase filters by 2X to match shape when added to output of block
    shortcut = layers.Conv2D(filters_out, kernel_size=(1, 1), strides=strides,
                             padding='same', kernel_initializer='he_normal',
                             use_bias=False)(x)
    shortcut = layers.BatchNormalization()(shortcut)

    # Dimensionality Reduction
    x = layers.Conv2D(filters_in, kernel_size=(1, 1), strides=(1, 1),
                      padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Cardinality (Wide) Layer (split-transform)
    filters_card = filters_in // cardinality
    groups = []
    for i in range(cardinality):
        # BUG FIX: bind the slice start as a lambda default. The original
        # lambda captured the loop variable `i` by reference (late-binding
        # closure); if the Lambda layer is re-traced or deserialized, every
        # group would slice with the final value of `i` instead of its own.
        start = i * filters_card
        group = layers.Lambda(
            lambda z, s=start, w=filters_card: z[:, :, :, s:s + w])(x)
        groups.append(
            layers.Conv2D(filters_card, kernel_size=(3, 3), strides=strides,
                          padding='same', kernel_initializer='he_normal',
                          use_bias=False)(group))

    # Concatenate the outputs of the cardinality layer together (merge)
    x = layers.concatenate(groups)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Dimensionality restoration
    x = layers.Conv2D(filters_out, kernel_size=(1, 1), strides=(1, 1),
                      padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    x = layers.BatchNormalization()(x)

    # Identity Link: Add the shortcut (input) to the output of the block
    x = layers.add([shortcut, x])
    x = layers.ReLU()(x)
    return x
def __init__(self, in_channels_list, out_channels_list, num_branches, num_subblocks, data_format="channels_last", **kwargs):
    """HRNet block: parallel resolution branches plus cross-resolution fusion.

    in_channels_list : per-branch input channel counts. NOTE: this list is
        mutated in place (each entry is updated to the branch's output width)
        and kept as `self.in_channels_list`; the fuse layers below deliberately
        read the updated values through the same list object.
    out_channels_list: per-branch output channel counts.
    num_branches     : number of parallel branches.
    num_subblocks    : per-branch number of residual units.
    data_format      : 'channels_last' or 'channels_first'.
    """
    super(HRBlock, self).__init__(**kwargs)
    self.in_channels_list = in_channels_list
    self.num_branches = num_branches

    # Per-branch stacks of basic (non-bottleneck) residual units.
    self.branches = SimpleSequential(name="branches")
    for i in range(num_branches):
        # FIX: renamed from `layers` — the original local shadowed the keras
        # `layers` module name inside this method.
        branch = SimpleSequential(name="branches/branch{}".format(i + 1))
        in_channels_i = self.in_channels_list[i]
        out_channels_i = out_channels_list[i]
        for j in range(num_subblocks[i]):
            branch.add(ResUnit(
                in_channels=in_channels_i,
                out_channels=out_channels_i,
                strides=1,
                bottleneck=False,
                data_format=data_format,
                name="unit{}".format(j + 1)))
            in_channels_i = out_channels_i
        # Record the branch's output width for the fuse layers below.
        self.in_channels_list[i] = out_channels_i
        self.branches.add(branch)

    if num_branches > 1:
        # Fuse layers: for each target branch i, bring every source branch j
        # to i's resolution/width — upsample if j > i, identity if j == i,
        # chain of strided 3x3 blocks if j < i.
        self.fuse_layers = SimpleSequential(name="fuse_layers")
        for i in range(num_branches):
            fuse_layer_name = "fuse_layers/fuse_layer{}".format(i + 1)
            fuse_layer = SimpleSequential(name=fuse_layer_name)
            for j in range(num_branches):
                if j > i:
                    fuse_layer.add(UpSamplingBlock(
                        in_channels=in_channels_list[j],
                        out_channels=in_channels_list[i],
                        scale_factor=2 ** (j - i),
                        data_format=data_format,
                        name=fuse_layer_name + "/block{}".format(j + 1)))
                elif j == i:
                    fuse_layer.add(Identity(name=fuse_layer_name + "/block{}".format(j + 1)))
                else:
                    conv3x3_seq_name = fuse_layer_name + "/block{}_conv3x3_seq".format(j + 1)
                    conv3x3_seq = SimpleSequential(name=conv3x3_seq_name)
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Last downsampling step switches to the target
                            # width and carries no activation.
                            conv3x3_seq.add(conv3x3_block(
                                in_channels=in_channels_list[j],
                                out_channels=in_channels_list[i],
                                strides=2,
                                activation=None,
                                data_format=data_format,
                                name="subblock{}".format(k + 1)))
                        else:
                            conv3x3_seq.add(conv3x3_block(
                                in_channels=in_channels_list[j],
                                out_channels=in_channels_list[j],
                                strides=2,
                                data_format=data_format,
                                name="subblock{}".format(k + 1)))
                    fuse_layer.add(conv3x3_seq)
            self.fuse_layers.add(fuse_layer)
        self.activ = nn.ReLU()
def v2_stem(input_tensor):
    """ stem for inception v4

    Args:
        input_tensor (keras tensor): input tensor

    Returns:
        keras tensor
    """
    # Entry convolutions.
    x = layers.Conv2D(32, 3, 2, padding='valid')(input_tensor)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(32, 3, 1, padding='valid')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(64, 3, 1, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # First reduction: 3x3 max-pool in parallel with a strided 3x3 conv.
    m = layers.MaxPooling2D(3, 2, padding='valid')(x)
    conv = layers.Conv2D(96, 3, 2, padding='valid')(x)
    conv = layers.BatchNormalization()(conv)
    conv = layers.ReLU()(conv)
    x = layers.Concatenate()([m, conv])

    # Two parallel towers: factorized 7x1/1x7 branch and a short branch.
    x1 = layers.Conv2D(64, 1, 1, padding='same')(x)
    x1 = layers.BatchNormalization()(x1)
    x1 = layers.ReLU()(x1)
    x1 = layers.Conv2D(64, (7, 1), 1, padding='same')(x1)
    x1 = layers.BatchNormalization()(x1)
    x1 = layers.ReLU()(x1)
    x1 = layers.Conv2D(64, (1, 7), 1, padding='same')(x1)
    x1 = layers.BatchNormalization()(x1)
    x1 = layers.ReLU()(x1)
    x1 = layers.Conv2D(96, 3, 1, padding='valid')(x1)
    x1 = layers.BatchNormalization()(x1)
    x1 = layers.ReLU()(x1)
    x2 = layers.Conv2D(64, 1, 1, padding='same')(x)
    x2 = layers.BatchNormalization()(x2)
    x2 = layers.ReLU()(x2)
    x2 = layers.Conv2D(96, 3, 1, padding='valid')(x2)
    x2 = layers.BatchNormalization()(x2)
    x2 = layers.ReLU()(x2)
    x = layers.Concatenate()([x2, x1])

    # Second reduction.
    conv = layers.Conv2D(192, 3, 2, padding='valid')(x)
    conv = layers.BatchNormalization()(conv)
    conv = layers.ReLU()(conv)
    # BUG FIX: pool_size was omitted and defaulted to (2, 2). The Inception-v4
    # stem (and the first reduction above) uses a 3x3 pool; with a 2x2 pool the
    # pooled branch's spatial size differs from the conv branch for even input
    # sizes, which makes the Concatenate below fail.
    m = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
    x = layers.Concatenate()([m, conv])
    return x
def mobilenet_v2(input_shape):
    """Build a DeepLab-v3 segmentation model on a MobileNetV2 backbone.

    The backbone is constructed layer by layer, pretrained weights are loaded
    by name, then the deeplab head is attached and the logits are resized back
    to the input resolution. Uses module-level `alpha`, `kernel_reg`,
    `model_name` and `weight_path`.
    """
    img_input = layers.Input(shape=input_shape)

    # Stem.
    first_block_filters = _make_divisible(32 * alpha, 8)
    x = layers.Conv2D(first_block_filters, kernel_size=3, strides=(2, 2),
                      padding="same", kernel_regularizer=kernel_reg,
                      use_bias=False, name='Conv1')(img_input)
    x = layers.BatchNormalization(momentum=0.999, name='bn_Conv1')(x)
    x = layers.ReLU(6., name="Conv1_relu")(x)

    # Inverted residual blocks:
    # (filters, stride, expansion, block_id, skip_connection, rate)
    block_cfg = (
        (16, 1, 1, 0, False, 1),
        (24, 2, 6, 1, False, 1),
        (24, 1, 6, 2, True, 1),
        (32, 2, 6, 3, False, 1),
        (32, 1, 6, 4, True, 1),
        (32, 1, 6, 5, True, 1),
        (64, 1, 6, 6, False, 1),
        (64, 1, 6, 7, True, 2),
        (64, 1, 6, 8, True, 2),
        (64, 1, 6, 9, True, 2),
        (96, 1, 6, 10, False, 2),
        (96, 1, 6, 11, True, 2),
        (96, 1, 6, 12, True, 2),
        (160, 1, 6, 13, False, 2),
        (160, 1, 6, 14, True, 4),
        (160, 1, 6, 15, True, 4),
        (320, 1, 6, 16, False, 4),
    )
    for filters, stride, expansion, block_id, skip, rate in block_cfg:
        x = _inverted_res_block(x, filters=filters, alpha=alpha, stride=stride,
                                expansion=expansion, block_id=block_id,
                                skip_connection=skip, rate=rate)

    # Load pretrained backbone weights by layer name.
    backbone = tf.keras.Model(img_input, x)
    weights_path = tf.keras.utils.get_file(model_name, weight_path,
                                           cache_subdir='models')
    backbone.load_weights(weights_path, by_name=True)

    # Segmentation head + bilinear-style resize back to input resolution.
    x = deeplab_head(x, dilations=[12, 24, 36])
    x = layers.experimental.preprocessing.Resizing(input_shape[0], input_shape[1])(x)
    return tf.keras.Model(img_input, x, name='deeplab_v3_mobilenet_v2')
def get_model(input_shpae, num_classes):
    """3D U-Net: three encoder levels, a bridge, and a mirrored decoder.

    input_shpae: input volume shape (parameter spelling kept for callers).
    num_classes: channels of the final per-voxel softmax output.
    Uses the module-level `n_filter` width multiplier.
    """
    def conv_bn_relu(tensor, filters):
        # 3x3x3 conv -> BN -> ReLU, the basic unit of every level.
        tensor = layers.Conv3D(filters, 3, strides=1, padding='same',
                               kernel_initializer='he_normal')(tensor)
        tensor = layers.BatchNormalization()(tensor)
        return layers.ReLU()(tensor)

    inputs = layers.Input(input_shpae)

    # Encoder
    conv1 = conv_bn_relu(conv_bn_relu(inputs, 32 * n_filter), 64 * n_filter)
    pool1 = layers.MaxPooling3D(pool_size=2, strides=2)(conv1)
    conv2 = conv_bn_relu(conv_bn_relu(pool1, 64 * n_filter), 128 * n_filter)
    pool2 = layers.MaxPooling3D(pool_size=2, strides=2)(conv2)
    conv3 = conv_bn_relu(conv_bn_relu(pool2, 128 * n_filter), 256 * n_filter)
    pool3 = layers.MaxPooling3D(pool_size=2, strides=2)(conv3)

    # Bridge
    bridge = conv_bn_relu(conv_bn_relu(pool3, 256 * n_filter), 512 * n_filter)

    # Decoder: transpose-conv upsample, concat encoder skip, two conv units.
    up7 = layers.Conv3DTranspose(512 * n_filter, 2, strides=2, padding='same')(bridge)
    merge7 = layers.concatenate([conv3, up7], axis=-1)
    conv7 = conv_bn_relu(conv_bn_relu(merge7, 256 * n_filter), 256 * n_filter)

    up8 = layers.Conv3DTranspose(256 * n_filter, 2, strides=2, padding='same')(conv7)
    merge8 = layers.concatenate([conv2, up8], axis=-1)
    conv8 = conv_bn_relu(conv_bn_relu(merge8, 128 * n_filter), 128 * n_filter)

    up9 = layers.Conv3DTranspose(128 * n_filter, 2, strides=2, padding='same')(conv8)
    merge9 = layers.concatenate([conv1, up9], axis=-1)
    conv9 = conv_bn_relu(conv_bn_relu(merge9, 64 * n_filter), 64 * n_filter)

    # Per-voxel classification head.
    conv9 = layers.Conv3D(num_classes, 1, padding='SAME',
                          kernel_initializer='he_normal',
                          activation='softmax')(conv9)
    return tf.keras.Model(inputs=inputs, outputs=conv9)
def resnet(block, blocks_num, im_width=224, im_height=224, num_classes=1000, include_top=True):
    """Build a ResNet model (NHWC tensors, e.g. (None, 224, 224, 3)).

    :param block: residual block class (BasicBlock / Bottleneck)
    :param blocks_num: residual-unit counts for conv2_x..conv5_x,
        e.g. [3, 4, 6, 3] for ResNet-34
    :param im_width: image width
    :param im_height: image height
    :param num_classes: number of output classes (default 1000)
    :param include_top: whether to attach the global-average-pool + dense head
    """
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")

    # Stem: strided 7x7 conv + BN + ReLU, then a stride-2 max pool.
    x = layers.Conv2D(filters=64, kernel_size=7, strides=2,
                      padding="SAME", use_bias=False, name="conv1")(input_image)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                  name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME")(x)

    # Residual stages; the first keeps the resolution, the rest stride by 2.
    x = parse_layer(block, x.shape[-1], 64, blocks_num[0], name="block1")(x)
    for stage, channels in enumerate((128, 256, 512), start=2):
        x = parse_layer(block, x.shape[-1], channels, blocks_num[stage - 1],
                        strides=2, name="block{}".format(stage))(x)

    if include_top:
        # Classification head: global average pool, dense logits, softmax.
        x = layers.GlobalAvgPool2D()(x)
        x = layers.Dense(num_classes, name="logits")(x)
        predict = layers.Softmax()(x)
    else:
        # Leave the feature map exposed for custom heads.
        predict = x

    model = Model(inputs=input_image, outputs=predict)
    return model
def __init__(self, num_action: int, num_hidden_units: int):
    """Actor-critic network: one shared hidden layer with two output heads.

    num_action: size of the actor head (one logit per action).
    num_hidden_units: width of the shared hidden layer.
    """
    super(ActorCritic, self).__init__()
    # Linear layer plus a separate ReLU layer (activation=None keeps the
    # pre-activation accessible if ever needed).
    self.common = layers.Dense(num_hidden_units, activation=None)
    self.activation = layers.ReLU()
    # Unnormalized action logits.
    self.actor = layers.Dense(num_action)
    # Scalar state-value estimate.
    self.critic = layers.Dense(1)
# One-hot encode the labels.
Y = keras.utils.to_categorical(Y)
Y[0]
#%%
# 80/20 train/test split over the first 3M samples.
train_x, test_x, train_y, test_y = train_test_split(X[:3000000], Y[:3000000], test_size=0.2)
#%%
len(Y)
#%%
# Sarcasm Model -->
inputs = tf.keras.Input(shape=X[0].shape)
# Hidden layer with L2 weight decay; ReLU applied as a separate layer.
x = layers.Dense(70, kernel_regularizer=keras.regularizers.l2(0.001))(inputs)
x = layers.ReLU()(x)
#x = layers.GaussianDropout(0.3)(x)
#x = layers.BatchNormalization(axis=1)(x)
#x = layers.Dropout(0.2)(x)
#x = layers.BatchNormalization()(x)
#x = layers.Dense(64, activation = "selu",kernel_regularizer=keras.regularizers.l2(0.001))(x)
#x = layers.BatchNormalization(axis=1)(x)
#x = layers.Dropout(0.5)(x)
x = layers.Dense(10, activation="sigmoid", kernel_regularizer=keras.regularizers.l2(0.001))(x)
# Two-way softmax output head.
stars = layers.Dense(2, activation='softmax', name="stars")(x)  #(len(uniqs), activation='softmax')(x)
sarcasmmodel = tf.keras.Model(inputs=inputs, outputs=stars)
# (this compile call continues beyond this chunk)
sarcasmmodel.compile(optimizer='rmsprop',
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

x = tf.constant([2, 1, 0.1], dtype=tf.float32)
layer = layers.Softmax(axis=-1)  # create a Softmax layer over the last axis
layer(x)  # call the layer: softmax forward pass
# by hand: np.exp(2)/ sum([ np.exp(i) for i in [2, 1, 0.1] ])
## 8.1.2 Network containers
# (EN) heading: "Wrap layers into one large network model via Sequential."
""" 通过 Sequential 封装成一个大网络模型 """
from tensorflow.keras import layers, Sequential
network = Sequential([
    layers.Dense(3, activation=None),
    layers.ReLU(),
    layers.Dense(2, activation=None),
    layers.ReLU()
])
x = tf.random.normal([4, 3])
network(x)
# Layers can also be appended to the network one by one.
layers_num = 2
network = Sequential([])
for _ in range(layers_num):
    network.add(layers.Dense(3))
    network.add(layers.ReLU())
# layer1: 4 * 3 + 3 params, layer2: 3*3 + 3 params
network.build(input_shape=(None, 4))
network.summary()
def __init__(self, enc_channels, dec_channels, init_block_channels, layers, int_bends, use_preresnet, in_channels=3, in_size=(640, 640), data_format="channels_last", **kwargs):
    """LFFD detector: multi-output encoder plus per-scale detection heads.

    enc_channels: channel count of each encoder stage.
    dec_channels: middle channel count of every detection block.
    init_block_channels: channels of the stem convolution.
    layers: residual-unit count per stage.
        NOTE(review): this parameter shadows the keras `layers` module name
        inside this method (kept as-is — renaming would change the interface).
    int_bends: per-stage number of intermediate "bend" outputs (units whose
        output is also fed to a detection head).
    use_preresnet: use PreResUnit instead of ResUnit as the encoder unit.
    in_channels: input image channels (default 3).
    in_size: expected input spatial size.
    data_format: 'channels_last' or 'channels_first'.
    """
    super(LFFD, self).__init__(**kwargs)
    self.in_size = in_size
    self.data_format = data_format
    unit_class = PreResUnit if use_preresnet else ResUnit
    # Encoder convs use bias and no batch norm.
    use_bias = True
    use_bn = False

    # Encoder: collects every tensor flagged with do_output / do_output2.
    self.encoder = MultiOutputSequential(return_last=False)
    self.encoder.add(conv3x3_block(in_channels=in_channels, out_channels=init_block_channels, strides=2, padding=0, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="init_block"))
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(enc_channels):
        layers_per_stage = layers[i]
        int_bends_per_stage = int_bends[i]
        stage = MultiOutputSequential(multi_output=False, dual_output=True, name="stage{}".format(i + 1))
        # Strided transition conv halves the resolution entering the stage.
        stage.add(conv3x3(in_channels=in_channels, out_channels=channels_per_stage, strides=2, padding=0, use_bias=use_bias, data_format=data_format, name="trans{}".format(i + 1)))
        for j in range(layers_per_stage):
            unit = unit_class(in_channels=channels_per_stage, out_channels=channels_per_stage, strides=1, use_bias=use_bias, use_bn=use_bn, bottleneck=False, data_format=data_format, name="unit{}".format(j + 1))
            # The last `int_bends_per_stage` units also emit an intermediate output.
            if layers_per_stage - j <= int_bends_per_stage:
                unit.do_output = True
            stage.add(unit)
        final_activ = nn.ReLU(name="final_activ")
        final_activ.do_output = True
        stage.add(final_activ)
        stage.do_output2 = True
        in_channels = channels_per_stage
        self.encoder.add(stage)

    # Decoder: one detection block per encoder output, added in the same
    # order the encoder emits them (intermediate bends first, stage end last).
    self.decoder = ParallelConcurent()
    k = 0
    for i, channels_per_stage in enumerate(enc_channels):
        layers_per_stage = layers[i]
        int_bends_per_stage = int_bends[i]
        for j in range(layers_per_stage):
            if layers_per_stage - j <= int_bends_per_stage:
                self.decoder.add(
                    LffdDetectionBlock(in_channels=channels_per_stage, mid_channels=dec_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="unit{}".format(k + 1)))
                k += 1
        # Detection block for the stage's final output.
        self.decoder.add(
            LffdDetectionBlock(in_channels=channels_per_stage, mid_channels=dec_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="unit{}".format(k + 1)))
        k += 1
def __init__(self, heads: Dict[str, int], head_convs: Dict[str, List[int]], num_stacks: int, opt: BaseModelOptions = None):
    """Build one output head per entry in `heads` and attach it as an attribute.

    heads: Dict[str, int] - head name: corresponding number of output classes
    head_convs: Dict[str, List[int]] - head name: list with number of output
        channels for each convolution; an empty list means the head is a
        single 1x1 convolution.
    num_stacks: int - how many times the output is replicated in the output list
    opt: BaseModelOptions - supplies head_kernel and prior_bias
    """
    super().__init__()
    # Allow a non-default kernel size for the first conv of each head.
    if opt is not None and opt.head_kernel != 3:
        print('Using head kernel:', opt.head_kernel)
        head_kernel = opt.head_kernel
    else:
        head_kernel = 3
    self.num_stacks = num_stacks
    self.heads = heads
    for head in self.heads:
        classes = self.heads[head]
        head_conv = head_convs[head]
        if head_conv:
            # Final 1x1 projection down to `classes` channels.
            out = layers.Conv2D(classes, kernel_size=1, strides=1, padding='valid', use_bias=True, bias_initializer='zeros', data_format='channels_first')
            conv = layers.Conv2D(head_conv[0], kernel_size=head_kernel, padding='same', use_bias=True, bias_initializer='zeros', data_format='channels_first')
            convs = [conv, layers.ReLU()]
            # Any further intermediate convolutions are 1x1.
            for k in range(1, len(head_conv)):
                convs.extend([
                    layers.Conv2D(head_conv[k], kernel_size=1, use_bias=True, data_format='channels_first'),
                    layers.ReLU()
                ])
            convs.append(out)
            fc = tf.keras.Sequential(convs)
            # Heatmap heads get a prior bias on the last conv; the initializer
            # is replaced before the layer is built, so it takes effect at
            # build time.
            # NOTE(review): dereferences opt.prior_bias — raises if an 'hm'
            # head is used with opt=None; confirm callers always pass opt.
            if 'hm' in head:
                fc.layers[
                    -1].bias_initializer = tf.keras.initializers.constant(
                        opt.prior_bias)
        else:
            # No intermediate convs: the head is a single 1x1 convolution.
            fc = layers.Conv2D(classes, kernel_size=1, strides=1, padding='valid', use_bias=True, bias_initializer='zeros')
            if 'hm' in head:
                fc.bias_initializer = tf.keras.initializers.constant(
                    opt.prior_bias)
        # Expose the head as an attribute named after it.
        self.__setattr__(head, fc)