Example #1
def midium(inputs):
    cnn = Conv1D(36, 3, padding="same")(inputs)
    cnn = layers.PReLU()(cnn)
    cnn = Conv1D(72, 3, padding="same")(cnn)
    cnn = layers.PReLU()(cnn)
    
    return cnn
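The fragment above assumes that Conv1D and layers are already imported in the source file. Below is a minimal, hedged sketch of how such a block could be wired into a model; the sequence length and channel count are assumptions, not part of the original:

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import Conv1D

inputs = tf.keras.Input(shape=(128, 1))  # assumed sequence length and channel count
outputs = midium(inputs)                 # the Conv1D + PReLU stack defined above
model = tf.keras.Model(inputs, outputs)
model.summary()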
Example #2
 def __init__(self, n_channels, name="decoder", **kwargs):
     super(Decoder, self).__init__(name=name, **kwargs)
     self.data_format = "channels_last"
     num_filters = 256
     self.sublayers = [
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_out",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_out", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_0",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_0", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_1",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_1", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_2",
             corr=False,
             strides_up=2,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_2", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             n_channels,
             (9, 9),
             name="layer_3",
             corr=False,
             strides_up=2,
             padding="same_zeros",
             use_bias=True,
             activation=tf.nn.sigmoid,
         ),
     ]
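This example and the later SignalConv2D/GDN encoders and decoders rely on the tensorflow-compression package; the imports they assume are roughly the following (a sketch, since the original files are not shown in full):

import tensorflow as tf
import tensorflow_compression as tfc
from tensorflow.keras import layers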
Example #3
 def __init__(self, conv_depth, name="encoder", **kwargs):
     super(Encoder, self).__init__(name=name, **kwargs)
     self.data_format = "channels_last"
     num_filters = 256
     self.sublayers = [
         tfc.SignalConv2D(
             num_filters,
             (9, 9),
             name="layer_0",
             corr=True,
             strides_down=2,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_0"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_1",
             corr=True,
             strides_down=2,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_1"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_2",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_2"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_3",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_3"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             conv_depth,
             (5, 5),
             name="layer_out",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=None,
         ),
     ]
Example #4
def Decoder(x, feedback_bits):
    B = 4
    #decoder_input = DeuantizationLayer(B)(x)
    decoder_input = tf.keras.layers.Reshape((-1, int(feedback_bits / B)))(x)
    x = layers.Dense(1024, activation='linear')(decoder_input)
    x = layers.Reshape((16, 32, 2))(x)

    x = layers.Conv2D(conv_feature_size, 5, padding='SAME')(x)
    x = layers.BatchNormalization()(x)
    x_ini = layers.PReLU()(x)
    x_tmp = layers.PReLU()(x)

    for i in range(3):
        x = layers.Conv2D(conv_feature_size, 5, padding='SAME')(x_ini)
        x = layers.BatchNormalization()(x)
        x = layers.PReLU()(x)
        x = layers.Conv2D(conv_feature_size, 5, padding='SAME')(x)
        x = layers.BatchNormalization()(x)
        x = layers.PReLU()(x)
        x_ini = keras.layers.Add()([x_ini, x])

    x = keras.layers.Add()([x_ini, x_tmp])

    #x = layers.UpSampling2D(size=(2,2))(x)
    x = layers.Conv2D(conv_feature_size, 5, padding='SAME')(x)
    x = layers.BatchNormalization()(x)
    x = layers.PReLU()(x)
    #x = layers.UpSampling2D(size=(4,4))(x)
    decoder_output = layers.Conv2D(2, 7, padding='SAME',
                                   activation='sigmoid')(x)

    return decoder_output
Example #5
def build_model():
    model = Sequential([
        layers.Dense(
            50,
            input_shape=(test_dataset.shape[-1], ),
        ),
        layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)),
        layers.LayerNormalization(),
        layers.Dropout(0.5),
        layers.Dense(50),
        layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)),
        layers.LayerNormalization(),
        layers.Dropout(0.5),
        layers.Dense(1, activation="sigmoid"),
    ])

    metrics = [
        keras.metrics.FalseNegatives(name="fn"),
        keras.metrics.FalsePositives(name="fp"),
        keras.metrics.TrueNegatives(name="tn"),
        keras.metrics.TruePositives(name="tp"),
        keras.metrics.Precision(name="precision"),
        keras.metrics.Recall(name="recall"),
        keras.metrics.AUC(name='auc'),
    ]

    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss="binary_crossentropy",
                  metrics=metrics)

    return model
Example #6
def Decoder(x, feedback_bits):
    B = 4
    #decoder_input = DeuantizationLayer(B)(x)
    decoder_input = layers.Reshape((-1, int(feedback_bits / B)))(x)
    x = layers.Reshape((8, 16, 2))(x)
    x = layers.UpSampling2D(size=(2, 2),
                            data_format=None,
                            interpolation='nearest')(x)
    tmp = layers.Conv2D(conv_feature_size, 7, padding='SAME')(x)
    x = layers.PReLU()(tmp)

    x = layers.UpSampling2D(size=(2, 2),
                            data_format=None,
                            interpolation='nearest')(x)
    x = layers.Conv2D(conv_feature_size // 2, 3, padding='SAME')(x)
    x = layers.BatchNormalization()(x)
    x = layers.PReLU()(x)
    x = layers.Conv2D(conv_feature_size // 2, 3, padding='SAME')(x)
    x = layers.BatchNormalization()(x)
    x = layers.PReLU()(x)
    x = layers.Conv2D(conv_feature_size // 2, 3, padding='SAME')(x)
    x = layers.BatchNormalization()(x)
    x = layers.PReLU()(x)

    x = layers.Conv2D(conv_feature_size // 4, 3, padding='SAME')(x)

    x = layers.Add()([tmp, x])

    decoder_output = layers.Conv2D(2, 7, padding='SAME',
                                   activation='sigmoid')(x)

    return decoder_output
Example #7
    def con2d_net_block(self, input_layer, block=1, repeat_times=1):
        for i in range(repeat_times):
            con2d_layer = layers.Conv2D(
                int(math.pow(2, block - 1) * self.start_nodes), 1,
                1)(input_layer)
            bn_layer = layers.BatchNormalization()(con2d_layer)
            act_layer = layers.PReLU()(bn_layer)

            con2d_layer = layers.Conv2D(
                int(math.pow(2, block - 1) * self.start_nodes), 3, 1,
                'same')(act_layer)
            bn_layer = layers.BatchNormalization()(con2d_layer)
            act_layer = layers.PReLU()(bn_layer)

            increase_channels_con2d = layers.Conv2D(
                int(math.pow(2, block - 1) * self.start_nodes) * 4, 1,
                1)(act_layer)
            increase_channels_bn_layer = layers.BatchNormalization()(
                increase_channels_con2d)

            residual_con2d_layer = layers.Conv2D(
                int(math.pow(2, block - 1) * self.start_nodes) * 4, 1,
                1)(input_layer)
            residual_bn_layer = layers.BatchNormalization()(
                residual_con2d_layer)

            add_layer = layers.add(
                [increase_channels_bn_layer, residual_bn_layer])
            # reassigned here for convenience of the loop
            input_layer = layers.PReLU()(add_layer)

        return input_layer
Example #8
def get_model():
    inputs = keras.Input(shape=(50, 100, 1))
    x = layers.Conv2D(16, 3)(inputs)
    x = layers.Conv2D(16, 3, padding="same")(x)
    x = layers.PReLU()(x)
    x = layers.Conv2D(32, 3)(x)
    x = layers.Conv2D(32, 3, padding="same")(x)
    x = layers.PReLU()(x)
    x = layers.MaxPooling2D(3)(x)
    x = layers.Dropout(0.25)(x)

    x = layers.Conv2D(64, 3)(x)
    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(128, 3)(x)
    x = layers.Conv2D(128, 3, padding="same")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPooling2D(3)(x)

    x = layers.Flatten()(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(CODE_LEN * DICT_LEN)(x)
    x = layers.Reshape([CODE_LEN, DICT_LEN])(x)
    outputs = layers.Softmax()(x)
    model = keras.Model(inputs=inputs, outputs=outputs, name="captcha_model")
    model.compile(
        loss="categorical_crossentropy",
        optimizer="Adam",
        metrics=["accuracy"],
    )
    return model
Example #9
def make_rnet(train=True):
    input = layers.Input(shape=(24, 24, 3))

    x = layers.Conv2D(28, kernel_size=(3, 3))(input)
    x = layers.PReLU(shared_axes=[1, 2])(x)
    x = layers.MaxPooling2D(pool_size=3, strides=2)(x)

    x = layers.Conv2D(48, kernel_size=(3, 3))(x)
    x = layers.PReLU(shared_axes=[1, 2])(x)
    x = layers.MaxPooling2D(pool_size=3, strides=2)(x)

    x = layers.Conv2D(64, kernel_size=(2, 2))(x)
    x = layers.PReLU(shared_axes=[1, 2])(x)

    x = layers.Flatten()(x)
    x = layers.Dense(128)(x)
    x = layers.PReLU()(x)

    classifier = layers.Dense(3, activation='softmax', name='face_cls')(x)
    bbox_regress = layers.Dense(5, name='bbox_reg')(x)
    landmark_regress = layers.Dense(11, name='ldmk_reg')(x)

    if train is False:
        classifier = SampleLabelFilter(classifier)
        classifier = layers.Activation('softmax')(classifier)
        bbox_regress = SampleLabelFilter(bbox_regress)
        landmark_regress = SampleLabelFilter(landmark_regress)

    # outputs = layers.Concatenate()(
    #     [classifier, bbox_regress, landmark_regress])

    outputs = [classifier, bbox_regress, landmark_regress]
    model = models.Model(input, outputs)

    return model
Example #10
    def reduce_image_size(self, input_layer, block=1):
        # consider adding layer names in the future to make the architecture easier to identify
        block_name = "block_" + str(block)
        reduce_size_con2d_layer = layers.Conv2D(
            int(math.pow(2, block - 1) * self.start_nodes),
            1,
            2,
            name=block_name + "_reduce_size_con2d_layer")(input_layer)
        reduce_size_bn_layer = layers.BatchNormalization()(
            reduce_size_con2d_layer)
        reduce_size_act_layer = layers.PReLU()(reduce_size_bn_layer)

        con2d_layer = layers.Conv2D(
            int(math.pow(2, block - 1) * self.start_nodes), 3, 1,
            "same")(reduce_size_act_layer)
        bn_layer = layers.BatchNormalization()(con2d_layer)
        act_layer = layers.PReLU()(bn_layer)

        increase_channels_con2d = layers.Conv2D(
            int(math.pow(2, block - 1) * self.start_nodes) * 4, 1,
            1)(act_layer)
        increase_channels_bn_layer = layers.BatchNormalization()(
            increase_channels_con2d)

        residual_con2d_layer = layers.Conv2D(
            int(math.pow(2, block - 1) * self.start_nodes) * 4, 1,
            2)(input_layer)
        residual_bn_layer = layers.BatchNormalization()(residual_con2d_layer)

        add_layer = layers.add([increase_channels_bn_layer, residual_bn_layer])
        output_act_layer = layers.PReLU()(add_layer)

        return output_act_layer
Example #11
    def classification_model(window_size, layer_name_list):
        input_layer = Input(shape=(
            window_size,
            window_size,
            1,
        ))
        con2d_layer = layers.Conv2D(8, 2, name=layer_name_list[0])(input_layer)
        act_layer = layers.PReLU(name=layer_name_list[1])(con2d_layer)
        bn_layer = layers.BatchNormalization(
            name=layer_name_list[2])(act_layer)
        dropout_layer = layers.Dropout(0.5, name=layer_name_list[3])(bn_layer)
        con2d_layer = layers.Conv2D(16, 2,
                                    name=layer_name_list[4])(dropout_layer)
        act_layer = layers.PReLU(name=layer_name_list[5])(con2d_layer)
        bn_layer = layers.BatchNormalization(
            name=layer_name_list[6])(act_layer)
        dropout_layer = layers.Dropout(0.5, name=layer_name_list[7])(bn_layer)
        con2d_layer = layers.Conv2D(32, 2,
                                    name=layer_name_list[8])(dropout_layer)
        act_layer = layers.PReLU(name=layer_name_list[9])(con2d_layer)
        bn_layer = layers.BatchNormalization(
            name=layer_name_list[10])(act_layer)
        dropout_layer = layers.Dropout(0.5, name=layer_name_list[11])(bn_layer)
        flatten_layer = layers.Flatten()(dropout_layer)
        hidden_layer = layers.Dense(20)(flatten_layer)
        act_layer = layers.PReLU()(hidden_layer)
        dropout_layer = layers.Dropout(0.5)(act_layer)
        hidden_layer = layers.Dense(40)(dropout_layer)
        act_layer = layers.PReLU()(hidden_layer)
        dropout_layer = layers.Dropout(0.5)(act_layer)
        output_layer = layers.Dense(10, activation='softmax')(dropout_layer)

        return Model(input_layer, output_layer)
Example #12
    def __init__(self, embedding_count_dict, embedding_dim_dict, embedding_features_list, user_behavior_features,
                 activation='PReLU'):
        super(DIN, self).__init__()
        # Init Embedding Layer
        self.embedding_count_dict = embedding_count_dict
        self.embedding_dim_dict = embedding_dim_dict
        self.embedding_layers = dict()

        for feature in embedding_features_list:
            self.embedding_layers[feature] = layers.Embedding(input_dim=embedding_count_dict[feature],
                                                              output_dim=embedding_dim_dict[feature],
                                                              embeddings_initializer='random_uniform')

        # DIN Attention + Sum Pooling
        self.hist_at = attention(alibaba_utils.get_input_dim(embedding_dim_dict, user_behavior_features))
        # Init Fully Connection Layer
        self.fc = tf.keras.Sequential()
        self.fc.add(layers.BatchNormalization())
        self.fc.add(layers.Dense(200, activation="relu"))
        if activation == "Dice":
            self.fc.add(Dice())
        elif activation == "dice":
            self.fc.add(dice(200))
        elif activation == "PReLU":
            self.fc.add(layers.PReLU(alpha_initializer='zeros', weights=None))
        self.fc.add(layers.Dense(80, activation="relu"))
        if activation == "Dice":
            self.fc.add(Dice())
        elif activation == "dice":
            self.fc.add(dice(80))
        elif activation == "PReLU":
            self.fc.add(layers.PReLU(alpha_initializer='zeros', weights=None))
        self.fc.add(layers.Dense(2, activation=None))
Example #13
    def define_architecture(self,
                            model_input,
                            final_activation="softmax",
                            out_name="m"):
        """Build the V-Net model."""
        # Downsampling / encoding portion
        conv0 = self.side_conv_block(model_input, 16, length=1)
        down0 = layers.Conv3D(32, (2, 2, 2), strides=(2, 2, 1),
                              padding="same")(conv0)
        down0 = layers.PReLU()(down0)

        conv1 = self.side_conv_block(down0, 32, length=2)
        down1 = layers.Conv3D(64, (2, 2, 2), strides=(2, 2, 1),
                              padding="same")(conv1)
        down1 = layers.PReLU()(down1)

        conv2 = self.side_conv_block(down1, 64, length=3)
        down2 = layers.Conv3D(128, (2, 2, 2),
                              strides=(2, 2, 1),
                              padding="same")(conv2)
        down2 = layers.PReLU()(down2)

        conv3 = self.side_conv_block(down2, 128, length=3)
        down3 = layers.Conv3D(256, (2, 2, 2),
                              strides=(2, 2, 1),
                              padding="same")(conv3)
        down3 = layers.PReLU()(down3)

        # Middle of network
        conv4 = self.side_conv_block(down3, 256, length=3)

        # Upsampling / decoding portion
        uconv3 = self.up_conv_block(conv4,
                                    conv3,
                                    256,
                                    length=3,
                                    strides=(2, 2, 1))
        uconv2 = self.up_conv_block(uconv3,
                                    conv2,
                                    128,
                                    length=3,
                                    strides=(2, 2, 1))
        uconv1 = self.up_conv_block(uconv2,
                                    conv1,
                                    64,
                                    length=2,
                                    strides=(2, 2, 1))
        uconv0 = self.up_conv_block(uconv1,
                                    conv0,
                                    32,
                                    length=1,
                                    strides=(2, 2, 1))

        out = layers.Conv3D(self.output_length, (1, 1, 1),
                            padding="same",
                            activation=None)(uconv0)
        out = layers.Activation(final_activation, name=out_name)(out)

        return out
Example #14
def cnn_model(input_shape):
    img_input = layers.Input(input_shape)
    ## First CNN block:
    conv2D_1 = layers.Conv2D(64, (3, 3),
                             strides=2,
                             padding='valid',
                             name='Conv_1',
                             input_shape=input_shape,
                             kernel_initializer='he_normal')(img_input)
    conv2D_1 = layers.BatchNormalization(axis=3)(conv2D_1)
    conv2D_1 = layers.PReLU()(conv2D_1)
    conv2D_2 = layers.Conv2D(64, (3, 3),
                             strides=1,
                             padding='same',
                             name='Conv_2',
                             kernel_initializer='he_normal')(conv2D_1)
    conv2D_2 = layers.BatchNormalization(axis=3)(conv2D_2)
    conv2D_2 = layers.PReLU()(conv2D_2)
    maxPooling_2 = layers.MaxPooling2D((3, 3),
                                       strides=(2, 2),
                                       name='MaxPooling_1')(conv2D_2)

    ## Second CNN block:
    conv2D_3 = layers.Conv2D(128, (3, 3),
                             strides=1,
                             padding='same',
                             name='Conv_3',
                             kernel_initializer='he_normal')(maxPooling_2)
    conv2D_3 = layers.BatchNormalization(axis=3)(conv2D_3)
    conv2D_3 = layers.PReLU()(conv2D_3)
    conv2D_4 = layers.Conv2D(128, (3, 3),
                             strides=1,
                             padding='same',
                             name='Conv_4',
                             kernel_initializer='he_normal')(conv2D_3)
    conv2D_4 = layers.BatchNormalization(axis=3)(conv2D_4)
    conv2D_4 = layers.PReLU()(conv2D_4)
    averagePooling_4 = layers.AveragePooling2D(
        (3, 3), strides=(2, 2), name='AveragePooling_1')(conv2D_4)

    flatten = layers.Flatten()(averagePooling_4)
    fc = layers.Dropout(0.5)(flatten)
    fc = layers.Dense(256,
                      kernel_regularizer=regularizers.l2(0.01),
                      kernel_initializer='he_normal',
                      name='FC1')(fc)
    fc = layers.Activation('relu')(fc)
    fc = layers.Dropout(0.3)(fc)
    fc = layers.Dense(128,
                      kernel_regularizer=regularizers.l2(0.01),
                      kernel_initializer='he_normal',
                      name='FC2')(fc)
    fc = layers.Activation('relu')(fc)
    fc = layers.Dropout(0.3)(fc)
    output = layers.Dense(1)(fc)

    cnn_model = models.Model(inputs=[img_input], outputs=[output])

    return cnn_model
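A hedged usage sketch for the function above; the imports mirror what the snippet appears to rely on, and the input shape, optimizer and loss are assumptions:

import tensorflow as tf
from tensorflow.keras import layers, models, regularizers

model = cnn_model((64, 64, 3))  # hypothetical RGB input size
model.compile(optimizer="adam", loss="mse")
model.summary()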
Example #15
def fine(inputs):
    cnn = Conv1D(36, 4, padding="same")(inputs)
    cnn = layers.PReLU()(cnn)
    cnn = Conv1D(36, 4, padding="same")(cnn)
    cnn = layers.PReLU()(cnn)
    cnn = Conv1D(72, 4, padding="same")(cnn)
    cnn = layers.PReLU()(cnn)
    
    return cnn
Example #16
    def __init__(self,
                 embedding_count_dict,
                 embedding_dim_dict,
                 embedding_features_list,
                 user_behavior_features,
                 activation="PReLU"):
        super(DIEN, self).__init__(embedding_count_dict, embedding_dim_dict,
                                   embedding_features_list, activation)
        """DIEN初始化model函数
        
        该函数在调用DIEN时进行DIEN的Embedding层,GRU层,AUGRU层,全连接层的初始化操作

        Args:
            embedding_count_dict:string->int格式,该变量记录需要embedding各个特征的词典个数,即最大整数索引+ 1的大小;
            embedding_dim_dict:string->int格式,该变量记录需要embedding各个特征的输出维数,即密集嵌入的尺寸;
            embedding_features_list:list(string)格式,该变量记录DIEN中user_profile部分所有需要embedding的feature名称;
            user_behavior_features:list(string)格式,该变量记录DIEN中user_behavior与target_item部分所有需要embedding的feature名称
            activation:string格式,默认值"PReLU",该变量空值全连接层激活函数,”PReLU“->PReLU,"Dice"->Dice
        """
        #Init Embedding Layer
        self.embedding_dim_dict = embedding_dim_dict
        self.embedding_count_dict = embedding_count_dict
        self.embedding_layers = dict()
        for feature in embedding_features_list:
            self.embedding_layers[feature] = layers.Embedding(
                embedding_count_dict[feature], embedding_dim_dict[feature])
        #Init GRU Layer
        self.user_behavior_gru = layers.GRU(self.get_GRU_input_dim(
            embedding_dim_dict, user_behavior_features),
                                            return_sequences=True)
        #Init Attention Layer
        self.attention_layer = layers.Softmax()
        #Init Auxiliary Layer
        self.AuxNet = AuxLayer()
        #Init AUGRU Layer
        self.user_behavior_augru = AUGRU(
            self.get_GRU_input_dim(embedding_dim_dict, user_behavior_features))
        #Init Fully Connection Layer
        self.fc = tf.keras.Sequential()
        self.fc.add(layers.BatchNormalization())
        self.fc.add(layers.Dense(200, activation="relu"))
        if activation == "Dice":
            self.fc.add(Dice())
        elif activation == "dice":
            self.fc.add(dice(200))
        elif activation == "PReLU":
            self.fc.add(layers.PReLU(alpha_initializer='zeros', weights=None))
        self.fc.add(layers.Dense(80, activation="relu"))
        if activation == "Dice":
            self.fc.add(Dice())
        elif activation == "dice":
            self.fc.add(dice(80))
        elif activation == "PReLU":
            self.fc.add(layers.PReLU(alpha_initializer='zeros', weights=None))
        self.fc.add(layers.Dense(2, activation=None))
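The docstring above describes the dictionaries the constructor expects. Here is a minimal sketch, with hypothetical feature names and sizes, of how those arguments might be assembled; DIEN itself (along with Dice, AUGRU and AuxLayer) is defined in the surrounding project, so its instantiation is only shown as a comment:

embedding_features_list = ["user_id", "item_id", "item_category"]
user_behavior_features = ["item_id", "item_category"]

# vocabulary size (maximum integer index + 1) for each embedded feature
embedding_count_dict = {"user_id": 10000, "item_id": 5000, "item_category": 200}
# dense embedding size for each embedded feature
embedding_dim_dict = {"user_id": 32, "item_id": 32, "item_category": 16}

# model = DIEN(embedding_count_dict, embedding_dim_dict,
#              embedding_features_list, user_behavior_features,
#              activation="PReLU")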
Example #17
        def regression_subnet(shape):
            input_img = layers.Input(shape)

            # downsample a bit
            net = layers.Conv2D(filters=16,
                                kernel_size=3,
                                strides=2,
                                padding='same')(input_img)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            # (48 48 16)

            net = layers.Conv2D(filters=32,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            # (24 24 32)

            net = layers.Conv2D(filters=64,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            # (12 12 64)

            net = layers.Conv2D(filters=128,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            # (6 6 128)

            net = layers.Conv2D(filters=256,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            # (3 3 256)

            net = layers.Conv2D(filters=self.config.num_landmarks * 2,
                                kernel_size=1,
                                strides=1,
                                padding='valid')(net)
            out = tf.reshape(net, [-1, self.config.num_landmarks, 2],
                             name='landmarks')

            model = tf.keras.Model(inputs=input_img,
                                   outputs=out,
                                   name='regression_subnet')
            return model
Example #18
 def __init__(self, n_channels, name="decoder", **kwargs):
     super(Decoder, self).__init__(name=name, **kwargs)
     self.data_format = "channels_last"
     self.sublayers = [
         tfc.SignalConv2D(
             32,
             (5, 5),
             name="conv_1",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             32,
             (5, 5),
             name="conv_2",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             32,
             (5, 5),
             name="conv_3",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             16,
             (5, 5),
             name="conv_4",
             corr=False,
             strides_up=2,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             n_channels,
             (5, 5),
             name="conv_5",
             corr=False,
             strides_up=2,
             padding="same_zeros",
             use_bias=True,
             activation=tf.nn.sigmoid,
         ),
     ]
Example #19
 def __init__(self, conv_depth, name="encoder", **kwargs):
     super(Encoder, self).__init__(name=name, **kwargs)
     self.data_format = "channels_last"
     self.sublayers = [
         tfc.SignalConv2D(
             16,
             (5, 5),
             name="conv_1",
             corr=True,
             strides_down=2,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             32,
             (5, 5),
             name="conv_2",
             corr=True,
             strides_down=2,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             32,
             (5, 5),
             name="conv_3",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             32,
             (5, 5),
             name="conv_4",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             conv_depth,
             (5, 5),
             name="conv_5",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=None,
         ),
     ]
Example #20
    def call(self, inputs, training):
        x, x_highway = inputs
        merged = tfkl.concatenate([x, x_highway])
        in_1 = merged
        for _ in range(self.num_conv):
            in_1 = tfkl.PReLU()(self.conv(in_1))

        add_1 = tfkl.add([in_1, merged])
        output = self.up_conv(add_1)
        output = tfkl.PReLU()(output)
        return output
Example #21
def __bottleneck_block(input,
                       filters=96,
                       cardinality=16,
                       strides=1,
                       weight_decay=0):
    ''' Adds a bottleneck block
    Args:
        input: input tensor
        filters: number of output filters
        cardinality: cardinality factor described number of
            grouped convolutions
        strides: performs strided convolution for downsampling if > 1
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    init = input

    grouped_channels = int(filters / cardinality)
    channel_axis = -1

    if init.shape[-1] != 2 * filters:
        init = Conv2D(filters * 2, (1, 1),
                      padding='same',
                      strides=(strides, strides),
                      use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(weight_decay))(init)
        init = BatchNormalization(axis=channel_axis)(init)

    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = layers.PReLU()(x)
    # x = layers.Activation(tf.nn.swish)(x)

    x = __grouped_convolution_block(x, grouped_channels, cardinality, strides,
                                    weight_decay)

    x = Conv2D(filters * 2, (1, 1),
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = SqueezeExciteLayer(16)(x)

    x = add([init, x])
    x = layers.PReLU()(x)

    return x
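For reference, a self-contained sketch of the same Conv -> BN -> PReLU residual pattern, leaving out the grouped convolution and squeeze-excite parts that depend on helpers defined elsewhere in that project; filter counts and input shape are assumptions:

import tensorflow as tf
from tensorflow.keras import layers


def simple_bottleneck(x, filters):
    # project the shortcut if the channel count changes
    shortcut = x
    if x.shape[-1] != filters:
        shortcut = layers.Conv2D(filters, 1, padding="same", use_bias=False)(shortcut)
        shortcut = layers.BatchNormalization()(shortcut)
    y = layers.Conv2D(filters, 3, padding="same", use_bias=False)(x)
    y = layers.BatchNormalization()(y)
    y = layers.PReLU()(y)
    y = layers.add([shortcut, y])
    return layers.PReLU()(y)


inputs = tf.keras.Input(shape=(32, 32, 16))
outputs = simple_bottleneck(inputs, 64)
model = tf.keras.Model(inputs, outputs)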
Example #22
def creat_clustering_model_pre(number_of_class, maxlen, input_dim, output_dim, layers_name):

    input_layer     = Input(shape = (maxlen,), name = layers_name[0])
    # input_length is the length of each input sample
    # and is a required argument if a Flatten layer follows the embedding
    embedding_layer  = layers.Embedding(input_dim, output_dim, input_length = maxlen, name = layers_name[1])(input_layer)
    flatten_layer    = layers.Flatten(name = layers_name[2])(embedding_layer)
    hidden_layer     = layers.Dense(64, name = layers_name[3])(flatten_layer)
    activation_layer = layers.PReLU(name = layers_name[4])(hidden_layer)
    hidden_layer     = layers.Dense(128, name = layers_name[5])(activation_layer)
    activation_layer = layers.PReLU(name = layers_name[6])(hidden_layer)
    hidden_layer     = layers.Dense(256, name = layers_name[7])(activation_layer)
    activation_layer = layers.PReLU(name = layers_name[8])(hidden_layer)
    hidden_layer     = layers.Dense(512, name = layers_name[9])(activation_layer)
    activation_layer = layers.PReLU(name = layers_name[10])(hidden_layer)
    label_layer      = layers.Dense(number_of_class, activation = 'softmax', name = layers_name[11])(activation_layer)
    hidden_layer     = layers.Dense(512)(label_layer)
    activation_layer = layers.PReLU()(hidden_layer)
    hidden_layer     = layers.Dense(256)(activation_layer)
    activation_layer = layers.PReLU()(hidden_layer)
    hidden_layer     = layers.Dense(128)(activation_layer)
    activation_layer = layers.PReLU()(hidden_layer)
    hidden_layer     = layers.Dense(64)(activation_layer)
    activation_layer = layers.PReLU()(hidden_layer)
    output_layer     = layers.Dense(maxlen * output_dim)(activation_layer)
    #output_layer     = layers.PReLU()(hidden_layer)

    return Model(input_layer, [output_layer, flatten_layer, label_layer])
Example #23
def __grouped_convolution_block(input,
                                grouped_channels,
                                cardinality,
                                strides,
                                weight_decay=0):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3),
                   padding='same',
                   use_bias=False,
                   strides=(strides, strides),
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = layers.PReLU()(x)
        return x

    for c in range(cardinality):
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:
                               (c + 1) * grouped_channels])(input)

        x = Conv2D(grouped_channels, (7, 7),
                   padding='same',
                   use_bias=False,
                   strides=(strides, strides),
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = layers.PReLU()(x)
    # x = layers.Activation(tf.nn.swish)(x)

    return x
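As a side note, recent tf.keras releases let Conv2D perform grouped convolution directly through its groups argument, which can replace the per-slice Lambda pattern above. A hedged sketch; kernel size and channel counts are assumptions:

import tensorflow as tf
from tensorflow.keras import layers

cardinality = 4
grouped_channels = 8  # output channels per group

inputs = tf.keras.Input(shape=(32, 32, cardinality * grouped_channels))
x = layers.Conv2D(cardinality * grouped_channels, (3, 3), padding="same",
                  use_bias=False, groups=cardinality)(inputs)
x = layers.BatchNormalization()(x)
x = layers.PReLU()(x)
model = tf.keras.Model(inputs, x)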
Example #24
    def build(num_classes: int):
        '''
        Build the Seq12 architecture given width, height and depth
        as dimensions of the input tensor and the corresponding
        number of classes of the data.

        parameters
        ----------
            num_classes:  output size
        
        returns
        -------
            model: the Seq12 model compatible with given inputs
                    as a keras sequential model.
        '''
        # initialize model
        print("[INFO] preparing model...")

        # augment images
        data_augmentation = tf.keras.Sequential([
            layers.experimental.preprocessing.RandomFlip(
                "horizontal",
                input_shape=(config.img_height, config.img_width,
                             config.depth)),
            layers.experimental.preprocessing.RandomRotation(0.1),
            layers.experimental.preprocessing.RandomZoom(0.1),
            layers.experimental.preprocessing.Rescaling(1. / 255)
        ])

        model = Sequential(name='SeqS')
        model.add(data_augmentation)
        model.add(layers.Conv2D(16, 3, padding='same', activation='relu'))
        model.add(layers.BatchNormalization())
        model.add(layers.PReLU())
        model.add(layers.MaxPooling2D())

        model.add(layers.Flatten())
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.BatchNormalization())
        model.add(layers.PReLU())
        model.add(layers.Dropout(0.5))

        model.add(layers.Dense(num_classes))
        model.add(layers.Activation('softmax'))

        #model.summary()

        # return the constructed network architecture
        return model
Example #25
def ACR_ENC2(x):
    res = x
    input_ch = x.shape[-1]

    x = layers.Conv2D(input_ch,
                      kernel_size=(1, 3),
                      strides=1,
                      padding='same',
                      data_format='channels_last',
                      use_bias=False)(x)
    x = add_common_layers(x)

    x = layers.Conv2D(input_ch,
                      kernel_size=(3, 1),
                      strides=1,
                      padding='same',
                      data_format='channels_last',
                      use_bias=False)(x)
    x = layers.BatchNormalization()(x)

    x = SqueezeExciteLayer(16)(x)

    x = layers.PReLU()(x + res)

    return x
Example #26
    def creat_model(self):
        # experimenting with a ResNet-like architecture
            
        input_layer   = Input(shape = [400, 400, self.channels, ], name = "Input_layer")
        # the first block mostly uses a larger stride to reduce dimensionality
        block_0_con2d_layer   = layers.Conv2D(self.start_nodes, 3, 2, name = "block_0_con2d_layer")(input_layer)
        block_0_bn_layer      = layers.BatchNormalization(name = "block_0_bn_layer")(block_0_con2d_layer)
        block_0_act_layer     = layers.PReLU(name = "block_0_act_layer")(block_0_bn_layer)
        block_0_maxpool_layer = layers.MaxPool2D(name = "block_0_maxpool_layer")(block_0_act_layer)
        # the second stage consists of several blocks;
        # each block usually starts by reducing the resolution and increasing the channel count
        block_1_reduce_image_size = self.reduce_image_size(block_0_maxpool_layer, 1)
        block_1_main_con2d_net    = self.con2d_net_block(block_1_reduce_image_size, 1, 2)

        block_2_reduce_image_size = self.reduce_image_size(block_1_main_con2d_net, 2)
        block_2_main_con2d_net    = self.con2d_net_block(block_2_reduce_image_size, 2, 2)

        block_3_reduce_image_size = self.reduce_image_size(block_2_main_con2d_net, 3)
        block_3_main_con2d_net    = self.con2d_net_block(block_3_reduce_image_size, 3, 3)

        block_4_reduce_image_size = self.reduce_image_size(block_3_main_con2d_net, 4)
        block_4_main_con2d_net    = self.con2d_net_block(block_4_reduce_image_size, 4, 5)

        block_5_reduce_image_size = self.reduce_image_size(block_4_main_con2d_net, 5)
        block_5_main_con2d_net    = self.con2d_net_block(block_5_reduce_image_size, 5, 2)

        # finally, reduce dimensionality with an average pool and connect it to the output
        avg_pool_layer = layers.AvgPool2D(4, 1)(block_5_main_con2d_net)
        flatten_layer  = layers.Flatten()(avg_pool_layer)
        output_layer   = layers.Dense(self.number_of_class, activation = 'softmax')(flatten_layer)

        self.model    =  Model(input_layer, output_layer)
Example #27
def gen_unary_operator_test(name, type, input_shape):
    # Create model.
    inp = layers.Input(name='input',
                       batch_size=input_shape[0],
                       shape=input_shape[1:])
    if type == 'relu':
        out = tf.nn.relu(inp)
    elif type == 'relu_n1to1':
        out = tf.clip_by_value(inp, -1.0, 1.0)
    elif type == 'relu6':
        out = tf.nn.relu6(inp)
    elif type == 'sigmoid':
        out = tf.nn.sigmoid(inp)
    elif type == 'exp':
        out = tf.exp(inp)
    elif type == 'log':
        out = tf.math.log(inp)
    elif type == 'tanh':
        out = tf.nn.tanh(inp)
    elif type == 'leaky_relu':
        out = tf.nn.leaky_relu(inp, alpha=0.1)
    elif type == 'prelu':
        out = layers.PReLU(alpha_initializer='random_uniform')(inp)
    elif type == 'square':
        out = tf.math.square(inp)
    elif type == 'abs':
        out = tf.math.abs(inp)
    elif type == 'neg':
        out = tf.math.negative(inp)
    elif type == 'sqrt':
        out = tf.math.sqrt(inp)
    elif type == 'rsqrt':
        out = tf.math.rsqrt(inp)
    elif type == 'sin':
        out = tf.math.sin(inp)
    elif type == 'cos':
        out = tf.math.cos(inp)
    elif type == 'ceil':
        out = tf.math.ceil(inp)
    elif type == 'round':
        out = tf.math.round(inp)
    elif type == 'floor':
        out = tf.math.floor(inp)
    else:
        print('Unary operator "%s" not supported!' % type)
        exit(1)
    model = Model(inputs=[inp], outputs=[out])
    # Create data.
    np.random.seed(0)
    inp_tensor = np.random.randn(*input_shape).astype(np.float32)
    if type in ['log', 'sqrt', 'rsqrt']:
        inp_tensor = np.abs(inp_tensor) + 1
    out_tensor = model.predict(inp_tensor)
    # Save model.
    save_model(model, name)
    # Save data.
    save_tensor(inp_tensor, name + '.inp0')
    save_tensor(out_tensor, name + '.out0')
    # Clear session.
    keras_backend.clear_session()
Example #28
        def T_regressor(inputs, num_landmarks):
            dropout_rate = 0.5
            net = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='same')(inputs) # (48, 48, 32)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(net)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=32)
            net = branch_block(net, depth_strides=2, conv_filters=64) # (24, 24, 32)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=64)
            net = branch_block(net, depth_strides=2, conv_filters=64, pad=False) # (12, 12, 64)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=64)
            net = branch_block(net, depth_strides=2, conv_filters=64, pad=False) # (6, 6, 64)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=64)
            net = branch_block(net, depth_strides=2, conv_filters=128) # (3, 3, 64)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=128)
            net = conv_block(net, conv_filters=128)
            net = conv_block(net, conv_filters=128)

            net = layers.DepthwiseConv2D(kernel_size=3, strides=1, padding='valid',
                                         kernel_regularizer=tf.keras.regularizers.l2(0.01))(net)
            net = layers.Conv2D(filters=num_landmarks * 2, kernel_size=1, padding='same',
                                kernel_regularizer=tf.keras.regularizers.l2(0.01))(net)
            # (1, 1, num_landmarks)

            # reshape result
            return tf.reshape(net, [-1, num_landmarks, 2], name='landmark')
Example #29
def conv_bn_relu(image_in, mask_in, filters, kernel_size, 
                 downsampling=1, upsampling=1, act="relu",
                 concat_img=None, concat_mask=None, reps=1):
    assert not (concat_img is None)^(concat_mask is None)  # the XOR must always be False (pass both or neither)
    # when upsampling
    if upsampling > 1:
        conv = layers.Lambda(upsampling2d_tpu, arguments={"scale":upsampling})(image_in)
        mask = layers.Lambda(upsampling2d_tpu, arguments={"scale":upsampling})(mask_in)
    else:
        conv, mask = image_in, mask_in
    if concat_img is not None and concat_mask is not None:
        conv = layers.Concatenate()([conv, concat_img])
        mask = layers.Concatenate()([mask, concat_mask])
        # insert a 1x1 conv to reduce the amount of computation
        conv, mask = PConv2D(filters=filters, kernel_size=1)([conv, mask])
        conv = layers.BatchNormalization()(conv)
        conv = layers.Activation("relu")(conv)

    for i in range(reps):
        stride = downsampling if i == 0 else 1
        # downsample via the stride
        conv, mask = PConv2D(filters=filters, kernel_size=kernel_size, 
                             padding="same", strides=stride)([conv, mask])
        # apply BN -> ReLU on the image side only
        conv = layers.BatchNormalization()(conv)
        if act == "relu":
            conv = layers.Activation("relu")(conv)
        elif act == "prelu":
            conv = layers.PReLU()(conv)
        elif act == "custom_tanh":
            # turning the original image's white into black needs twice the tanh scale, i.e. [-2, 2]
            conv = layers.Lambda(lambda x: 2*K.tanh(x), name="unmasked")(conv)
    return conv, mask
Example #30
def initial_block(input_tensor):
    """ENet initial block
    :param input_tensor: input tensor
    :return: initial block tensor
    """

    if backend.image_data_format() == 'channels_last':
        channel_axis = 3
    else:
        channel_axis = 1

    branch_conv = layers.Conv2D(
        filters=13,
        kernel_size=(3, 3),
        strides=(2, 2),
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
        name='initial_block_conv')(input_tensor)
    branch_conv = layers.BatchNormalization(
        axis=channel_axis, name='initial_block_bn')(branch_conv)

    branch_pool = layers.MaxPool2D(pool_size=(2, 2),
                                   strides=(2, 2),
                                   padding='valid')(input_tensor)

    x = layers.Concatenate(axis=channel_axis)([branch_conv, branch_pool])
    x = layers.PReLU(alpha_initializer=PRELU_ALPHA)(x)

    return x
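A hedged sketch of how the block above might be exercised; L2_WEIGHT_DECAY and PRELU_ALPHA are module-level constants in the original project, so the values below are assumptions:

import tensorflow as tf
from tensorflow.keras import backend, layers, regularizers

L2_WEIGHT_DECAY = 1e-4                              # assumed value
PRELU_ALPHA = tf.keras.initializers.Constant(0.25)  # assumed value

inputs = tf.keras.Input(shape=(512, 512, 3))
outputs = initial_block(inputs)  # 13 conv channels + 3 pooled channels = 16
model = tf.keras.Model(inputs, outputs)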