Example #1
cnn2 = layers.Conv1D(128,               # filter count and kernel size are
                     kernel_size=4,     # assumptions; the snippet's head was truncated
                     padding='same',
                     strides=1,
                     activation='relu',
                     activity_regularizer='l2')(em)
# cnn2 = layers.MaxPooling1D(2,strides=2)(cnn2)
# cnn2 = layers.MaxPooling1D(2)(cnn2)
# drop2 = layers.Dropout(0.25)(cnn2)
cnn3 = layers.Conv1D(256,
                     kernel_size=5,
                     padding='same',
                     strides=1,
                     activation='relu',
                     activity_regularizer='l2')(em)
# cnn3 = layers.MaxPooling1D(2)(cnn3)
# drop3 = layers.Dropout(0.25)(cnn3)
# concat=layers.concatenate([drop1, drop2, drop3], axis=-1)
concat = layers.concatenate([cnn1, cnn2, cnn3], axis=-1)
maxpool = layers.GlobalMaxPooling1D()(concat)
flat = layers.Flatten()(maxpool)
dense = layers.Dropout(0.5)(flat)
dense = layers.Dense(64, activation='relu')(dense)
output = layers.Dense(2, activation='softmax')(dense)
model = tf.keras.models.Model(input, output)

one_hot_labels = keras.utils.to_categorical(train_label,
                                            num_classes=2)  # convert to one-hot encoding
adamOpti = tf.keras.optimizers.Adam(0.001)
model.compile(loss='categorical_crossentropy',  # one-hot labels + 2-way softmax
              optimizer=adamOpti,
              metrics=['accuracy'])

# callbacks_list = [
#     tf.keras.callbacks.EarlyStopping(monitor='accuracy', patience=1),
#     tf.keras.callbacks.ModelCheckpoint(filepath="bestmodel.h5",
#                                        monitor='val_loss',
#                                        save_best_only=True),
# ]
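
# For reference, a minimal front end that this snippet assumes but does not
# show: `input`, `em`, and `cnn1` are used above without being defined here,
# and the sizes below are assumptions, not part of the original source.
#
#     vocab_size, seq_len = 20000, 100
#     input = layers.Input(shape=(seq_len,))
#     em = layers.Embedding(vocab_size, 128)(input)
#     cnn1 = layers.Conv1D(128, kernel_size=3, padding='same', strides=1,
#                          activation='relu', activity_regularizer='l2')(em)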
Example #2
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence

max_features = 2000
max_len = 500

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)

model = keras.models.Sequential()
model.add(
    layers.Embedding(max_features, 128, input_length=max_len, name='embed'))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

callbacks = [
    keras.callbacks.TensorBoard(log_dir='logs',
                                histogram_freq=1,
                                embeddings_freq=1)
]
history = model.fit(x_train,
                    y_train,
                    epochs=20,
                    batch_size=128,
                    validation_split=0.2,  # tail of the call assumed; snippet was truncated
                    callbacks=callbacks)

Example #3
test_ds = test_ds.cache().prefetch(buffer_size=10)

from tensorflow.keras import layers

# An integer input for vocab indices.
inputs = tf.keras.Input(shape=(None,), dtype="int64")

# Next, we add a layer to map those vocab indices into a space of dimensionality
# 'embedding_dim'.
x = layers.Embedding(max_features, embedding_dim)(inputs)
x = layers.Dropout(0.5)(x)

# Conv1D + global max pooling
x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = layers.GlobalMaxPooling1D()(x)

# We add a vanilla hidden layer:
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)

# We project onto a single unit output layer, and squash it with a sigmoid:
predictions = layers.Dense(1, activation="sigmoid", name="predictions")(x)

model = tf.keras.Model(inputs, predictions)

# Compile the model with binary crossentropy loss and an adam optimizer.
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

epochs = 23
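
# A hedged completion of the truncated training step: this snippet's tutorial
# trains on tf.data datasets; `train_ds` and `val_ds` are assumed to exist
# alongside the prefetched `test_ds` above.
model.fit(train_ds, validation_data=val_ds, epochs=epochs)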
Example #4
def model_cnn(inp_shape=[1000, 4],
              embed_n=4,
              embed_dim=64,
              cnn_filters=[32],
              cnn_kernels=[2],
              cnn_dilations=[[1, 1, 1]],
              cnn_dropouts=[0.5],
              cnn_regularizers=None,
              pooling='local',
              max_pool=2,
              dense_regularizer=None,
              batchnormal=False):

    hparams = locals()
    ''' Input layer '''
    inp = Input(shape=inp_shape)
    ''' Embedding layer or not '''
    if embed_dim > 0:
        x = layers.Lambda(tokenize,
                          arguments={
                              'n': embed_n,
                              'padding': 'valid'
                          })(inp)
        x = layers.Embedding(4**embed_n,
                             embed_dim,
                             input_length=1000 - embed_n + 1)(x)  # int, not list
    else:
        if embed_n == 1:
            x = inp
        else:
            x = layers.Lambda(tokenize,
                              arguments={
                                  'n': embed_n,
                                  'padding': 'valid'
                              })(inp)
            x = layers.Lambda(one_hot_layer, arguments={'n': 4**embed_n})(x)
    ''' Conv layers '''

    xs = [None] * len(cnn_kernels)
    if cnn_regularizers is None:
        cnn_regularizers = [None] * len(cnn_kernels)

    for i in range(len(cnn_kernels)):
        xs[i] = x
        for dil in cnn_dilations[i]:
            xs[i] = layers.Conv1D(cnn_filters[i],
                                  cnn_kernels[i],
                                  kernel_regularizer=cnn_regularizers[i],
                                  dilation_rate=dil)(xs[i])
            if batchnormal:
                xs[i] = layers.BatchNormalization(axis=-1,
                                                  momentum=0.9)(xs[i],
                                                                training=True)
            xs[i] = layers.Activation('relu')(xs[i])
            xs[i] = layers.Dropout(cnn_dropouts[i])(xs[i])
            xs[i] = layers.MaxPooling1D(pool_size=max_pool)(xs[i])

        if pooling == 'global':
            xs[i] = layers.GlobalMaxPooling1D()(xs[i])
        elif pooling == 'local':
            xs[i] = layers.Flatten()(xs[i])
        else:
            raise ValueError('Unsupported pooling option: {0}'.format(pooling))
    ''' Concatenate all vectors into one '''
    if len(xs) == 1:
        x = xs[0]
    else:
        x = layers.concatenate(xs, axis=-1)
    ''' Dense layer '''
    x = layers.Dense(1, kernel_regularizer=dense_regularizer)(x)
    outp = layers.Activation('sigmoid')(x)
    ''' Get model '''
    return models.Model(inp, outp), hparams
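
# Minimal usage sketch (an assumption, not part of the original source):
# builds the default model; the `tokenize` and `one_hot_layer` helpers
# referenced above must be in scope.
model, hparams = model_cnn()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()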
Example #5
    model = keras.Model(inputs=input_tensor, outputs=x, name='TreeRNN')
    return model


# %% define network
MLP2 = L.Conv2D(32, (1, 2), padding='valid', activation='tanh', name='MLP2')

inputs1 = L.Input(shape=list(x_train.shape)[1:], name='Input')

x = inputs1
x = MLP2(x)

x = TreeRNN(x, 32)(x)

x = L.GlobalMaxPooling1D()(x)

x = L.Dense(2, activation='softmax', name='mapping')(x)

outputs = x
model = keras.Model(inputs=inputs1, outputs=outputs)
model.summary()

opti = keras.optimizers.Adam(1e-3)
model.compile(opti, loss='categorical_crossentropy', metrics=['acc'])
#%%
ckpt = keras.callbacks.ModelCheckpoint(filepath='model.h5',
                                       save_weights_only=False,
                                       monitor='val_acc')
history = model.fit(x=train_loader,
                    validation_data=test_loader,
                    epochs=20,           # tail of the call assumed; snippet was truncated
                    callbacks=[ckpt])
Example #6
    def build(self):
        query_input = self.new_query_input()
        title_input = self.new_title_input()
        ingredients_input = self.new_ingredients_input()
        description_input = self.new_description_input()
        country_input = self.new_country_input()
        doc_id_input = self.new_doc_id_input()
        inputs = [
            query_input, title_input, ingredients_input, description_input,
            country_input, doc_id_input
        ]

        word_embedding = layers.Embedding(self.total_words,
                                          self.embedding_dim,
                                          name='word_embedding')
        query = layers.GlobalMaxPooling1D()(word_embedding(query_input))
        title = layers.GlobalMaxPooling1D()(word_embedding(title_input))
        ingredients = layers.GlobalMaxPooling1D()(
            word_embedding(ingredients_input))
        description = layers.GlobalMaxPooling1D()(
            word_embedding(description_input))
        country_embedding = layers.Embedding(self.total_countries,
                                             self.embedding_dim)
        country = country_embedding(country_input)
        country = tf.reshape(country, shape=(
            -1,
            self.embedding_dim,
        ))
        image_embedding = self.load_pretrained_embedding(
            embedding_filepath=
            f'{project_dir}/data/raw/en_2020-03-16T00_04_34_recipe_image_tagspace5000_300.pkl',
            embedding_dim=300,
            name='image_embedding')
        image = image_embedding(doc_id_input)
        image = tf.reshape(image, shape=(
            -1,
            300,
        ))
        image = layers.Dropout(.2)(image)
        image = layers.Dense(self.embedding_dim)(image)
        input_features = [
            query, title, ingredients, description, country, image
        ]

        query_title = layers.Dot(axes=1)([query, title])
        query_ingredients = layers.Dot(axes=1)([query, ingredients])
        query_description = layers.Dot(axes=1)([query, description])
        query_country = layers.Dot(axes=1)([query, country])
        query_image = layers.Dot(axes=1)([query, image])
        interactions = layers.Add()([
            query_title, query_ingredients, query_description, query_country,
            query_image
        ])

        features = []
        for feature in input_features:
            feature = layers.Dense(1, activation='relu')(feature)
            features.append(feature)
        features = layers.Add()(features)
        features = AddBias0()(features)

        output = layers.Activation('sigmoid',
                                   name='label')(features + interactions)
        return tf.keras.Model(inputs=inputs, outputs=output, name=self.name)
Example #7
def MnasNet(include_top=True,
            weights='hasc',
            input_shape=None,
            pooling=None,
            classes=6,
            classifier_activation='softmax',
            alpha=1.0,
            depth_multiplier=1):
    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = layers.Input(shape=input_shape)

    first_block_filters = _make_divisible(32 * alpha, 8)
    x = ConvBlock(2, first_block_filters)(inputs)

    x = SepConvBlock(16, alpha, 16, depth_multiplier)(x)

    x = InvertedResBlock(kernel=3,
                         expansion=3,
                         stride=2,
                         alpha=alpha,
                         filters=24,
                         block_id=1)(x)
    x = InvertedResBlock(kernel=3,
                         expansion=3,
                         stride=1,
                         alpha=alpha,
                         filters=24,
                         block_id=2)(x)
    x = InvertedResBlock(kernel=3,
                         expansion=3,
                         stride=1,
                         alpha=alpha,
                         filters=24,
                         block_id=3)(x)

    x = InvertedResBlock(kernel=5,
                         expansion=3,
                         stride=2,
                         alpha=alpha,
                         filters=40,
                         block_id=4)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=3,
                         stride=1,
                         alpha=alpha,
                         filters=40,
                         block_id=5)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=3,
                         stride=1,
                         alpha=alpha,
                         filters=40,
                         block_id=6)(x)

    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=2,
                         alpha=alpha,
                         filters=80,
                         block_id=7)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=80,
                         block_id=8)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=80,
                         block_id=9)(x)

    x = InvertedResBlock(kernel=3,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=96,
                         block_id=10)(x)
    x = InvertedResBlock(kernel=3,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=96,
                         block_id=11)(x)

    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=2,
                         alpha=alpha,
                         filters=192,
                         block_id=12)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=192,
                         block_id=13)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=192,
                         block_id=14)(x)
    x = InvertedResBlock(kernel=5,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=192,
                         block_id=15)(x)

    x = InvertedResBlock(kernel=3,
                         expansion=6,
                         stride=1,
                         alpha=alpha,
                         filters=320,
                         block_id=16)(x)

    x = layers.GlobalAveragePooling1D()(x)
    y = layers.Dense(classes,
                     activation=classifier_activation,
                     use_bias=True,
                     name="prediction")(x)

    model = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/mnasnet/mnasnet_hasc_weights_{}_{}.hdf5'.format(
                int(input_shape[0]), int(input_shape[1]))

        # initialize from the HASC weights or a given weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the top (classification head) is not included
    if not include_top:
        if pooling is None:
            # remove the top
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
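
# Usage sketch (assumed, not from the original source): random initialization
# with the default HASC-style input shape of (768, 1).
model = MnasNet(include_top=True, weights=None, classes=6)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])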
Example #8
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

input_tensor = layers.Input((max_len, ))
'''
in the Embedding layer:
input_dim = num of categories = max_features in our case (the distinct number of
words that we are examining in this problem).
output_dim = embedding_size = 128.
'''
kmodel = layers.Embedding(max_features, 128)(input_tensor)
kmodel = layers.Conv1D(32, 7, activation='relu')(kmodel)
kmodel = layers.MaxPooling1D(5)(kmodel)
kmodel = layers.Conv1D(32, 7, activation='relu')(kmodel)
kmodel = layers.GlobalMaxPooling1D()(
    kmodel)  # ends with either this or Flatten() to turn the 3D
# inputs into 2D outputs, allowing us to add one or
# more Dense layers to the model for classification
# or regression.
output_tensor = layers.Dense(1, activation='sigmoid')(kmodel)
model = models.Model(input_tensor, output_tensor)
model.summary()

model.compile(optimizer=RMSprop(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)

Example #9
def get_tail_concat_model(DATA, predict_age=True, predict_gender=False):
    # shape: (sequence length,)
    # first input
    input_creative_id = Input(shape=(None,), name='creative_id')
    x1 = Embedding(input_dim=NUM_creative_id+1,
                   output_dim=128,
                   weights=[DATA['creative_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_creative_id,
                   mask_zero=True)(input_creative_id)
    for _ in range(args.num_lstm):
        x1 = Bidirectional(LSTM(256, return_sequences=True))(x1)
    x1 = layers.GlobalMaxPooling1D()(x1)

    # second input
    input_ad_id = Input(shape=(None,), name='ad_id')
    x2 = Embedding(input_dim=NUM_ad_id+1,
                   output_dim=128,
                   weights=[DATA['ad_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_ad_id,
                   mask_zero=True)(input_ad_id)
    for _ in range(args.num_lstm):
        x2 = Bidirectional(LSTM(256, return_sequences=True))(x2)
    x2 = layers.GlobalMaxPooling1D()(x2)

    # third input
    input_product_id = Input(shape=(None,), name='product_id')
    x3 = Embedding(input_dim=NUM_product_id+1,
                   output_dim=128,
                   weights=[DATA['product_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_product_id,
                   mask_zero=True)(input_product_id)
    for _ in range(args.num_lstm):
        x3 = Bidirectional(LSTM(256, return_sequences=True))(x3)
    x3 = layers.GlobalMaxPooling1D()(x3)

    # fourth input
    input_advertiser_id = Input(shape=(None,), name='advertiser_id')
    x4 = Embedding(input_dim=NUM_advertiser_id+1,
                   output_dim=128,
                   weights=[DATA['advertiser_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_advertiser_id,
                   mask_zero=True)(input_advertiser_id)
    for _ in range(args.num_lstm):
        x4 = Bidirectional(LSTM(256, return_sequences=True))(x4)
    x4 = layers.GlobalMaxPooling1D()(x4)

    # fifth input
    input_industry = Input(shape=(None,), name='industry')
    x5 = Embedding(input_dim=NUM_industry+1,
                   output_dim=128,
                   weights=[DATA['industry_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_industry,
                   mask_zero=True)(input_industry)
    for _ in range(args.num_lstm):
        x5 = Bidirectional(LSTM(256, return_sequences=True))(x5)
    x5 = layers.GlobalMaxPooling1D()(x5)

    # sixth input
    input_product_category = Input(shape=(None,), name='product_category')
    x6 = Embedding(input_dim=NUM_product_category+1,
                   output_dim=128,
                   weights=[DATA['product_category_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_product_category,
                   mask_zero=True)(input_product_category)
    for _ in range(args.num_lstm):
        x6 = Bidirectional(LSTM(256, return_sequences=True))(x6)
    x6 = layers.GlobalMaxPooling1D()(x6)

    x = layers.Concatenate(axis=1)([x1, x2, x3, x4, x5, x6])
    # x = layers.GlobalMaxPooling1D()(x)

    if predict_age and predict_gender:
        output_gender = Dense(2, activation='softmax', name='gender')(x)
        output_age = Dense(10, activation='softmax', name='age')(x)
        output_y = [output_gender, output_age]
    elif predict_age:
        output_y = Dense(10, activation='softmax', name='age')(x)
    elif predict_gender:
        output_y = Dense(2, activation='softmax', name='gender')(x)
    else:
        raise ValueError('at least one of predict_age / predict_gender must be True')

    model = Model(
        [
            input_creative_id, input_ad_id, input_product_id,
            input_advertiser_id, input_industry, input_product_category
        ],
        output_y)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])

    model.summary()
    return model
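
# Sketch (assumptions flagged): fitting the age-only variant. The NUM_*/LEN_*
# globals, `args`, and the DATA dict of pretrained embeddings must exist;
# `train_inputs` and `y_age_onehot` are hypothetical names.
model = get_tail_concat_model(DATA, predict_age=True, predict_gender=False)
model.fit(train_inputs, y_age_onehot, batch_size=256, epochs=5)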
Example #10
def DenseNet(number,
             include_top=True,
             weights='hasc',
             input_shape=None,
             pooling=None,
             classes=6,
             classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    # number of blocks
    if number == 121:
        blocks = [6, 12, 24, 16]
    elif number == 169:
        blocks = [6, 12, 32, 32]
    elif number == 201:
        blocks = [6, 12, 48, 32]
    else:
        raise ValueError('`number` should be 121, 169 or 201')

    inputs = layers.Input(shape=input_shape)

    x = layers.ZeroPadding1D(padding=(3, 3))(inputs)
    x = layers.Conv1D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(epsilon=1.001e-5, name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding1D(padding=(1, 1))(x)
    x = layers.MaxPooling1D(3, strides=2, name='pool1')(x)

    x = DenseBlock(blocks[0], name='conv2')(x)
    x = TransitionBlock(0.5, name='pool2')(x)
    x = DenseBlock(blocks[1], name='conv3')(x)
    x = TransitionBlock(0.5, name='pool3')(x)
    x = DenseBlock(blocks[2], name='conv4')(x)
    x = TransitionBlock(0.5, name='pool4')(x)
    x = DenseBlock(blocks[3], name='conv5')(x)

    x = layers.BatchNormalization(epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)

    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes,
                     activation=classifier_activation,
                     name='predictions')(x)

    # Create model.
    model_ = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/densenet{}/densenet{}_hasc_weights_{}_{}.hdf5'.format(
                number, number, int(input_shape[0]), int(input_shape[1]))
        # initialize from the HASC weights or a given weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model_.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the top (classification head) is not included
    if not include_top:
        if pooling is None:
            # remove the top
            model_ = Model(inputs=model_.input,
                           outputs=model_.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model_.layers[-3].output)
            model_ = Model(inputs=model_.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model_.layers[-3].output)
            model_ = Model(inputs=model_.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model_ = Model(inputs=model_.input,
                           outputs=model_.layers[-3].output)

    return model_
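
# Usage sketch (assumed): DenseNet-121 with random initialization.
model = DenseNet(121, weights=None)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])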
Example #11
def Xception(include_top=True, weights='hasc', input_shape=None, pooling=None, classes=6, classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256*3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = layers.Input(shape=input_shape)

    x = layers.Conv1D(32, 3, strides=2, use_bias=False, name='block1_conv1')(inputs)
    x = layers.BatchNormalization(name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.Conv1D(64, 3, use_bias=False, name='block1_conv2')(x)
    x = layers.BatchNormalization(name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv1D(
        128, 1, strides=2, padding='same', use_bias=False
    )(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.SeparableConv1D(128, 3, padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = layers.BatchNormalization(name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv1D(128, 3, padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = layers.BatchNormalization(name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv1D(
        256, 1, strides=2, padding='same', use_bias=False
    )(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv1D(256, 3, padding='same', use_bias=False, name='block3_sepconv1')(x)
    x = layers.BatchNormalization(name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv1D(256, 3, padding='same', use_bias=False, name='block3_sepconv2')(x)
    x = layers.BatchNormalization(name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv1D(728, 1, strides=2, padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block4_sepconv1_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block4_sepconv1')(x)
    x = layers.BatchNormalization(name='block4_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block4_sepconv2_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block4_sepconv2')(x)
    x = layers.BatchNormalization(name='block4_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = layers.Activation('relu', name=prefix + "_sepconv1_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv1")(x)
        x = layers.BatchNormalization(name=prefix + "_sepconv1_bn")(x)
        x = layers.Activation('relu', name=prefix + "_sepconv2_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv2")(x)
        x = layers.BatchNormalization(name=prefix + "_sepconv2_bn")(x)
        x = layers.Activation('relu', name=prefix + "_sepconv3_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv3")(x)

        x = layers.add([x, residual])

    residual = layers.Conv1D(1024, 1, strides=2, padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block13_sepconv1_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = layers.BatchNormalization(name='block13_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block13_sepconv2_act')(x)
    x = layers.SeparableConv1D(1024, 3, padding='same', use_bias=False, name='block13_sepconv2')(x)
    x = layers.BatchNormalization(name='block13_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same')(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv1D(1536, 3, padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = layers.BatchNormalization(name='block14_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv1_act')(x)

    x = layers.SeparableConv1D(2048, 3, padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = layers.BatchNormalization(name='block14_sepconv2_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv2_act')(x)

    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)

    model = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/xception/xception_hasc_weights_{}_{}.hdf5'.format(int(input_shape[0]),
                                                                                 int(input_shape[1]))

        # initialize from the HASC weights or a given weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the top (classification head) is not included
    if not include_top:
        if pooling is None:
            # remove the top
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
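
# Usage sketch (assumed): headless feature extractor with average pooling;
# `batch` of shape (n, 768, 1) is a hypothetical input.
backbone = Xception(include_top=False, weights=None, pooling='avg')
features = backbone.predict(batch)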
Example #12
def get_text_encoding_layer(name,
                            dataset,
                            max_features=100,
                            max_len=20,
                            embedding_dim=20,
                            layer_type="bi_rnn",
                            n_units=20,
                            dropout=0.3):
    """Creates everything that is needed for a textual input pipeline.

    Args:
        name (string): name of the feature
        dataset (tf.DataSet): tensorflow dataset
        max_features (int, optional): Maximum vocab size. Defaults to 100.
        max_len (int, optional): Sequence length to pad the outputs to. Defaults to 20.
        embedding_dim (int, optional): Embedding dimension. Defaults to 20.
        layer_type (str, optional): layer type to use (e.g., rnn, bi_rnn, gru, cnn). Defaults to "bi_rnn".
        n_units (int, optional): number of units within that layer. Defaults to 20.
        dropout (float, optional): dropout percentage. Defaults to 0.3.

    Returns:
        lambda function: textual input pipeline
    """
    # max_features = 100  # Maximum vocab size.
    # max_len = 20  # Sequence length to pad the outputs to.
    # embedding_dim = 20

    vec_layer = TextVectorization(max_tokens=max_features,
                                  output_mode='int',
                                  output_sequence_length=max_len)

    text_ds = dataset.map(lambda x, y: x[name])

    vec_layer.adapt(text_ds)

    embedding_layer = layers.Embedding(max_features + 1, embedding_dim)

    dropout_layer = layers.Dropout(0.4)

    if layer_type == "cnn":
        # Conv1D + global max pooling
        conv_layer = layers.Conv1D(n_units,
                                   5,
                                   padding='valid',
                                   activation='relu',
                                   strides=3)
        max_pooling_layer = layers.GlobalMaxPooling1D()

        return lambda feature: max_pooling_layer(
            conv_layer(dropout_layer(embedding_layer(vec_layer(feature)))))

    elif layer_type == "rnn":
        rnn_layer = layers.SimpleRNN(units=n_units,
                                     activation='relu',
                                     dropout=dropout)

        return lambda feature: rnn_layer(
            dropout_layer(embedding_layer(vec_layer(feature))))

    elif layer_type == "bi_rnn":
        bi_rnn_layer = layers.Bidirectional(
            layers.SimpleRNN(units=n_units, activation='relu',
                             dropout=dropout))

        return lambda feature: bi_rnn_layer(
            dropout_layer(embedding_layer(vec_layer(feature))))

    elif layer_type == "gru":
        gru_layer = layers.Bidirectional(
            layers.GRU(units=n_units, activation='relu', dropout=dropout))

        return lambda feature: gru_layer(
            dropout_layer(embedding_layer(vec_layer(feature))))
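
# Sketch (assumptions flagged): wiring the returned pipeline into a functional
# model. `train_ds` is assumed to yield (features_dict, label) pairs with a
# string feature named "text".
text_input = tf.keras.Input(shape=(1,), dtype=tf.string, name='text')
encode_text = get_text_encoding_layer('text', train_ds, layer_type='cnn')
x = encode_text(text_input)
output = layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(text_input, output)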
Example #13
    def make_gru_network(self):

        x0 = tf.keras.Input(shape=[None, 6, self.num_channels])

        num_channels = self.num_channels

        # # x_spc = tf.math.reduce_max(x0[:,:,:,num_channels:], axis=1)
        # # x_spc = layers.Flatten()(x_spc)
        # # x_spc = layers.Dense(1, activation=None, name='fit_spacing')(x_spc)

        x = layers.Masking(mask_value=-1.0)(x0)
        # x = tf.multiply(x, x_spc)
        # x = layers.BatchNormalization()(x)

        x_0 = layers.Reshape([-1, num_channels])(x[:, :, 0, :])
        x_1 = layers.Reshape([-1, num_channels])(x[:, :, 1, :])
        x_2 = layers.Reshape([-1, num_channels])(x[:, :, 2, :])
        x_3 = layers.Reshape([-1, num_channels])(x[:, :, 3, :])
        x_4 = layers.Reshape([-1, num_channels])(x[:, :, 4, :])
        x_5 = layers.Reshape([-1, num_channels])(x[:, :, 5, :])

        # x_0, w_a0_fwd, w_a0_bwd = GruAtt(units=1024, drop_prob=self.drop_prob)(x_0)
        # x_1, w_a1_fwd, w_a1_bwd = GruAtt(units=1024, drop_prob=self.drop_prob)(x_1)
        # x_2, w_a2_fwd, w_a2_bwd = GruAtt(units=1024, drop_prob=self.drop_prob)(x_2)
        # x_3, w_a3_fwd, w_a3_bwd = GruAtt(units=1024, drop_prob=self.drop_prob)(x_3)
        # x_4, w_a4_fwd, w_a4_bwd = GruAtt(units=1024, drop_prob=self.drop_prob)(x_4)
        # x_5, w_a5_fwd, w_a5_bwd = GruAtt(units=1024, drop_prob=self.drop_prob)(x_5)

        # x_0, w_a0 = GruAtt(drop_prob=self.drop_prob)(x_0)
        # x_1, w_a1 = GruAtt(drop_prob=self.drop_prob)(x_1)
        # x_2, w_a2 = GruAtt(drop_prob=self.drop_prob)(x_2)
        # x_3, w_a3 = GruAtt(drop_prob=self.drop_prob)(x_3)
        # x_4, w_a4 = GruAtt(drop_prob=self.drop_prob)(x_4)
        # x_5, w_a5 = GruAtt(drop_prob=self.drop_prob)(x_5)

        x_0 = TransformerEncoder(units=num_channels,
                                 drop_prob=self.drop_prob)(x_0)
        x_1 = TransformerEncoder(units=num_channels,
                                 drop_prob=self.drop_prob)(x_1)
        x_2 = TransformerEncoder(units=num_channels,
                                 drop_prob=self.drop_prob)(x_2)
        x_3 = TransformerEncoder(units=num_channels,
                                 drop_prob=self.drop_prob)(x_3)
        x_4 = TransformerEncoder(units=num_channels,
                                 drop_prob=self.drop_prob)(x_4)
        x_5 = TransformerEncoder(units=num_channels,
                                 drop_prob=self.drop_prob)(x_5)

        x_0 = layers.GlobalMaxPooling1D()(x_0)
        x_1 = layers.GlobalMaxPooling1D()(x_1)
        x_2 = layers.GlobalMaxPooling1D()(x_2)
        x_3 = layers.GlobalMaxPooling1D()(x_3)
        x_4 = layers.GlobalMaxPooling1D()(x_4)
        x_5 = layers.GlobalMaxPooling1D()(x_5)

        x_0 = tf.expand_dims(x_0, axis=1)
        x_1 = tf.expand_dims(x_1, axis=1)
        x_2 = tf.expand_dims(x_2, axis=1)
        x_3 = tf.expand_dims(x_3, axis=1)
        x_4 = tf.expand_dims(x_4, axis=1)
        x_5 = tf.expand_dims(x_5, axis=1)

        x = layers.Concatenate(axis=1)([x_0, x_1, x_2, x_3, x_4, x_5])
        # x, w_a = GruAtt(units=4096, drop_prob=self.drop_prob)(x)
        x = TransformerEncoder(units=num_channels, drop_prob=self.drop_prob)(x)
        # x_spc = x0[:,0,0,512:513]
        # x = tf.multiply(x, x_spc)
        x = layers.GlobalMaxPooling1D()(x)

        x = layers.Dense(1, activation='sigmoid', name='prediction')(x)
        x = tf.math.add(tf.math.multiply(x, 180.0), 100.0)

        # x_e, x_h_fwd, x_h_bwd = layers.Bidirectional(layers.GRU(units=512, activation='tanh', use_bias=False, kernel_initializer="glorot_normal", dropout=self.drop_prob, return_sequences=True, return_state=True), name="bi_gru0")(x)
        # x_e = layers.Dropout(self.drop_prob)(x_e)
        # x_h_fwd = layers.Dropout(self.drop_prob)(x_h_fwd)
        # x_h_bwd = layers.Dropout(self.drop_prob)(x_h_bwd)

        # x_a_fwd, w_a_fwd = BahdanauAttention(1024)(x_h_fwd, x_e)
        # x_a_bwd, w_a_bwd = BahdanauAttention(1024)(x_h_bwd, x_e)

        # x = tf.concat([x_h_fwd, x_a_fwd, x_h_bwd, x_a_bwd], axis=-1)

        # x = layers.Dense(1, activation='sigmoid', name='prediction')(x)
        # x = tf.math.add(tf.math.multiply(x, 240.0), 40.0)

        return tf.keras.Model(inputs=x0, outputs=x)
Example #14
def create_model(filters, kernel_size, dilation_rate, num_res_blocks,
                 lookback_window):
    '''Get the model
    '''
    #Model inputs
    inputs = layers.Input(shape=(lookback_window, 5))
    #Initial convolution
    conv = layers.Conv1D(filters=filters,
                         kernel_size=kernel_size,
                         dilation_rate=dilation_rate,
                         padding="same")(inputs)

    #Resnet
    def resnet(x, num_res_blocks):
        """Builds a resnet with 1D convolutions of the defined depth.
        """
        # Instantiate the stack of residual units
        #Similar to ProtCNN, but they used batch_size = 64, 2000 filters and kernel size of 21
        for res_block in range(num_res_blocks):
            #block
            c1 = layers.Conv1D(filters=filters,
                               kernel_size=kernel_size,
                               dilation_rate=dilation_rate,
                               padding="same")(x)
            b1 = layers.BatchNormalization()(c1)  #Batch normalize, focus on segment
            a1 = layers.Activation('relu')(b1)
            c2 = layers.Conv1D(filters=filters,
                               kernel_size=kernel_size,
                               dilation_rate=dilation_rate,
                               padding="same")(a1)
            b2 = layers.BatchNormalization()(c2)  #Batch normalize, focus on segment
            #Skip connection
            s1 = tf.math.add(x, b2)
            x = layers.Activation('relu')(s1)

        return x  #return after the last block (was inside the loop, exiting after one iteration)

    #Apply resnet
    if num_res_blocks >= 1:
        conv = resnet(conv, num_res_blocks)

    #Maxpool along sequence axis
    maxpool = layers.GlobalMaxPooling1D()(conv)
    #Flatten
    out = layers.Flatten()(maxpool)
    outputs = layers.Dense(1, activation="tanh")(out)

    model = tf.keras.Model(inputs, outputs)
    #Optimizer
    initial_learning_rate = 1e-3
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate,
        decay_steps=500,
        decay_rate=0.96,
        staircase=True)

    opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule, amsgrad=True)

    #Compile
    model.compile(optimizer=opt, loss='mae')

    return model
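
# Usage sketch (assumed hyperparameters): a model over a 30-step lookback
# window of 5 features.
model = create_model(filters=64,
                     kernel_size=5,
                     dilation_rate=1,
                     num_res_blocks=2,
                     lookback_window=30)
model.summary()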
Example #15
 def doublePoolClassCNN(self,
                        n_tokens,
                        convolutions,
                        dense,
                        conv_act=None,
                        conv_bias=True,
                        dense_bias=True,
                        input_type="trainable",
                        embedding_dim=None,
                        regul_dense_only=True,
                        dropout_rate=0,
                        optimizer="rmsprop"):
     """
     Classificating CNN with both max and average 
     global pooling operations
     Parameters:
     - convolutions  -- specification of convolutional layers
                        as list of tuples:
                        either (kernel width, n filters, stride)
                        or (kernel width, n filters) if stride = 1
     - dense         -- specification of dense layers as list of
                        their width except the last one
     - conv_act      -- activation of convolutional layers
     - conv_bias     -- use bias for convolutional layers
     - dense_bias    -- use bias for dense layers
     - input_type    -- type of input layer:
                        - categorical - categorical coding 
                                        decoded by preprocessing layer
                        - trainable   - categorical coding processed
                                        by trainable embedding layer
     - embedding_dim -- size of embedding vectors
                        It effects trainable embedding only,
                        i.e. input_type == "trainable"
     - dropout_rate  -- dropout rate for inserted dropout layer
                        If it is 0 or None no droput layer is inserted
     - optimizer   -- training optimizer
     - regul_dense_only -- flag to regulirize only dense layers
     Returns:
     - compiled dnn
     """
     _input = layers.Input(shape=(None, ))
     _maker = SeqBlockFactory(n_tokens)
     _conv_tower1 = _maker.convTower(convolutions,
                                     conv_act=conv_act,
                                     out_no_act=True,
                                     conv_bias=conv_bias,
                                     input_type=input_type,
                                     regularizer=self._regularizer,
                                     embedding_dim=embedding_dim,
                                     regul_dense_only=regul_dense_only)
     _conv_tower2 = _maker.convTower(convolutions,
                                     conv_act=conv_act,
                                     out_no_act=True,
                                     conv_bias=conv_bias,
                                     input_type=input_type,
                                     regularizer=self._regularizer,
                                     embedding_dim=embedding_dim,
                                     regul_dense_only=regul_dense_only)
     _conv1 = _conv_tower1(_input)
     _conv2 = _conv_tower2(_input)
     _max_pool = layers.GlobalMaxPooling1D()(_conv1)
     _aver_pool = layers.GlobalAveragePooling1D()(_conv2)
     # keras.layers.Lambda(lambda x: keras.backend.stop_gradient(x))(_conv))
     _merged = layers.concatenate([_max_pool, _aver_pool], axis=-1)
     if dropout_rate:
         _merged = layers.Dropout(dropout_rate,
                                  seed=UniqueSeed.getSeed())(_merged)
     _output = self._denseClassifier(_merged, dense)
     return self._compile(_input, _output, optimizer=optimizer)
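
# Sketch (hypothetical names): two width-5 conv layers with 64 filters each
# and one hidden dense layer of 32 units; `factory` stands in for the object
# this method is defined on.
dnn = factory.doublePoolClassCNN(n_tokens=256,
                                 convolutions=[(5, 64), (5, 64)],
                                 dense=[32],
                                 dropout_rate=0.3)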

Example #16
def get_head_concat_model(DATA):
    # shape: (sequence length,)
    # first input
    input_creative_id = Input(shape=(None,), name='creative_id')
    x1 = Embedding(input_dim=NUM_creative_id+1,
                   output_dim=128,
                   weights=[DATA['creative_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_creative_id,
                   mask_zero=True)(input_creative_id)

    input_ad_id = Input(shape=(None,), name='ad_id')
    x2 = Embedding(input_dim=NUM_ad_id+1,
                   output_dim=128,
                   weights=[DATA['ad_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_ad_id,
                   mask_zero=True)(input_ad_id)

    input_product_id = Input(shape=(None,), name='product_id')
    x3 = Embedding(input_dim=NUM_product_id+1,
                   output_dim=128,
                   weights=[DATA['product_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_product_id,
                   mask_zero=True)(input_product_id)

    input_advertiser_id = Input(shape=(None,), name='advertiser_id')
    x4 = Embedding(input_dim=NUM_advertiser_id+1,
                   output_dim=128,
                   weights=[DATA['advertiser_id_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_advertiser_id,
                   mask_zero=True)(input_advertiser_id)

    input_industry = Input(shape=(None,), name='industry')
    x5 = Embedding(input_dim=NUM_industry+1,
                   output_dim=128,
                   weights=[DATA['industry_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_industry,
                   mask_zero=True)(input_industry)

    input_product_category = Input(shape=(None,), name='product_category')
    x6 = Embedding(input_dim=NUM_product_category+1,
                   output_dim=128,
                   weights=[DATA['product_category_emb']],
                   trainable=args.not_train_embedding,
                   input_length=LEN_product_category,
                   mask_zero=True)(input_product_category)

    x = Concatenate(axis=2)([x1, x2, x3, x4, x5, x6])

    for _ in range(args.num_lstm):
        x = Bidirectional(LSTM(128, return_sequences=True))(x)
    x = layers.GlobalMaxPooling1D()(x)
    # x = layers.GlobalAveragePooling1D()(x)

    output_gender = Dense(2, activation='softmax', name='gender')(x)
    output_age = Dense(10, activation='softmax', name='age')(x)

    model = Model(
        [
            input_creative_id,
            input_ad_id,
            input_product_id,
            input_advertiser_id,
            input_industry,
            input_product_category
        ],
        [
            output_gender,
            output_age
        ]
    )
    model.compile(
        optimizer=optimizers.Adam(1e-4),
        loss={'gender': losses.CategoricalCrossentropy(from_logits=False),
              'age': losses.CategoricalCrossentropy(from_logits=False)},
        loss_weights=[0.5, 0.5],
        metrics=['accuracy'])
    model.summary()

    return model
Example #17
def create_cnn_model(vocab_size,
                     h=5,
                     s=3,
                     strides=2,
                     m=200,
                     embed_size=128,
                     p=0.2):
    """
    :param vocab_size: size of the vocabulary known (word count)
    :param h: filter window size for the convolutional layers
    :param s: pooling size for the first max-pooling layer
    :param strides: strides for the first max-pooling layer
    :param m: filter count
    :param embed_size: size of the embedding vector
    :param p: dropout probability
    :return: built uncompiled model
    """
    model = tf.keras.Sequential()
    # embedding
    # The first layer of the network consists of a lookup table where the word embeddings are represented
    # as mentioned in the article (page 3), Word Embeddings: we create the word embeddings in phase P1 using word2vec.
    model.add(layers.Embedding(vocab_size, embed_size))

    # dropout
    # To reduce overfitting, this layer is added after the input layer, with drop rate 0.2
    # In the article, page 2, Dropout:
    # Dropout is an alternative technique used to reduce overfitting
    # We apply Dropout to the hidden layer and to the input layer using p = 0.2 in both cases
    model.add(layers.Dropout(rate=p))

    # first convolutional layer
    # - About filter parameters and window size
    # In the article (page 2) Convolutional layer:
    # in this layer, a set of m filters is applied to a sliding window of length h over each sentence
    # In the article (page 2) parameters:
    # For both convolutional layers we set the length of the sliding window h to 5.
    # And the number of filters m is set to 200 in both convolutional layers.
    # The activation function in this layer is relu
    # The output of the convolutional layer is passed through a non-linear activation function
    model.add(layers.Conv1D(filters=m, kernel_size=h, activation=tf.nn.relu))

    #Max pooling
    # In the article (page 2) Max pooling:
    # Where s is the length of each interval. In the case of overlapping intervals with a stride value
    # In the article (page 2) parameters:
    # pooling interval s is set to 3 in both layers, where we use a striding of 2 in the first layer
    model.add(layers.MaxPool1D(pool_size=s, strides=strides))
    # second convolutional layer
    # similar to first layer
    model.add(layers.Conv1D(filters=m, kernel_size=h, activation=tf.nn.relu))
    # this time we use a global max-pooling to reduce the data

    model.add(layers.GlobalMaxPooling1D())
    # hidden
    # - About the type of layer
    # In the article (page 2) Hidden layer:
    # A fully connected hidden layer computes the transformation
    # In the article (page 2) Hidden layer:
    # We use with rectified linear (relu) activation function
    # - About the node units
    # the number of filters m is set to 200 in both convolutional layers.
    model.add(layers.Dense(m, activation=tf.nn.relu))
    # dropout
    # We apply Dropout to the hidden layer and to the input layer using p = 0.2 in both cases
    model.add(layers.Dropout(rate=p))
    # dense
    # using sigmoid function
    # In the article (page 2) they use softmax and we use the sigmoid function:
    # Finally, the outputs of the hidden layer
    model.add(layers.Dense(1, activation=tf.nn.sigmoid))
    return model
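
# Usage sketch (assumed): a 20k-word vocabulary, compiled for binary
# classification with the paper's hyperparameter defaults above.
model = create_cnn_model(vocab_size=20000)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])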
Example #18
def InceptionResNetV2(include_top=True,
                      weights='hasc',
                      input_shape=None,
                      pooling=None,
                      classes=6,
                      classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = layers.Input(shape=input_shape)

    # stem block
    x = Conv1DBN(32, 3, strides=2, padding='valid')(inputs)
    x = Conv1DBN(32, 3, padding='valid')(x)
    x = Conv1DBN(64, 3)(x)
    x = layers.MaxPooling1D(3, strides=2)(x)
    x = Conv1DBN(80, 1, padding='valid')(x)
    x = Conv1DBN(192, 3, padding='valid')(x)
    x = layers.MaxPooling1D(3, strides=2)(x)

    # Mixed 5b (Inception-A block)
    branch_0 = Conv1DBN(96, 1)(x)
    branch_1 = Conv1DBN(48, 1)(x)
    branch_1 = Conv1DBN(64, 5)(branch_1)
    branch_2 = Conv1DBN(64, 1)(x)
    branch_2 = Conv1DBN(96, 3)(branch_2)
    branch_2 = Conv1DBN(96, 3)(branch_2)
    branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
    branch_pool = Conv1DBN(64, 1)(branch_pool)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = layers.Concatenate(name='mixed_5b')(branches)

    # 10x block35 (Inception-ResNet-A block)
    for block_idx in range(1, 11):
        x = InceptionResNetBlock(scale=0.17,
                                 block_type='block35',
                                 block_idx=block_idx)(x)

    # Mixed 6a (Reduction-A block)
    branch_0 = Conv1DBN(384, 3, strides=2, padding='valid')(x)
    branch_1 = Conv1DBN(256, 1)(x)
    branch_1 = Conv1DBN(256, 3)(branch_1)
    branch_1 = Conv1DBN(384, 3, strides=2, padding='valid')(branch_1)
    branch_pool = layers.MaxPooling1D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = layers.Concatenate(name='mixed_6a')(branches)

    # 20x block17 (Inception-ResNet-B block)
    for block_idx in range(1, 21):
        x = InceptionResNetBlock(scale=0.1,
                                 block_type='block17',
                                 block_idx=block_idx)(x)

    # Mixed 7a (Reduction-B block)
    branch_0 = Conv1DBN(256, 1)(x)
    branch_0 = Conv1DBN(384, 3, strides=2, padding='valid')(branch_0)
    branch_1 = Conv1DBN(256, 1)(x)
    branch_1 = Conv1DBN(288, 3, strides=2, padding='valid')(branch_1)
    branch_2 = Conv1DBN(256, 1)(x)
    branch_2 = Conv1DBN(288, 3)(branch_2)
    branch_2 = Conv1DBN(320, 3, strides=2, padding='valid')(branch_2)
    branch_pool = layers.MaxPooling1D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = layers.Concatenate(name='mixed_7a')(branches)

    # 10x block8 (Inception-ResNet-C block)
    for block_idx in range(1, 10):
        x = InceptionResNetBlock(scale=0.2,
                                 block_type='block8',
                                 block_idx=block_idx)(x)
    x = InceptionResNetBlock(scale=1.,
                             activation=None,
                             block_type='block8',
                             block_idx=10)(x)

    # Final convolution block
    x = Conv1DBN(1536, 1, name='conv_7b')(x)

    # Classification block
    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes,
                     activation=classifier_activation,
                     name='predictions')(x)

    model = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/inceptionresnetv2/inceptionresnetv2_hasc_weights_{}_{}.hdf5'.format(
                int(input_shape[0]), int(input_shape[1]))

        # initialize from the HASC weights or a given weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the top (classification head) is not included
    if not include_top:
        if pooling is None:
            # remove the top
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
Example #19
from sklearn.model_selection import train_test_split

# Hedged reconstruction: the snippet opened mid-call, so the left-hand side
# and the first two arguments are assumptions.
x_train, x_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=.5,
                                                    random_state=1)

n_epoch = 40
n_batch = 100
seqsize = 100
seq_inputs = layers.Input(shape=(
    101,
    300,
), dtype='float32')

sin1 = seq_inputs[:, 1:101, :]
sin2 = seq_inputs[:, 0:1, :]

conv1 = layers.Conv1D(1700, 2, activation='relu')(sin1)
pool1 = layers.GlobalMaxPooling1D()(conv1)
conv2 = layers.Conv1D(1700, 1, activation='relu')(sin1)
pool2 = layers.GlobalMaxPooling1D()(conv2)
pool12 = layers.concatenate([pool1, pool2])
fcoutput1 = (layers.Dense(1000, activation="relu"))(pool12)
fcoutput1 = (layers.Dense(600, activation="relu"))(fcoutput1)
fcoutput1 = (layers.Dense(300, activation="relu"))(fcoutput1)
lstm = layers.Bidirectional(
    layers.LSTM(500, return_sequences=False, implementation=1,
                name="lstm_1"))(sin2)
d1 = (layers.Dense(500, activation="relu"))(lstm)
d1 = (layers.Dense(700, activation="relu"))(d1)
fcoutput2 = (layers.Dense(200, activation="relu"))(d1)
fc = layers.concatenate([fcoutput1, fcoutput2])
fcoutput = (layers.Dense(1, activation="relu"))(fc)
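
# Hedged completion: the snippet stops before assembling the model; this joins
# the two branches above into a trainable graph (assumes `tf` is in scope).
model = tf.keras.Model(seq_inputs, fcoutput)
model.compile(optimizer='adam', loss='mse')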
Example #20
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras import Model, Sequential
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import (Activation, AveragePooling1D,
                                     AveragePooling2D, BatchNormalization,
                                     Conv1D, Dense, Dropout, Embedding,
                                     GlobalAveragePooling1D,
                                     GlobalMaxPooling1D, MaxPooling1D,
                                     MaxPooling2D, Reshape)
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
############################################################################################

model = tf.keras.Sequential([
    layers.Reshape((-1, 1)),  ### reshape the input to 3-D for the Conv1D layer
    layers.Conv1D(100, 64, padding="same", activation="relu", strides=3),
    layers.GlobalMaxPooling1D(),
    layers.Dropout(0.5),
    layers.Reshape((-1, 1)),  ### reshape back to 3-D for the next Conv1D layer
    layers.Conv1D(64, 32, padding="same", activation="relu", strides=3),
    layers.GlobalMaxPooling1D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(32, activation="relu"),
    layers.Dropout(0.25),
])
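
# The stack above ends without a classification head, so it presumably serves
# as a feature block. A minimal build/summary sketch (the input width of 256
# is an assumption):
model.build(input_shape=(None, 256))
model.summary()  # final output shape: (None, 32)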
Example #21
0
def stjohns(params, num_classes=None):
    # split channels
    # + modality-dependent hyperparameters (a kind of bias selection)
    # + considering all channels from all positions
    if num_classes is None:  # ensure backward compatibility
        num_classes = 8
    xs = []
    inputs = []
    all_views = []
    for position in DataReader.smartphone_positions:
        one_view = []
        for _, channel in DataReader.channels.items():
            print('building branch for channel {}'.format(channel))
            modality = DataReader.channel_to_modality(channel)

            if channel.startswith('Acc') and channel.endswith('spectralfeatures'):
                input_length = 60
            elif channel == 'Mag_spectralfeatures':
                input_length = 73
            else:
                input_length = 500

            # 3D tensor with shape: (batch_size, steps, input_dim)
            ts = keras.Input(shape=(input_length,), name=position+'_'+channel)
            x = layers.Reshape((input_length, 1))(ts)

            # xs.append(x)  # this is for grouped modalities

            # x = layers.Dense(
            #     # 8,  # as the number of classes. In order to be used as output layer during validation and test!
            #     # activation='softmax',
            #     64,
            #     activation='relu',
            #     kernel_regularizer=regularizers.l2(0.001),
            #     bias_regularizer=regularizers.l2(0.001),
            #     activity_regularizer=regularizers.l2(0.001),
            #     # name='view_'+position
            # )(x)
            # # x = layers.Dropout(params['All']['dropout']['3'])(x)
            # x = layers.Dropout(0.5)(x)

            # x = layers.LSTM(
            #     32,
            #     kernel_regularizer=regularizers.l1_l2(l1=1e-1, l2=1e-1),
            #     recurrent_regularizer=regularizers.l2(1e-1),
            #     bias_regularizer=regularizers.l2(1e-1),
            #     activity_regularizer=regularizers.l2(1e-1),
            #     # dropout,
            #     # recurrent_dropout
            # )(x)

            x = layers.Conv1D(
                filters=params[modality]['numfilters']['0'],
                kernel_size=params[modality]['kernelsize']['0'],
                strides=2,
                padding='valid',
                activation='relu',
                kernel_regularizer=regularizers.l2(0.001),
                bias_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001),
                # input_shape=(None, 500, 1),
                input_shape=(None, input_length, 1),
                name=position+'/'+channel+'/Conv1d/layer_0')(x)
            x = layers.MaxPooling1D()(x)
            x = layers.BatchNormalization(name=position+'/'+channel+'/BN/layer_0')(x)

            x = layers.Conv1D(
                filters=params[modality]['numfilters']['1'],
                kernel_size=params[modality]['kernelsize']['1'],
                strides=2,
                padding='valid',
                activation='relu',
                kernel_regularizer=regularizers.l2(0.001),
                bias_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001),
                name=position+'/'+channel+'/Conv1d/layer_1')(x)
            x = layers.MaxPooling1D()(x)
            x = layers.BatchNormalization(name=position+'/'+channel+'/BN/layer_1')(x)

            x = layers.Conv1D(
                filters=params[modality]['numfilters']['2'],
                kernel_size=params[modality]['kernelsize']['2'],
                strides=2,
                padding='valid',
                activation='relu',
                kernel_regularizer=regularizers.l2(0.001),
                bias_regularizer=regularizers.l2(0.001),
                activity_regularizer=regularizers.l2(0.001),
                name=position+'/'+channel+'/Conv1d/layer_2')(x)
            #---------------------------------
            x = layers.GlobalMaxPooling1D()(x)
            #---------------------------------
            x = layers.BatchNormalization(name=position+'/'+channel+'/BN/layer_3')(x)

            inputs.append(ts)
            one_view.append(x)

        x = layers.concatenate(one_view)
        x = layers.Dense(
            # 8,  # as the number of classes. In order to be used as output layer during validation and test!
            # activation='softmax',
            params['All']['viewReprDim']['3'],
            activation='relu',
            kernel_regularizer=regularizers.l2(0.001),
            bias_regularizer=regularizers.l2(0.001),
            activity_regularizer=regularizers.l2(0.001),
            name='view_'+position)(x)
        # x = layers.Dropout(params['All']['dropout']['3'])(x)
        x = layers.Dropout(0.5)(x)
        all_views.append(x)

    joint_representation = layers.concatenate(all_views, name='joint_representation')

    joint_representation = layers.Dense(
        units=params['All']['hiddenunits']['3'],
        activation='relu',
        kernel_regularizer=regularizers.l2(0.001),
        bias_regularizer=regularizers.l2(0.001),
        activity_regularizer=regularizers.l2(0.001))(joint_representation)
    joint_representation = layers.Dropout(params['All']['dropout']['3'])(joint_representation)
    class_output = layers.Dense(num_classes, activation='softmax', name='class_output')(joint_representation)

    model = keras.Model(inputs=inputs, outputs=class_output)

    keras.utils.plot_model(model, 'stjohns.png', show_shapes=True)

    return model
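
# stjohns() indexes a nested hyperparameter dict keyed by modality plus an
# 'All' group; a sketch of the expected shape (keys such as 'Acc' and all
# numbers are illustrative; real modality names come from
# DataReader.channel_to_modality):
params_sketch = {
    'Acc': {'numfilters': {'0': 32, '1': 64, '2': 64},
            'kernelsize': {'0': 5, '1': 5, '2': 3}},
    'All': {'viewReprDim': {'3': 128},
            'hiddenunits': {'3': 256},
            'dropout': {'3': 0.5}},
}
model = stjohns(params_sketch, num_classes=8)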
Example #22
0
from tensorflow.keras.utils import plot_model

max_features = 2000  # number of words to consider as features
max_len = 500  # cut off texts after this number of words
# (among the max_features most common words)

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)

input_tensor = layers.Input((max_len, ))
kmodel = layers.Embedding(max_features, 128, name='embed')(input_tensor)
kmodel = layers.Conv1D(32, 7, activation='relu')(kmodel)
kmodel = layers.MaxPooling1D(5)(kmodel)
kmodel = layers.Conv1D(32, 7, activation='relu')(kmodel)
kmodel = layers.GlobalMaxPooling1D()(kmodel)
output_tensor = layers.Dense(1, activation='sigmoid')(kmodel)
model = models.Model(input_tensor, output_tensor)

model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
'''
We launch training with a TensorBoard callback instance. This callback
writes log events to disk at the specified location.

histogram_freq=1 => records activation histograms every epoch
embeddings_freq=1 => records embedding data every epoch
'''
callbacks = [
    keras.callbacks.TensorBoard(log_dir='my_log_dir',
                                histogram_freq=1,
                                embeddings_freq=1)
]
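
# Wiring the callback into training, then inspecting the run (a sketch; the
# epoch, batch and validation-split settings are illustrative):
history = model.fit(x_train,
                    y_train,
                    epochs=20,
                    batch_size=128,
                    validation_split=0.2,
                    callbacks=callbacks)
# Then, from a shell: tensorboard --logdir=my_log_dir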
Example #23
0
def validate_architectureCNNandRNN(architecture_name,
                                   num_epochs,
                                   num_iterations):
    
    for it in range(num_iterations):
    
        # Reinitializing weights on every iteration
        # (reinitializing weights does not work on CPU)
        
        input_shape = (99, 81, 1)
        nclass = len(train_labels)
        inp = Input(shape=input_shape)
        norm_inp = BatchNormalization()(inp)
        img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(norm_inp)
        img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(img_1)
        img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(img_1)
        
        img_1 = layers.TimeDistributed(Flatten())(img_1)
        
        norm_inp = layers.BatchNormalization()(img_1)
        lstm = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(norm_inp)
        lstm = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(lstm)
        lstm = layers.GlobalMaxPooling1D()(lstm)
        dense_1 = layers.Dense(128, activation=activations.relu)(lstm)
        dense_out = layers.Dense(nclass, activation=activations.softmax)(dense_1)

        model = models.Model(inputs=inp, outputs=dense_out)
        opt = optimizers.Adam()

        # softmax multi-class output, so categorical (not binary) crossentropy
        model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['accuracy'])
    
        x_train, x_valid, y_train, y_valid = train_test_split(X_train,
                                                              Y_train,
                                                              test_size=0.1)
        
        mc = ModelCheckpoint(f'./results/best_model_{architecture_name}_{it}.h5',
                             monitor='val_accuracy',
                             mode='max',
                             verbose=1,
                             save_weights_only=True,
                             save_best_only=True)
        
        history = model.fit(x_train,
                            y_train,
                            batch_size=32,
                            validation_data=(x_valid, y_valid),
                            epochs=num_epochs,
                            shuffle=True,
                            callbacks=[mc])
        
        history = history.history
        
        # saving json
        with open(f'./results/results_{architecture_name}_{it}', 'w') as fp:
            json.dump(history, fp, indent=4)
            
        model.load_weights(f'./results/best_model_{architecture_name}_{it}.h5')    
        # prediction
        index = []
        results = []
        
        for fnames, imgs in tqdm(test_data_generator(batch=32)):
            predicts = model.predict(imgs)
            predicts = np.argmax(predicts, axis=1)
            predicts = [label_index[p] for p in predicts]
            index.extend([text.split("/")[-1] for text in fnames])
            results.extend(predicts)

        # write the submission once, after the prediction loop has accumulated
        # all batches
        df = pd.DataFrame(columns=['fname', 'label'])
        df['fname'] = index
        df['label'] = results
        df.to_csv(f'./results/submission_{architecture_name}_{it}.csv', index=False)
Example #24
0
def encoder(max_len=256,
            vocab_size=30000,
            g_num_heads=1,
            d_num_heads=1,
            g_emb_dim=64,
            d_emb_dim=256,
            shared_embedding=False,
            emb_type="default",
            g_ff_dim=128,
            d_ff_dim=768,
            key_dim=64,
            g_num_layers=1,
            d_num_layers=1,
            num_buckets=None,
            g_layer_name_prefix='generator',
            d_layer_name_prefix='discriminator'):

    g_inputs = layers.Input((max_len, ), dtype=tf.int64)
    d_inputs = layers.Input((max_len, ), dtype=tf.int64)

    if shared_embedding:
        g_emb_dim = d_emb_dim

        position_embeddings = layers.Embedding(
            input_dim=max_len,
            output_dim=d_emb_dim,
            weights=[get_pos_encoding_matrix(max_len, d_emb_dim)],
            name="position_embedding",
        )(tf.range(start=0, limit=max_len, delta=1))

        if emb_type == "compositional":
            if num_buckets is None:
                num_buckets = vocab_size // 5
            word_embeddings = QREmbedding(vocab_size,
                                          d_emb_dim,
                                          num_buckets=num_buckets,
                                          name="word_embedding")
        else:
            word_embeddings = layers.Embedding(vocab_size,
                                               d_emb_dim,
                                               name="word_embedding")
        g_word_embeddings = word_embeddings(g_inputs)
        d_word_embeddings = word_embeddings(d_inputs)

        g_embeddings = g_word_embeddings + position_embeddings
        d_embeddings = d_word_embeddings + position_embeddings

    else:
        g_position_embeddings = layers.Embedding(
            input_dim=max_len,
            output_dim=g_emb_dim,
            weights=[get_pos_encoding_matrix(max_len, g_emb_dim)],
            name="position_embedding",
        )(tf.range(start=0, limit=max_len, delta=1))

        d_position_embeddings = layers.Embedding(
            input_dim=max_len,
            output_dim=d_emb_dim,
            weights=[get_pos_encoding_matrix(max_len, d_emb_dim)],
            name="position_embedding",
        )(tf.range(start=0, limit=max_len, delta=1))

        if emb_type == "compositional":
            if num_buckets is None:
                num_buckets = vocab_size // 5
            g_word_embeddings = QREmbedding(vocab_size,
                                            g_emb_dim,
                                            num_buckets=num_buckets,
                                            name="g_word_embedding")
            d_word_embeddings = QREmbedding(vocab_size,
                                            d_emb_dim,
                                            num_buckets=num_buckets,
                                            name="d_word_embedding")
        else:
            g_word_embeddings = layers.Embedding(vocab_size,
                                                 g_emb_dim,
                                                 name="g_word_embedding")
            d_word_embeddings = layers.Embedding(vocab_size,
                                                 d_emb_dim,
                                                 name="d_word_embedding")
        g_word_embeddings = g_word_embeddings(g_inputs)
        d_word_embeddings = d_word_embeddings(d_inputs)

        g_embeddings = g_word_embeddings + g_position_embeddings
        d_embeddings = d_word_embeddings + d_position_embeddings

    g_encoder_output = g_embeddings
    d_encoder_output = d_embeddings

    for i in range(g_num_layers):
        j = "{0}_{1}".format(g_layer_name_prefix, i)
        g_encoder_output = TransformerBlock(g_num_heads,
                                            g_emb_dim,
                                            g_ff_dim,
                                            key_dim,
                                            layer_name=j,
                                            dropout_rate=0.1)(g_encoder_output)

    for i in range(d_num_layers):
        j = "{0}_{1}".format(d_layer_name_prefix, i)
        d_encoder_output = TransformerBlock(d_num_heads,
                                            d_emb_dim,
                                            d_ff_dim,
                                            key_dim,
                                            layer_name=j,
                                            dropout_rate=0.1)(d_encoder_output)

    g_output = layers.Dense(vocab_size,
                            name="{0}_output_1".format(g_layer_name_prefix),
                            activation="softmax")(g_encoder_output)

    d_encoder_output = layers.GlobalMaxPooling1D()(d_encoder_output)
    d_output = layers.Dense(max_len,
                            name="{0}_output_1".format(d_layer_name_prefix),
                            activation="sigmoid")(d_encoder_output)

    g_model = keras.models.Model(inputs=g_inputs,
                                 outputs=g_output,
                                 name="{0}_model".format(g_layer_name_prefix))
    d_model = keras.models.Model(inputs=d_inputs,
                                 outputs=d_output,
                                 name="{0}_model".format(d_layer_name_prefix))

    return g_model, d_model
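
# A minimal usage sketch (QREmbedding, TransformerBlock and
# get_pos_encoding_matrix are project-specific helpers defined elsewhere):
g_model, d_model = encoder(max_len=128, vocab_size=10000)
g_model.summary()  # generator: per-token softmax over the vocabulary
d_model.summary()  # discriminator: sigmoid scores after global max pooling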
Example #25
0
        else:
            self.len = math.floor(file_length * test_length / batch_size)

    def __len__(self):
        return self.len


#sentences = MyCorpus(files)
#model = gensim.models.Word2Vec(sentences=sentences, max_final_vocab = 30)

dl_model = keras.Sequential([
    layers.Conv1D(32,
                  context_size,
                  input_shape=(1500, vec_length),
                  kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)),
    layers.GlobalMaxPooling1D(data_format="channels_first"),
    layers.Dense(32, kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)),
    layers.Dense(2,
                 activation='softmax',
                 kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01))
])

optimizer = tf.optimizers.Adam(learning_rate=0.001)
loss = keras.losses.BinaryCrossentropy()
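
# The model above is defined but never compiled in this excerpt; a minimal
# sketch wiring in the optimizer and loss objects created above (the metric
# choice is an assumption):
dl_model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])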


def runFile(path):
    modelex = copy.deepcopy(model)
    sentences = extract_tokens(path)
    modelex.build_vocab([sentences], update=True, trim_rule=ak_rule)
    modelex.train([sentences], total_examples=1, epochs=1)
Example #26
0
def create_model(model_layers, config, compile=True):

    model = models.Sequential()

    for item in model_layers:
        if (item["type"] == "EMBEDDING"):
            model.add(
                layers.Embedding(input_dim=item["input_dim"],
                                 output_dim=item["output_dim"],
                                 input_length=item["input_length"]))
        elif (item["type"] == "MAX_POOLING"):
            model.add(
                layers.MaxPooling1D(pool_size=item["max_pooling_size"],
                                    strides=item["max_pooling_strides"]))
        elif (item["type"] == "CNN_1D"):
            model.add(
                layers.Conv1D(item["num_filters"],
                              item["kernel_size"],
                              activation=item["activation"]))
        elif (item["type"] == "LSTM"):
            if (item["recurrent_dropout"] > 0):
                model.add(
                    layers.LSTM(units=item["unit"],
                                recurrent_dropout=item["recurrent_dropout"]))
            else:
                model.add(layers.LSTM(units=item["units"]))
        elif (item["type"] == "BiLSTM"):
            if (item["recurrent_dropout"] > 0
                    and item["recurrent_dropout"] < 1):
                model.add(
                    layers.Bidirectional(
                        layers.LSTM(
                            units=item["units"],
                            recurrent_dropout=item["recurrent_dropout"])))
            else:
                model.add(
                    layers.Bidirectional(layers.LSTM(units=item["units"])))
        elif (item["type"] == "GRU"):
            if (item["recurrent_dropout"] > 0
                    and item["recurrent_dropout"] < 1):
                model.add(
                    layers.GRU(units=item["units"],
                               recurrent_dropout=item["recurrent_dropout"]))
            else:
                model.add(layers.GRU(units=item["units"]))
        elif (item["type"] == "BiGRU"):
            if (item["recurrent_dropout"] > 0):
                model.add(
                    layers.Bidirectional(
                        layers.GRU(
                            units=item["units"],
                            recurrent_dropout=item["recurrent_dropout"])))
            else:
                model.add(layers.Bidirectional(
                    layers.GRU(units=item["units"])))
        elif (item["type"] == "GLOBAL"):
            model.add(layers.GlobalMaxPooling1D())
        elif (item["type"] == "FLATTEN"):
            model.add(layers.Flatten())
        elif (item["type"] == "DENSE"):
            model.add(
                layers.Dense(item["neurons"], activation=item["activation"]))
        elif (item["type"] == "DROPOUT"):
            if (item["dropout"] > 0 and item["dropout"] < 1):
                model.add(layers.Dropout(rate=item["dropout"]))
        elif (item["type"] == "SPATIAL_DROPOUT_2D"):
            if (item["dropout"] > 0 and item["dropout"] < 1):
                model.add(layers.SpatialDropout2D(rate=item["dropout"]))
        else:
            continue

    # configure the optimizer and loss function
    if compile:
        model.compile(optimizer=config["optimizer"],
                      loss=config["loss"],
                      metrics=config["metrics"])

    # print the model summary
    if config["verbose"] >= 1:
        model.summary()

    return model
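
# A sketch of the layer-spec list and config that create_model() consumes
# (all values illustrative, not from the original):
example_layers = [
    {"type": "EMBEDDING", "input_dim": 10000, "output_dim": 64, "input_length": 200},
    {"type": "CNN_1D", "num_filters": 64, "kernel_size": 5, "activation": "relu"},
    {"type": "GLOBAL"},
    {"type": "DENSE", "neurons": 1, "activation": "sigmoid"},
]
example_config = {"optimizer": "adam", "loss": "binary_crossentropy",
                  "metrics": ["accuracy"], "verbose": 1}
clf = create_model(example_layers, example_config)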
Example #27
0
model_recurrente = Sequential()

# The input is a vector of length maxlen, where each value is an integer representing a word: shape (None, maxlen)
model_recurrente.add(layers.Embedding(max_features, 8, input_length=maxlen))
model_recurrente.add(layers.LSTM(32, return_sequences=True))
model_recurrente.add(layers.LSTM(32, return_sequences=True))
model_recurrente.add(layers.LSTM(32))
model_recurrente.add(layers.Dense(1, activation='sigmoid'))

print("Modelo para procesar texto con red recurrente (varias capas")
print(model_recurrente.summary())

#########################################
#Text Processing
#Convolution (1D)
#########################################

model_conv_1d = Sequential()

# The input is a vector of length maxlen, where each value is an integer representing a word: shape (None, maxlen)
model_conv_1d.add(layers.Embedding(max_features, 8, input_length=maxlen))
model_conv_1d.add(layers.Conv1D(32, 3, activation='relu'))
model_conv_1d.add(layers.MaxPooling1D(2))
model_conv_1d.add(layers.Conv1D(32, 3, activation='relu'))
model_conv_1d.add(layers.GlobalMaxPooling1D())
model_conv_1d.add(layers.Dense(1))

print("Modelo para procesar texto con convolucion 1D")
print(model_conv_1d.summary())
Example #28
0
def create_functional_model(model_layers, config, compile=True):
    input_layer = layers.Input(shape=(config["input_dim"], ))
    func_layers = []
    func_layers.append(input_layer)

    for item in model_layers:
        if item["type"] == "EMBEDDING":
            input_layer = layers.Input(shape=(item["input_length"], ))
            func_layers = []
            func_layers.append(input_layer)

            embedding_output_dim = item["output_dim"]
            embedding_layer = layers.Embedding(
                input_dim=item["input_dim"],
                output_dim=item["output_dim"],
                input_length=item["input_length"])(func_layers[-1])
            func_layers.append(embedding_layer)
        elif (item["type"] == "POSITION_EMBEDDING"):
            input_layer = layers.Input(shape=(item["input_length"], ))
            func_layers = []
            func_layers.append(input_layer)

            embedding_output_dim = item["output_dim"]
            position_embedding_layer = PositionEmbedding(
                vocab_size=item["input_dim"],
                output_dim=item["output_dim"],
                maxlen=item["input_length"])(func_layers[-1])
            func_layers.append(position_embedding_layer)
        elif (item["type"] == "CNNTransformer"):
            cnn_Transformer_layer = CNNTransformer(
                embed_dim=embedding_output_dim,
                num_heads=item["num_heads"],
                ff_dim=item["ff_dim"])(func_layers[-1])
            func_layers.append(cnn_Transformer_layer)
        elif (item["type"] == "FLATTEN"):
            flatten_layer = layers.Flatten()(func_layers[-1])
            func_layers.append(flatten_layer)
        elif item["type"] == "CNN_1D":
            conv1d_layer = layers.Conv1D(filters=item["num_filters"],
                                         kernel_size=item["kernel_size"],
                                         activation=item["activation"])(
                                             func_layers[-1])
            func_layers.append(conv1d_layer)
        elif item["type"] == "MAX_POOLING":
            max_pooling = layers.MaxPooling1D(
                pool_size=item["max_pooling_size"],
                strides=item["max_pooling_strides"])(func_layers[-1])
            func_layers.append(max_pooling)
        elif item["type"] == "BATCH_NORMALIZATION":
            batch_normalization = layers.BatchNormalization()(func_layers[-1])
            func_layers.append(batch_normalization)
        elif item["type"] == "GLOBAL":
            global_layer = layers.GlobalMaxPooling1D()(func_layers[-1])
            func_layers.append(global_layer)
        elif (item["type"] == "DENSE"):
            dense_layer = layers.Dense(units=item["neurons"],
                                       activation=item["activation"])(
                                           func_layers[-1])
            func_layers.append(dense_layer)
        elif (item["type"] == "DROPOUT"):
            if (item["dropout"] > 0 and item["dropout"] < 1):
                dropout_layer = layers.Dropout(rate=item["dropout"])(
                    func_layers[-1])
                func_layers.append(dropout_layer)
        else:
            continue

    model = models.Model(inputs=input_layer, outputs=func_layers[-1])

    # configure the optimizer and loss function
    if compile:
        model.compile(optimizer=config["optimizer"],
                      loss=config["loss"],
                      metrics=config["metrics"])

    # print the model summary
    if config["verbose"] >= 1:
        model.summary()

    return model
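
# The functional builder consumes the same spec format; a sketch using the
# Transformer path (PositionEmbedding and CNNTransformer are custom layers
# defined elsewhere in this project; all values illustrative):
example_layers = [
    {"type": "POSITION_EMBEDDING", "input_dim": 10000, "output_dim": 64,
     "input_length": 200},
    {"type": "CNNTransformer", "num_heads": 2, "ff_dim": 128},
    {"type": "GLOBAL"},
    {"type": "DENSE", "neurons": 1, "activation": "sigmoid"},
]
example_config = {"optimizer": "adam", "loss": "binary_crossentropy",
                  "metrics": ["accuracy"], "verbose": 1, "input_dim": 200}
clf = create_functional_model(example_layers, example_config)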
Example #29
0
best_acc = 0
good_kernel = 0
i = 0
history_kernel = np.zeros(30)
history_acc = np.zeros(30)
for KERNELS in range(1, 30):
    network = models.Sequential()

    network.add(
        layers.Conv1D(1,
                      kernel_size=KERNELS,
                      activation='relu',
                      input_shape=(time_length, 1)))

    network.add(layers.GlobalMaxPooling1D())

    network.add(layers.Dense(64, activation='softmax'))
    network.add(layers.Dropout(0.6))
    network.add(layers.Dense(64, activation='softmax'))
    network.add(layers.Dense(1, activation='sigmoid'))

    network.compile(optimizer='rmsprop',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])

    # The data have already been shuffled.
    history = network.fit(x,
                          y[:, 0],
                          epochs=EPOCH,
                          batch_size=BATCH)
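
    # A sketch completing the bookkeeping implied by best_acc, history_kernel
    # and history_acc above (tracking final training accuracy; the original
    # selection criterion is not shown in this excerpt):
    acc = history.history['accuracy'][-1]
    history_kernel[i] = KERNELS
    history_acc[i] = acc
    i += 1
    if acc > best_acc:
        best_acc = acc
        good_kernel = KERNELS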
Example #30
0
def MobileNetV3Large(include_top=True,
                     weights='hasc',
                     input_shape=None,
                     pooling=None,
                     classes=6,
                     classifier_activation='softmax',
                     alpha=1.0,
                     minimalistic=False):

    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    def stack_fn(x, kernel, activation, se_ratio):
        def depth(d):
            return _depth(d * alpha)

        x = InvertedResBlock(1, depth(16), 3, 1, None, relu, 0)(x)
        x = InvertedResBlock(4, depth(24), 3, 2, None, relu, 1)(x)
        x = InvertedResBlock(3, depth(24), 3, 1, None, relu, 2)(x)
        x = InvertedResBlock(3, depth(40), kernel, 2, se_ratio, relu, 3)(x)
        x = InvertedResBlock(3, depth(40), kernel, 1, se_ratio, relu, 4)(x)
        x = InvertedResBlock(3, depth(40), kernel, 1, se_ratio, relu, 5)(x)
        x = InvertedResBlock(6, depth(80), 3, 2, None, activation, 6)(x)
        x = InvertedResBlock(2.5, depth(80), 3, 1, None, activation, 7)(x)
        x = InvertedResBlock(2.3, depth(80), 3, 1, None, activation, 8)(x)
        x = InvertedResBlock(2.3, depth(80), 3, 1, None, activation, 9)(x)
        x = InvertedResBlock(6, depth(112), 3, 1, se_ratio, activation, 10)(x)
        x = InvertedResBlock(6, depth(112), 3, 1, se_ratio, activation, 11)(x)
        x = InvertedResBlock(6, depth(160), kernel, 2, se_ratio, activation,
                             12)(x)
        x = InvertedResBlock(6, depth(160), kernel, 1, se_ratio, activation,
                             13)(x)
        x = InvertedResBlock(6, depth(160), kernel, 1, se_ratio, activation,
                             14)(x)
        return x

    model = MobileNetV3(stack_fn,
                        1280,
                        minimalistic=minimalistic,
                        input_shape=input_shape,
                        classes=classes,
                        classifier_activation=classifier_activation)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/mobilenetv3large/mobilenetv3large_hasc_weights_{}_{}.hdf5'.format(
                int(input_shape[0]), int(input_shape[1]))

        # initialize from 'hasc' or from a weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the classification top is not included
    if not include_top:
        if pooling is None:
            # drop the top layers
            model = Model(inputs=model.input, outputs=model.layers[-7].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-7].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-7].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-7].output)

    return model