Example 1
# TokenFile.close()

train_sequences = tokenizer.texts_to_sequences(x_train['comment'])
test_sequences = tokenizer.texts_to_sequences(x_test['comment'])

train_data = pad_sequences(train_sequences, maxlen=maxlen)
test_data = pad_sequences(test_sequences, maxlen=maxlen)

train_labels = np.asarray(y_train)
test_labels = np.asarray(y_test)

embedding_dim = 50

CNN_model = Sequential([
    layers.Embedding(max_words, embedding_dim),
    layers.Conv1D(50, 7, activation='relu'),
    layers.MaxPooling1D(5),
    layers.Conv1D(50, 7, activation='relu'),
    layers.GlobalAveragePooling1D(),
    layers.Dense(20, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])

RNN_model = Sequential([
    layers.Embedding(max_words, embedding_dim),
    layers.Bidirectional(layers.LSTM(64)),
    layers.Dense(64, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])

# CNN_model.summary()
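
# Neither model above is compiled in the excerpt. A minimal sketch for binary
# classification; the optimizer, loss, and training settings are assumptions,
# not from the source:
CNN_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = CNN_model.fit(train_data, train_labels,
                        epochs=10, batch_size=32, validation_split=0.2)

Example 2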
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

import load_text

embedding_dim = 300
inputs = keras.Input(shape=(54, ), name="input_text")
embedding_layer = layers.Embedding(load_text.vocab_size,
                                   embedding_dim,
                                   name="embedding")(inputs)
conv_3_layer = layers.Conv1D(100, 3, activation='relu',
                             name="filter_size_3")(embedding_layer)
conv_4_layer = layers.Conv1D(100, 4, activation='relu',
                             name="filter_size_4")(embedding_layer)
conv_5_layer = layers.Conv1D(100, 5, activation='relu',
                             name="filter_size_5")(embedding_layer)
max_pool_3_layer = layers.MaxPool1D(pool_size=52,
                                    name="max_pool_3",
                                    padding="same")(conv_3_layer)
max_pool_4_layer = layers.MaxPool1D(pool_size=51,
                                    name="max_pool_4",
                                    padding="same")(conv_4_layer)
max_pool_5_layer = layers.MaxPool1D(pool_size=50,
                                    name="max_pool_5",
                                    padding="same")(conv_5_layer)
flatten_3_layer = layers.Flatten()(max_pool_3_layer)
flatten_4_layer = layers.Flatten()(max_pool_4_layer)
flatten_5_layer = layers.Flatten()(max_pool_5_layer)
concatenate_layer = layers.concatenate(
    [flatten_3_layer, flatten_4_layer, flatten_5_layer])
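
# The snippet breaks off after the concatenation. A hedged completion of the
# classification head (the dropout rate and single sigmoid unit are assumptions):
dropout_layer = layers.Dropout(0.5)(concatenate_layer)
outputs = layers.Dense(1, activation='sigmoid', name='predictions')(dropout_layer)
model = keras.Model(inputs=inputs, outputs=outputs)

Example 3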
plt.xlabel('Timestamps (Each value represents a 5 minute interval.)', fontsize=28)
plt.ylabel('CPU Utilization', fontsize=28)
plt.title('CPU Utilization Over a Period of Time', fontsize=32)
plt.show()

cpu_utilization_values = cpu_utilization.value.to_list()
# print(cpu_utilization_values)

normalized_cpu_utilization_values, training_mean, training_std = normalize(cpu_utilization_values)

x_train = create_sequences(normalized_cpu_utilization_values, time_steps=32)
print("Training data shape: ", x_train.shape)

# Conv1D based auto-encoder model.
model = keras.Sequential([layers.Input(shape=(x_train.shape[1], x_train.shape[2])),
                          layers.Conv1D(filters=18, kernel_size=7, padding="same", strides=2, activation="relu"),
                          layers.Dropout(rate=0.2),
                          layers.Conv1D(filters=9, kernel_size=7, padding="same", strides=2, activation="relu"),
                          layers.Conv1DTranspose(filters=9, kernel_size=7, padding="same", strides=2,
                                                 activation="relu"),
                          layers.Dropout(rate=0.2),
                          layers.Conv1DTranspose(filters=18, kernel_size=7, padding="same", strides=2,
                                                 activation="relu"),
                          layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same")])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
model.summary()

history = model.fit(x_train, x_train, epochs=50, batch_size=128, validation_split=0.1,
                    callbacks=[keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min")])

# Plotting the training and validation losses.
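# (A minimal sketch of the announced plot; assumes matplotlib is imported as
# plt, as in the earlier plotting code.)
plt.plot(history.history["loss"], label="Training loss")
plt.plot(history.history["val_loss"], label="Validation loss")
plt.legend()
plt.show()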
Example 4
)

_, train_accuracy = model.evaluate(X_train, y_train)
print(f'Training Accuracy: {train_accuracy:.4f}')
_, test_accuracy = model.evaluate(X_test, y_test)
print(f'Testing Accuracy: {test_accuracy:.4f}')
plot_history(history)


# Lesson 9
model = models.Sequential()
model.add(layers.Embedding(
    input_dim=len(tokenizer.word_index) + 1,
    output_dim=100,
    input_length=100))
model.add(layers.Conv1D(128, 5, activation='relu'))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

model.summary()


history = model.fit(
    X_train,
    y_train,
Example 5
def get_text_encoding_layer(name,
                            dataset,
                            max_features=100,
                            max_len=20,
                            embedding_dim=20,
                            layer_type="bi_rnn",
                            n_units=20,
                            dropout=0.3):
    """Creates everything that is needed for a textual input pipeline.

    Args:
        name (string): name of the feature
        dataset (tf.DataSet): tensorflow dataset
        max_features (int, optional): Maximum vocab size. Defaults to 100.
        max_len (int, optional): Sequence length to pad the outputs to. Defaults to 20.
        embedding_dim (int, optional): Embedding dimension. Defaults to 20.
        layer_type (str, optional): layer type to use (e.g., rnn, bi_rnn, gru, cnn). Defaults to "bi_rnn".
        n_units (int, optional): number of units within that layer. Defaults to 20.
        dropout (float, optional): dropout percentage. Defaults to 0.3.

    Returns:
        lambda function: textual input pipeline
    """
    # max_features = 100  # Maximum vocab size.
    # max_len = 20  # Sequence length to pad the outputs to.
    # embedding_dim = 20

    vec_layer = TextVectorization(max_tokens=max_features,
                                  output_mode='int',
                                  output_sequence_length=max_len)

    text_ds = dataset.map(lambda x, y: x[name])

    vec_layer.adapt(text_ds)

    embedding_layer = layers.Embedding(max_features + 1, embedding_dim)

    dropout_layer = layers.Dropout(0.4)  # embedding dropout (fixed); the `dropout` arg applies inside the recurrent layers

    if layer_type == "cnn":
        # Conv1D + global max pooling
        conv_layer = layers.Conv1D(n_units,
                                   5,
                                   padding='valid',
                                   activation='relu',
                                   strides=3)
        max_pooling_layer = layers.GlobalMaxPooling1D()

        return lambda feature: max_pooling_layer(
            conv_layer(dropout_layer(embedding_layer(vec_layer(feature)))))

    elif layer_type == "rnn":
        rnn_layer = layers.SimpleRNN(units=n_units,
                                     activation='relu',
                                     dropout=dropout)

        return lambda feature: rnn_layer(
            dropout_layer(embedding_layer(vec_layer(feature))))

    elif layer_type == "bi_rnn":
        bi_rnn_layer = layers.Bidirectional(
            layers.SimpleRNN(units=n_units, activation='relu',
                             dropout=dropout))

        return lambda feature: bi_rnn_layer(
            dropout_layer(embedding_layer(vec_layer(feature))))

    elif layer_type == "gru":
        gru_layer = layers.Bidirectional(
            layers.GRU(units=n_units, activation='relu', dropout=dropout))

        return lambda feature: gru_layer(
            dropout_layer(embedding_layer(vec_layer(feature))))
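
# A hedged usage sketch for the factory above, assuming tensorflow is imported
# as tf; the toy dataset and the feature name "review" are invented:
toy_ds = tf.data.Dataset.from_tensor_slices(
    ({"review": ["good film", "bad film", "great plot"]}, [1, 0, 1])).batch(2)
encode_review = get_text_encoding_layer("review", toy_ds, layer_type="cnn")
review_input = tf.keras.Input(shape=(1,), dtype=tf.string, name="review")
encoded = encode_review(review_input)  # -> (batch, n_units) after max pooling
prediction = tf.keras.layers.Dense(1, activation="sigmoid")(encoded)
toy_model = tf.keras.Model(review_input, prediction)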
Example 6
def conv_bn(x, filter, kernel, activation):
    return layers.BatchNormalization()(layers.Conv1D(filter,
                                                     kernel,
                                                     activation=activation)(x))
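
# A hedged usage sketch for the helper above; the input tensor shape is illustrative:
x_in = layers.Input(shape=(128, 16))
x_out = conv_bn(x_in, 64, 3, 'relu')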
Example 7
def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride, conv_border_mode, units, output_dim=155):
    ''' CNN + RNN '''
    input_data = layers.Input(name='input', shape=input_dim)

    # convolutional layer
    conv_1d = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d')(input_data)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d)
    # batch normalization
    bn_cnn = layers.BatchNormalization(name='bn_conv_1d')(max_pool)

    # convolutional layer #2
    conv_1d_2 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_2')(bn_cnn)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_2)
    # batch normalization
    bn_cnn_2 = layers.BatchNormalization(name='bn_conv_1d_2')(max_pool)

    # convolutional layer #3
    conv_1d_3 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_3')(bn_cnn_2)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_3)
    # batch normalization (the original applied this to the conv output, silently
    # discarding the pooling layer; blocks 3-5 now match blocks 1-2)
    bn_cnn_3 = layers.BatchNormalization(name='bn_conv_1d_3')(max_pool)

    # convolutional layer #4
    conv_1d_4 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_4')(bn_cnn_3)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_4)
    # batch normalization
    bn_cnn_4 = layers.BatchNormalization(name='bn_conv_1d_4')(max_pool)

    # convolutional layer #5
    conv_1d_5 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_5')(bn_cnn_4)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_5)
    # batch normalization
    bn_cnn_5 = layers.BatchNormalization(name='bn_conv_1d_5')(max_pool)

    # recurrent layer
    simp_rnn = layers.SimpleRNN(units, activation='relu', return_sequences=True, implementation=2, name='rnn')(bn_cnn_5)
    # batch normalization
    bn_rnn = layers.BatchNormalization()(simp_rnn)

    # recurrent layer
    # simp_rnn = layers.SimpleRNN(units//4, activation='relu', return_sequences=True, implementation=2, name='rnn_2')(bn_rnn)
    # # batch normalization
    # bn_rnn = layers.BatchNormalization()(simp_rnn)

    flat = layers.Flatten()(bn_rnn)
    dense = layers.Dense(512)(flat)
    dense = layers.Dense(256)(dense)
    dense = layers.Dense(output_dim)(dense)

    # TimeDistributed(Dense(output_dim)) layer
    # time_dense = layers.TimeDistributed(layers.Dense(output_dim))(bn_rnn)
    
    # sigmoid activation layer
    y_pred = layers.Activation('sigmoid', name='sigmoid')(dense)

    model = models.Model(inputs=input_data, outputs=y_pred)
    #model.output_length = lambda x: cnn_output_length(x, kernel_size, conv_border_mode, conv_stride)
    print(model.summary())
    return model
Example 8
    def multi_output_model_eg():
        """
        This function will detail the building and use of a multi-output model using a simple social-media example

        :return: None
        """

        # Dummy variables to make sure there are no errors
        posts = None
        age_targets = None
        income_targets = None
        gender_targets = None
        vocabulary_size = 50000
        num_income_groups = 10

        # Make an input to hold the posts shared by users, variable length just like in the previous example
        post_input = Input(shape=(None, ), dtype='int32', name='posts')

        # Embed and encode these inputs. This time we'll use a larger Conv1D network.
        # (The original called Embedding(256, vocabulary_size), swapping input_dim
        # and output_dim; input_dim must be the vocabulary size.)
        embedded_posts = layers.Embedding(vocabulary_size, 256)(post_input)
        x = layers.Conv1D(128, 5, activation='relu')(embedded_posts)
        x = layers.MaxPooling1D(5)(x)
        x = layers.Conv1D(256, 5, activation='relu')(x)
        x = layers.Conv1D(256, 5, activation='relu')(x)
        x = layers.MaxPooling1D(5)(x)
        x = layers.Conv1D(256, 5, activation='relu')(x)
        x = layers.Conv1D(256, 5, activation='relu')(x)
        x = layers.GlobalMaxPool1D()(x)
        x = layers.Dense(128, activation='relu')(x)

        # Now make 3 separate heads, one for each output. Note how all 3 have unique activations. This will require
        # unique loss functions for each head.
        age_prediction = layers.Dense(1, name='age')(x)
        income_prediction = layers.Dense(num_income_groups,
                                         activation='softmax',
                                         name='income')(x)
        gender_prediction = layers.Dense(1,
                                         activation='sigmoid',
                                         name='gender')(x)

        # Instantiate the model
        model = models.Model(
            post_input, [age_prediction, income_prediction, gender_prediction])

        # Compile and fit the model. Use the dictionary method of passing parameters for greater explicitness. Note that
        # very imbalanced loss contributions will lead to the model being more optimised for the output with the largest
        # individual loss than for the others. To counteract this we can scale the individual loss functions to diminish
        # the largest contributor (mse ~= 3-5) and magnify the smallest contributor (binary_crossentropy ~= 0.1)
        model.compile(optimizer='rmsprop',
                      loss={
                          'age': 'mse',
                          'income': 'categorical_crossentropy',
                          'gender': 'binary_crossentropy'
                      },
                      loss_weights={
                          'age': 0.25,
                          'income': 1.0,
                          'gender': 10.0
                      })
        model.fit(posts, {
            'age': age_targets,
            'income': income_targets,
            'gender': gender_targets
        },
                  epochs=10,
                  batch_size=64)
Example 9
from tensorflow.keras import layers
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
import numpy as np

vocabulary_size = 100
num_income_groups = 10
num_samples = 1000

posts_input = Input(shape=(None, ), dtype='int32', name='posts')
embedded_posts = layers.Embedding(vocabulary_size, 256)(posts_input)  # input_dim is the vocab size
x = layers.Conv1D(128, 5, activation='relu')(embedded_posts)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256, 5, activation='relu')(x)
x = layers.Conv1D(256, 5, activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256, 5, activation='relu')(x)
x = layers.Conv1D(256, 5, activation='relu')(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation='relu')(x)

age_prediction = layers.Dense(1, name='age')(x)
income_prediction = layers.Dense(num_income_groups,
                                 activation='softmax',
                                 name='income')(x)
gender_prediction = layers.Dense(1, activation='sigmoid', name='gender')(x)

model = Model(posts_input,
              [age_prediction, income_prediction, gender_prediction])
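
# The excerpt stops at model construction. A hedged compile step that mirrors
# the loss choices and weights spelled out in the previous example:
model.compile(optimizer='rmsprop',
              loss={'age': 'mse',
                    'income': 'categorical_crossentropy',
                    'gender': 'binary_crossentropy'},
              loss_weights={'age': 0.25, 'income': 1.0, 'gender': 10.0})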
Example 10
def create_conv1d(inputsize, layerlist, ncategories, config):
    """Make a CNN with convolutional layers, max pooling, and fully connected layers.

    The convolutional layers will have 'glorot_normal' initialization. To add one,
    include the tuple ("conv", conv_dict) in layerlist, where conv_dict is a dictionary
    minimally including the keys "filters" (number of filters) and
    "width" (kernel size).

    The max pooling layers will have 'valid' padding. For a max pooling layer,
    include the tuple ("maxpool", pool_size) in layerlist.

    To include a fully-connected layer, include the tuple ("fc", size, activation).

    To insert skip connections, use ("startskip",) to checkpoint the current
    output and ("endskip",) to pop that checkpoint and add it back in. The
    default behavior is a stack; you can specify ("endskip", 4) for finer
    control over which checkpoint is re-added, and using only ("endskip", 0)
    would turn the checkpoint list into a queue instead of a stack.

    You can add an arbitrary layer instance by including (layer,) in layerlist.
    """

    inputs = tf.keras.Input(shape=(inputsize, 1))
    outputs = inputs
    checkpoints = []
    for layer in layerlist:
        if layer[0] == "conv":
            # Defaults
            if not layer[1].get("padding"):
                layer[1]["padding"] = "same"
            if not layer[1].get("dilation"):
                layer[1]["dilation"] = 1
            if not layer[1].get("activation"):
                layer[1]["activation"] = "relu"
            # Construction
            outputs = layers.Conv1D(
                filters=layer[1]["filters"],
                kernel_size=layer[1]["width"],
                padding=layer[1]["padding"],
                dilation_rate=layer[1]["dilation"],
                activation=layer[1]["activation"],
                kernel_initializer="glorot_normal",
            )(outputs)
        elif layer[0] == "maxpool":
            outputs = layers.MaxPooling1D(pool_size=layer[1],
                                          strides=2)(outputs)
        elif layer[0] == "fc":
            outputs = layers.Dense(layer[1], activation=layer[2])(outputs)
        elif layer[0] == "startskip":
            checkpoints.append(outputs)
        elif layer[0] == "endskip":
            if len(layer) < 2:
                skipped = checkpoints.pop()
            else:
                skipped = checkpoints[layer[1]]
                del checkpoints[layer[1]]
            outputs = layers.Add()([outputs, skipped])
        else:
            outputs = layer[0](outputs)
    outputs = layers.Flatten()(outputs)
    outputs = layers.Dense(
        ncategories,
        activation="softmax",
        kernel_regularizer=config.get("regularizer"))(outputs)

    return tf.keras.Model(inputs=inputs, outputs=outputs, name="conv1d")
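
# A hedged usage sketch for the builder above; the layer specs, input size,
# and empty config are illustrative:
example_layers = [
    ("conv", {"filters": 32, "width": 5}),
    ("startskip",),                       # checkpoint the current activation
    ("conv", {"filters": 32, "width": 3}),
    ("endskip",),                         # residual add back to the checkpoint
    ("maxpool", 2),
    ("fc", 64, "relu"),
]
example_model = create_conv1d(128, example_layers, ncategories=10, config={})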
Example 11
args = parser.parse_args()

outputFile = args.outputFile

#
# Build my model
#

import tensorflow as tf
from tensorflow.keras import layers
from saphyra.layers import RpLayer, rvec
# Ringer NN
input_rings = layers.Input(shape=(100, ), name='Input_rings')
#input_rings  = RpLayer(rvec, name='RpLayer')(input_rings)
conv_rings = layers.Reshape((100, 1))(input_rings)
conv_rings = layers.Conv1D(16, kernel_size=2, name='conv_1',
                           activation='relu')(conv_rings)
conv_rings = layers.Conv1D(32, kernel_size=2, name='conv_2',
                           activation='relu')(conv_rings)
conv_output = layers.Flatten()(conv_rings)
# Shower shape NN
input_shower_shapes = layers.Input(shape=(5, ), name='Input_shower_shapes')
dense_shower_shapes = layers.Dense(
    4, activation='relu', name='dense_shower_shapes_1')(input_shower_shapes)
# Decision NN
input_concat = layers.Concatenate(axis=1)([conv_output, dense_shower_shapes])
dense = layers.Dense(32, activation='relu', name='dense_layer')(input_concat)
dense = layers.Dense(1, activation='linear',
                     name='output_for_inference')(dense)
output = layers.Activation('sigmoid', name='output_for_training')(dense)
# Build the model
model = tf.keras.Model([input_rings, input_shower_shapes], output)
Example 12
def build_cnn(config, input_tensor):
    # x = input  # (-1, 520)
    # building_layers = [[99, 22], [66, 22], [33, 22]]
    # for units in building_layers:  # config['building_layers']:
    # x = layers.Reshape((common_hl_output.shape[1], 1))(common_hl_output)

    sdae_model = sdae(
        input_data=rss_train,
        hidden_layers=config['sdae_hidden_layers'],
        cache=True,
        model_fname=None,
        optimizer=config['optimizer'],
        corruption_level=config['corruption_level'],
        batch_size=config['batch_size'],
        epochs=config['epochs'],
        validation_split=config['validation_split'],
    )
    x = sdae_model(input_tensor)
    reshaped = layers.Reshape((x.shape[1], 1))(x)  # renamed from `input`, which shadowed the builtin

    # building classification output
    x = reshaped
    x = layers.Conv1D(256, 3, activation='relu')(x)
    x = layers.Conv1D(128, 3, activation='relu')(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.Flatten()(x)
    building_output = layers.Dense(3, activation='softmax',
                                   name='building')(x)  # (-1, 3)

    # floor classification output
    x = reshaped
    x = layers.Conv1D(256, 3, activation='relu')(x)
    x = layers.MaxPooling1D(2)(x)
    x = layers.Conv1D(128, 3, activation='relu')(x)
    x = layers.MaxPooling1D(2)(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.Flatten()(x)
    floor_output = layers.Dense(5, activation='softmax',
                                name='floor')(x)  # (-1, 5)

    # coordinates regression output
    # for units in config['coordinates_layers']:
    x = reshaped
    x = layers.Conv1D(256, 3, activation='relu')(x)
    x = layers.MaxPooling1D(2)(x)
    x = layers.Conv1D(128, 3, activation='relu')(x)
    x = layers.MaxPooling1D(2)(x)
    x = layers.Conv1D(64, 3, activation='relu')(x)
    x = layers.Flatten()(x)
    coordinates_output = layers.Dense(2, activation='linear',
                                      name='coord')(x)  # (-1, 2)

    model = keras.Model(
        inputs=input_tensor,
        outputs=[building_output, floor_output, coordinates_output])
    model.compile(optimizer=config['optimizer'],
                  loss={
                      'building': 'categorical_crossentropy',
                      'floor': 'categorical_crossentropy',
                      'coord': 'mse'
                  },
                  loss_weights=[
                      config['building_weight'], config['floor_weight'],
                      config['coordinates_weight']
                  ],
                  metrics={
                      'building': 'categorical_accuracy',
                      'floor': 'categorical_accuracy',
                      'coord': 'mse'
                  })
    return model
Example 13
test_gen = generator(float_data,
                     lookback=lookback,
                     delay=delay,
                     min_index=300001,
                     max_index=None,
                     step=step)

val_steps = (300000 - 200001 - lookback) // batch_size
test_steps = (len(float_data) - 300001 - lookback) // batch_size

# A basic machine learning approach

model = Sequential()
model.add(
    layers.Conv1D(32,
                  5,
                  activation='relu',
                  input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')

history = model.fit(train_gen,
                    steps_per_epoch=500,
                    epochs=20,
                    validation_data=val_gen,
                    validation_steps=val_steps)

loss = history.history['loss']
val_loss = history.history['val_loss']
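
# The snippet ends after extracting the loss curves; a minimal plotting sketch,
# assuming matplotlib is imported as plt:
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.legend()
plt.show()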
Example 14
def build_sender_model(n_images,
                       input_image_shape,
                       embedding_size,
                       vocabulary_size,
                       optimizer,
                       sender_type="agnostic",
                       image_embedding_layer=None,
                       verbose=False,
                       **kwargs):
    image_inputs = [
        layers.Input(shape=input_image_shape,
                     name=f"S_image_in_{i}",
                     dtype="float32") for i in range(n_images)
    ]

    if not image_embedding_layer:
        image_embedding_layer = layers.Dense(embedding_size,
                                             name="S_image_embedding")

    # agnostic part
    activation = layers.Activation("sigmoid", name="S_sigmoid")
    output_layer = layers.Dense(vocabulary_size, name="S_output")

    # informed part
    stack = layers.Lambda(lambda x: K.stack(x, axis=1), name="S_stack")
    permute = layers.Permute([2, 1], name="S_permute")
    feature_filters = layers.Conv1D(filters=vocabulary_size,
                                    kernel_size=(1, ),
                                    input_shape=[n_images, embedding_size],
                                    data_format="channels_last",
                                    name="S_feature_filters")

    permute_2 = layers.Permute([2, 1], name="S_permute_2")
    vocabulary_filter = layers.Conv1D(1,
                                      kernel_size=(1, ),
                                      data_format="channels_last",
                                      name="S_vocabulary_filter")
    flatten = layers.Flatten()

    y = [image_embedding_layer(x) for x in image_inputs]
    if sender_type == "agnostic":
        y = layers.concatenate(y, axis=-1)
        y = activation(y)
        y = output_layer(y)
    elif sender_type == "informed":
        y = stack(y)
        y = permute(y)
        y = feature_filters(y)
        y = activation(y)
        y = permute_2(y)
        y = vocabulary_filter(y)
        y = flatten(y)

    # y = layers.Activation("relu")(y)
    # y = layers.Dense(vocabulary_size)(y)
    y = layers.Activation("sigmoid")(y)

    model_predict = models.Model(image_inputs, y, name="S_predict")
    model_predict.compile(loss=losses.binary_crossentropy, optimizer=optimizer)

    if verbose:
        model_predict.summary()

    return model_predict, model_predict
Example 15
def Smi2Smi():

    #product
    l_in = layers.Input(shape=(None, ))
    l_mask = layers.Input(shape=(None, ))

    #reagents
    l_dec = layers.Input(shape=(None, ))
    l_dmask = layers.Input(shape=(None, ))

    #positional encodings for product and reagents, respectively
    l_pos = PositionLayer(EMBEDDING_SIZE)(l_mask)
    l_dpos = PositionLayer(EMBEDDING_SIZE)(l_dmask)

    l_emask = MaskLayerRight()([l_dmask, l_mask])
    l_right_mask = MaskLayerTriangular()(l_dmask)
    l_left_mask = MaskLayerLeft()(l_mask)

    #encoder
    l_voc = layers.Embedding(input_dim=vocab_size,
                             output_dim=EMBEDDING_SIZE,
                             input_length=None)

    l_embed = layers.Add()([l_voc(l_in), l_pos])
    l_embed = layers.Dropout(rate=0.1)(l_embed)

    for layer in range(n_block):

        #self attention
        l_o = [
            SelfLayer(EMBEDDING_SIZE,
                      KEY_SIZE)([l_embed, l_embed, l_embed, l_left_mask])
            for i in range(n_self)
        ]

        l_con = layers.Concatenate()(l_o)
        l_dense = layers.TimeDistributed(layers.Dense(EMBEDDING_SIZE))(l_con)
        l_drop = layers.Dropout(rate=0.1)(l_dense)
        l_add = layers.Add()([l_drop, l_embed])
        l_att = LayerNormalization()(l_add)

        #position-wise
        l_c1 = layers.Conv1D(N_HIDDEN, 1, activation='relu')(l_att)
        l_c2 = layers.Conv1D(EMBEDDING_SIZE, 1)(l_c1)
        l_drop = layers.Dropout(rate=0.1)(l_c2)
        l_ff = layers.Add()([l_att, l_drop])
        l_embed = LayerNormalization()(l_ff)

    #bottleneck
    l_encoder = l_embed

    l_embed = layers.Add()([l_voc(l_dec), l_dpos])
    l_embed = layers.Dropout(rate=0.1)(l_embed)

    for layer in range(n_block):

        #self attention
        l_o = [
            SelfLayer(EMBEDDING_SIZE,
                      KEY_SIZE)([l_embed, l_embed, l_embed, l_right_mask])
            for i in range(n_self)
        ]

        l_con = layers.Concatenate()(l_o)
        l_dense = layers.TimeDistributed(layers.Dense(EMBEDDING_SIZE))(l_con)
        l_drop = layers.Dropout(rate=0.1)(l_dense)
        l_add = layers.Add()([l_drop, l_embed])
        l_att = LayerNormalization()(l_add)

        #attention to the encoder
        l_o = [
            SelfLayer(EMBEDDING_SIZE,
                      KEY_SIZE)([l_att, l_encoder, l_encoder, l_emask])
            for i in range(n_self)
        ]
        l_con = layers.Concatenate()(l_o)
        l_dense = layers.TimeDistributed(layers.Dense(EMBEDDING_SIZE))(l_con)
        l_drop = layers.Dropout(rate=0.1)(l_dense)
        l_add = layers.Add()([l_drop, l_att])
        l_att = LayerNormalization()(l_add)

        #position-wise
        l_c1 = layers.Conv1D(N_HIDDEN, 1, activation='relu')(l_att)
        l_c2 = layers.Conv1D(EMBEDDING_SIZE, 1)(l_c1)
        l_drop = layers.Dropout(rate=0.1)(l_c2)
        l_ff = layers.Add()([l_att, l_drop])
        l_embed = LayerNormalization()(l_ff)

    l_out = layers.TimeDistributed(layers.Dense(vocab_size,
                                                use_bias=False))(l_embed)

    mdl = tf.keras.Model([l_in, l_mask, l_dec, l_dmask], l_out)

    def masked_loss(y_true, y_pred):
        # tf.nn.softmax_cross_entropy_with_logits_v2 lost its '_v2' suffix in TF 2.x:
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_true,
                                                       logits=y_pred)
        mask = tf.cast(tf.not_equal(tf.reduce_sum(y_true, -1), 0), 'float32')
        loss = tf.reduce_sum(loss * mask, -1) / tf.reduce_sum(mask, -1)
        loss = K.mean(loss)
        return loss

    def masked_acc(y_true, y_pred):
        mask = tf.cast(tf.not_equal(tf.reduce_sum(y_true, -1), 0), 'float32')
        eq = K.cast(
            K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)),
            'float32')
        eq = tf.reduce_sum(eq * mask, -1) / tf.reduce_sum(mask, -1)
        eq = K.mean(eq)
        return eq

    mdl.compile(optimizer='adam',
                loss=masked_loss,
                metrics=['accuracy', masked_acc])

    mdl_enc = tf.keras.Model([l_in, l_mask], l_encoder)
    mdl_enc.compile(optimizer="adam", loss="categorical_crossentropy")

    #mdl.summary();

    return mdl, mdl_enc
Example 16
# Do async prefetching / buffering of the data for best performance on GPU.
train_ds = train_ds.cache().prefetch(buffer_size=10)
val_ds = val_ds.cache().prefetch(buffer_size=10)
test_ds = test_ds.cache().prefetch(buffer_size=10)
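
# buffer_size=10 is a fixed choice; on TF 2.4+ a common alternative is to let
# tf.data pick it (a sketch, not from the source):
# train_ds = train_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)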

# An integer input for vocab indices.
inputs = tf.keras.Input(shape=(None, ), dtype="int64")

# Next, we add a layer to map those vocab indices into a space of dimensionality
# 'embedding_dim'.
x = layers.Embedding(max_features, embedding_dim)(inputs)
x = layers.Dropout(0.5)(x)

# Conv1D + global max pooling
x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = layers.GlobalMaxPooling1D()(x)

# We add a vanilla hidden layer:
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)

# We project onto a single unit output layer, and squash it with a sigmoid:
predictions = layers.Dense(1, activation="sigmoid", name="predictions")(x)

model = tf.keras.Model(inputs, predictions)

# Compile the model with binary crossentropy loss and an adam optimizer.
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
Example 17
def detection_NN_architecture(Tinputs):
    feature_name = "detection"
    x = layers.Conv1D(
        64,
        activation='relu',
        batch_input_shape=(None, _DATA_STREAM_LENGTH, 3),
        data_format='channels_last',
        kernel_size=10,
        strides=4,
        kernel_regularizer=tf.keras.regularizers.l2(l=0.001))(Tinputs)
    x = layers.Conv1D(64,
                      activation='relu',
                      kernel_size=10,
                      strides=2,
                      kernel_regularizer=tf.keras.regularizers.l2(l=0.001),
                      padding='valid')(x)
    # x = layers.Dropout(0.1)(x)
    x = layers.Conv1D(64,
                      activation='relu',
                      kernel_size=10,
                      strides=2,
                      kernel_regularizer=tf.keras.regularizers.l2(l=0.001),
                      padding='valid')(x)
    # x = layers.Dropout(0.1)(x)
    x = layers.Conv1D(64,
                      activation='relu',
                      kernel_size=10,
                      strides=2,
                      kernel_regularizer=tf.keras.regularizers.l2(l=0.001),
                      padding='valid')(x)
    # x = layers.Dropout(0.1)(x)
    x = layers.Conv1D(64,
                      activation='relu',
                      kernel_size=10,
                      strides=2,
                      kernel_regularizer=tf.keras.regularizers.l2(l=0.001),
                      padding='valid')(x)
    # x = layers.Dropout(0.1)(x)
    x = layers.Conv1D(64,
                      activation='relu',
                      kernel_size=10,
                      strides=2,
                      kernel_regularizer=tf.keras.regularizers.l2(l=0.001),
                      padding='valid')(x)
    # #x = layers.Dropout(0.1)(x)
    # x = layers.Conv1D(64, activation='relu', kernel_size=10, strides=2,
    #                   kernel_regularizer=tf.keras.regularizers.l2(l=0.001), padding='valid')(x)
    # #x = layers.Dropout(0.1)(x)
    # x = layers.Conv1D(32, activation='relu', kernel_size=5, strides=2,
    #                 kernel_regularizer=tf.keras.regularizers.l2(l=0.001), padding='valid')(x)
    # x = layers.Dropout(0.25)(x)
    # x = layers.Conv1D(32, activation='elu', kernel_size=3, strides=2,
    #                 kernel_regularizer=tf.keras.regularizers.l2(l=0.001), padding='valid')(x)
    x = layers.Flatten()(x)
    last = layers.Dense(
        64,
        activation='relu',
        kernel_regularizer=tf.keras.regularizers.l2(l=0.001))(x)
    output = layers.Dense(3,
                          activation='softmax',
                          name=(feature_name + '_out'))(last)

    return output
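
# A hedged usage sketch for the architecture function above. _DATA_STREAM_LENGTH
# is a module constant elided from the excerpt; the value here is illustrative:
_DATA_STREAM_LENGTH = 1024
detection_inputs = tf.keras.Input(shape=(_DATA_STREAM_LENGTH, 3))
detection_model = tf.keras.Model(detection_inputs,
                                 detection_NN_architecture(detection_inputs))

Example 18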
import tensorflow as tf  # needed below for tf.keras.Input / tf.keras.Model
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
import numpy as np
from numpy.random import random_sample

# Network construction

image_input = tf.keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = tf.keras.Input(shape=(20, 10), name="ts_input")

x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)

x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)

x = layers.concatenate([x1, x2])

score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)

# Model (object) construction

model = tf.keras.Model(inputs=[image_input, timeseries_input],
                       outputs=[score_output, class_output])
loss_score_object = losses.MeanSquaredError()
loss_class_object = losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()
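
# The excerpt stops after creating the loss objects. A hedged compile step
# wiring one loss per output head, in declaration order:
model.compile(optimizer=optimizer,
              loss=[loss_score_object, loss_class_object])

Example 19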
# Aliases assumed below (common shorthand, not defined in the excerpt):
import tensorflow as tf
tfk = tf.keras
tfkl, tfkm = tf.keras.layers, tf.keras.models
tfkb, tfkr = tf.keras.backend, tf.keras.regularizers


def build_1d_model(args):
    l2r = 1e-9

    T, X = tfkl.Input((N_TOKS,)), tfkl.Input((MAX_OBJS, 3 + N_OBJS))

    # print('T: ', T.shape)
    # print('X: ', X.shape)

    ti = tfkl.Embedding(N_VOCAB, N_EMBED, input_length=N_TOKS)(T)

    # print('ti :', ti.shape)

    th = tfkm.Sequential([
        tfkl.Bidirectional(tfkl.LSTM(128, return_sequences=True)),
        tfkl.Bidirectional(tfkl.LSTM(128, return_sequences=True)),
        tfkl.Conv1D(256, (1,), activation='elu', kernel_regularizer=tfkr.l2(l2r)),
        tfkl.Conv1D(6, (1,), activation=None, kernel_regularizer=tfkr.l2(l2r)),
        tfkl.Softmax(axis=-2, name='lstm_attn'),
    ], name='lstm_layers')(ti)

    # print('th: ', th.shape)

    tia = tfkb.sum(tfkl.Reshape((N_TOKS, 1, -1))(th) * tfkl.Reshape((N_TOKS, N_EMBED, 1))(ti), axis=-3)

    # print('tia: ', tia.shape)

    Xi = tfkb.sum(X[:, :, 3:], axis=-1, keepdims=True)

    # print('Xi: ', Xi.shape)

    s1 = tfkl.Dense(N_OBJS, activation='softmax')(tia[:, :, 0])
    s1b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, N_OBJS))])(s1)
    Xs1 = tfkb.sum(X[:, :, 3:] * s1b, axis=-1, keepdims=True)

    # print('s1: ', s1.shape)
    # print('s1b: ', s1b.shape)
    # print('Xs1: ', Xs1.shape)

    s2 = tfkl.Dense(3)(tia[:, :, 1])
    s2b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 3))])(s2)
    s2c = tfkb.sum(s2b * X[:, :, 2:3] - (1 - Xi) * 20, axis=-1, keepdims=True)
    Xs2 = tfkm.Sequential([tfkl.Reshape((-1, 1)), tfkl.Softmax(axis=-2), tfkl.Reshape((MAX_OBJS, 1))])(s2c)
    Xs2 = Xs2 - tfkb.max(Xs2, axis=[1, 2], keepdims=True)

    # print('Xs2: ', Xs2.shape)

    s3 = tfkl.Dense(N_OBJS, activation='softmax')(tia[:, :, 2])
    s3b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, N_OBJS))])(s3)
    Xs3 = tfkb.sum(X[:, :, 3:] * s3b, axis=-1, keepdims=True)

    s4 = tfkl.Dense(16, activation='softmax')(tia[:, :, 3])
    s4b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 16))])(s4)
    Xs4 = s4b * Xi

    # print('Xs4: ', Xs2.shape)

    s5 = tfkl.Dense(16, activation='softmax')(tia[:, :, 4])
    s5b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 16))])(s5)
    Xs5 = s5b * Xi

    s6 = tfkl.Dense(16, activation='softmax')(tia[:, :, 5])
    s6b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 16))])(s6)
    Xs6 = s6b * Xi

    xt = tfkl.concatenate([Xi, Xs1, Xs2, Xs3, Xs4, Xs5, Xs6], axis=-1)
    # print('xt: ', xt.shape)

    attn = fcnet(xt)
    # print('attn: ', attn.shape)
    Y = tfkb.sum(attn * X[:, :, :2], axis=[1])
    # print('Y: ', Y.shape)

    model = tfkm.Model(inputs=[T, X], outputs=[Y])

    def acc(y_pred, y_true):
        return tfkb.mean(tfkb.min(tfkb.cast((tfkb.abs(y_true-y_pred) < args.tol), 'float32'), axis=1))

    model.compile(tfk.optimizers.Adam(args.lr), 'mse', metrics=[acc])

    return model
Example 20
    def __init__(self, units_w1=2048, units_w2=512):
        super(FeedForward, self).__init__()

        self.W1 = layers.Conv1D(units_w1, kernel_size=1, activation='relu')
        self.W2 = layers.Conv1D(units_w2, kernel_size=1)
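
    # The excerpt shows only the constructor. The standard position-wise
    # feed-forward applies W1 (expansion + ReLU) then W2 (projection back);
    # a hedged completion:
    def call(self, inputs):
        return self.W2(self.W1(inputs))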
Example 21
    def DefineModel_textAndNumerics_VGG16x(self, oneshape: typing.List[int],
                                           numclasses: int) -> Model:
        # Branching (multi-input) neural network
        text_input = Input(shape=(oneshape[0], 1),
                           dtype=tf.float32,
                           name='text_input')

        text_bn = layers.BatchNormalization(axis=1)(text_input)

        conv1D_1 = layers.Conv1D(32,
                                 5,
                                 padding='same',
                                 activation=tf.nn.relu,
                                 name='conv1D_1')(text_bn)
        conv1D_2 = layers.Conv1D(64,
                                 5,
                                 padding='same',
                                 activation=tf.nn.relu,
                                 name='conv1D_2')(conv1D_1)
        # maxPooling_1 = layers.MaxPooling1D(5)(conv1D_2)
        dropout_1 = layers.Dropout(0.25)(conv1D_2)

        numerics_input = Input(shape=(oneshape[1], 1),
                               dtype=tf.float32,
                               name='numerics_input')

        numerics_bn = layers.BatchNormalization(axis=1)(numerics_input)

        conv1D_3 = layers.Conv1D(32,
                                 5,
                                 padding='same',
                                 activation=tf.nn.relu,
                                 name='conv1D_3')(numerics_bn)
        conv1D_4 = layers.Conv1D(64,
                                 5,
                                 padding='same',
                                 activation=tf.nn.relu,
                                 name='conv1D_4')(conv1D_3)
        # maxPooling_2 = layers.MaxPooling1D(5)(conv1D_4)
        dropout_2 = layers.Dropout(0.25, name='dropout_2')(conv1D_4)

        concatenate_1 = layers.concatenate([dropout_1, dropout_2], axis=1)

        bn_1 = layers.BatchNormalization(axis=1)(concatenate_1)

        conv1D_vgg_1_1 = layers.Conv1D(64,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(bn_1)
        conv1D_vgg_1_2 = layers.Conv1D(64,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(conv1D_vgg_1_1)
        # maxpooling1D_vgg_1_3 = layers.MaxPooling1D(3)(conv1D_vgg_1_2)

        conv1D_vgg_2_1 = layers.Conv1D(128,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(conv1D_vgg_1_2)
        conv1D_vgg_2_2 = layers.Conv1D(128,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(conv1D_vgg_2_1)
        # maxpooling1D_vgg_2_3 = layers.MaxPooling1D(3)(conv1D_vgg_2_2)

        conv1D_vgg_3_1 = layers.Conv1D(256,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(conv1D_vgg_2_2)
        conv1D_vgg_3_2 = layers.Conv1D(256,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(conv1D_vgg_3_1)
        conv1D_vgg_3_3 = layers.Conv1D(256,
                                       3,
                                       padding='same',
                                       activation=tf.nn.relu)(conv1D_vgg_3_2)
        # maxpooling1D_vgg_3_4 = layers.MaxPooling1D(3)(conv1D_vgg_3_3)

        # conv1D_vgg_4_1 = layers.Conv1D(512, 3, padding='same', activation=tf.nn.relu)(conv1D_vgg_3_3)
        # conv1D_vgg_4_2 = layers.Conv1D(512, 3, padding='same', activation=tf.nn.relu)(conv1D_vgg_4_1)
        # conv1D_vgg_4_3 = layers.Conv1D(512, 3, padding='same', activation=tf.nn.relu)(conv1D_vgg_4_2)
        # # maxpooling1D_vgg_4_4 = layers.MaxPooling1D(3)(conv1D_vgg_4_3)

        # conv1D_vgg_5_1 = layers.Conv1D(512, 3, padding='same', activation=tf.nn.relu)(conv1D_vgg_4_3)
        # conv1D_vgg_5_2 = layers.Conv1D(512, 3, padding='same', activation=tf.nn.relu)(conv1D_vgg_5_1)
        # conv1D_vgg_5_3 = layers.Conv1D(512, 3, padding='same', activation=tf.nn.relu)(conv1D_vgg_5_2)
        # maxpooling1D_vgg_5_4 = layers.MaxPooling1D(3)(conv1D_vgg_5_3)

        flatten_vgg_6_1 = layers.Flatten()(conv1D_vgg_3_3)
        # dense_vgg_6_2 = layers.Dense(int(2*(512+numclasses) / 3), activation=tf.nn.softmax)(flatten_vgg_6_1)
        # dense_vgg_6_3 = layers.Dense(int((512 + numclasses) / 3), activation=tf.nn.softmax)(dense_vgg_6_2)
        dense_vgg_6_4 = layers.Dense(numclasses,
                                     activation=tf.nn.softmax)(flatten_vgg_6_1)

        model = Model(inputs=[text_input, numerics_input],
                      outputs=[dense_vgg_6_4])

        return model
Example 22
def dnn_model():
    num_out = 6
    model = keras.Sequential()

    # input_shape is only needed on the first layer; the original repeated it
    # on every Conv1D, where it is ignored.
    model.add(layers.Conv1D(128, 1, input_shape=(26, 2), activation='relu'))
    model.add(layers.Conv1D(128, 1, activation='relu'))
    model.add(layers.Conv1D(128, 1, activation='relu'))
    model.add(layers.Conv1D(128, 1, activation='relu'))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Conv1D(96, 1, activation='relu'))
    model.add(layers.Conv1D(96, 1, activation='relu'))
    model.add(layers.Conv1D(96, 1, activation='relu'))
    model.add(layers.MaxPool1D(2))
    # model.add(layers.Flatten())
    model.add(layers.Conv1D(64, 1, activation='relu'))
    model.add(layers.Conv1D(64, 1, activation='relu'))
    model.add(layers.MaxPool1D(2))
    model.add(layers.Conv1D(32, 1, activation='relu'))
    model.add(layers.Conv1D(32, 1, activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(60, activation='relu'))
    model.add(layers.Dense(30, activation='relu'))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dense(num_out, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
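
# A hedged smoke test for the model above, assuming numpy is imported as np;
# the random batch follows the declared input (26 timesteps x 2 channels) and
# the 6 output classes:
model = dnn_model()
x_dummy = np.random.rand(32, 26, 2)
y_dummy = keras.utils.to_categorical(np.random.randint(6, size=32), num_classes=6)
model.fit(x_dummy, y_dummy, epochs=1)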
Example 23
def build_model(input_shape, type="conv", mode="sup"):
    layers = []
    if type == "conv":
        layers = [
            l.Conv1D(32,
                     10,
                     activation=gelu,
                     strides=1,
                     padding="same",
                     input_shape=input_shape),
            l.Dropout(0.5),
            # ta.layers.Maxout(32),
            l.Conv1D(64, 10, activation=gelu, padding="same", strides=1),
            l.BatchNormalization(),
            l.Dropout(0.5),
            l.LocallyConnected1D(128,
                                 1,
                                 activation=gelu,
                                 padding="same",
                                 implementation=2),
            l.BatchNormalization(),
            l.Conv1D(32,
                     10,
                     activation=gelu,
                     strides=1,
                     padding="same",
                     input_shape=input_shape),
            l.Dropout(0.5),
            # ta.layers.Maxout(32),
            l.Conv1D(64, 10, activation=gelu, padding="same", strides=5),
            l.BatchNormalization(),
            l.Dropout(0.5),
            l.LocallyConnected1D(128,
                                 1,
                                 activation=gelu,
                                 padding="same",
                                 implementation=2),
            l.Conv1D(32,
                     10,
                     activation=gelu,
                     strides=1,
                     padding="same",
                     input_shape=input_shape),
            l.Dropout(0.5),
            # ta.layers.Maxout(32),
            l.Conv1D(64, 10, activation=gelu, padding="same", strides=1),
            l.BatchNormalization(),
            l.Dropout(0.5),
            l.LocallyConnected1D(128,
                                 5,
                                 activation=gelu,
                                 padding="same",
                                 implementation=2),
            l.BatchNormalization(),  # the original listed this layer twice in a row; one is enough
            l.Flatten(),
            l.Dropout(0.4),
            l.Dense(7 * 7, activation="softmax")
        ]
    if type == "fc":
        layers = [
            l.InputLayer(input_shape, dtype=tf.float32),
            l.Reshape((91, )),
            l.Dense(512, activation="tanh", input_shape=(91, )),
            l.BatchNormalization(),
            l.Dropout(0.5),
            l.Dense(512, activation="tanh"),
            l.BatchNormalization(),
            l.Dropout(0.5),
            l.Dense(7 * 7, activation="softmax")
        ]

    model = tf.keras.Sequential(layers)
    if mode == "sup":
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.005),
                      loss='categorical_crossentropy',
                      metrics=[tf.keras.metrics.CategoricalAccuracy()])

    else:
        model.build(input_shape=input_shape)
    if DEBUG: model.summary()
    return model
Example 24
    def Conv1D(self):
        input_shape = (int(self.SR * self.DT), 1)
        i = get_melspectrogram_layer(input_shape=input_shape,
                                     n_mels=128,
                                     pad_end=True,
                                     n_fft=512,
                                     win_length=400,
                                     hop_length=160,
                                     sample_rate=self.SR,
                                     return_decibel=True,
                                     input_data_format='channels_last',
                                     output_data_format='channels_last')
        x = LayerNormalization(axis=2, name='batch_norm')(i.output)
        x = TimeDistributed(layers.Conv1D(8,
                                          kernel_size=(4),
                                          activation='tanh'),
                            name='td_conv_1d_tanh')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2), name='max_pool_2d_1')(x)
        x = TimeDistributed(layers.Conv1D(16,
                                          kernel_size=(4),
                                          activation='relu'),
                            name='td_conv_1d_relu_1')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2), name='max_pool_2d_2')(x)
        x = TimeDistributed(layers.Conv1D(32,
                                          kernel_size=(4),
                                          activation='relu'),
                            name='td_conv_1d_relu_2')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2), name='max_pool_2d_3')(x)
        x = TimeDistributed(layers.Conv1D(64,
                                          kernel_size=(4),
                                          activation='relu'),
                            name='td_conv_1d_relu_3')(x)
        x = layers.MaxPooling2D(pool_size=(2, 2), name='max_pool_2d_4')(x)
        x = TimeDistributed(layers.Conv1D(128,
                                          kernel_size=(4),
                                          activation='relu'),
                            name='td_conv_1d_relu_4')(x)
        x = layers.GlobalMaxPooling2D(name='global_max_pooling_2d')(x)
        x = layers.Dropout(rate=0.1, name='dropout')(x)
        x = layers.Dense(64,
                         activation='relu',
                         activity_regularizer=l2(0.001),
                         name='dense')(x)
        if self.N_CLASSES == 2:
            o = layers.Dense(1, activation='sigmoid', name='sigmoid')(x)
            self.model = Model(inputs=i.input,
                               outputs=o,
                               name='1d_convolution')
            self.model.compile(optimizer='adam',
                               loss='binary_crossentropy',
                               metrics=['accuracy'])
        else:
            o = layers.Dense(self.N_CLASSES,
                             activation='softmax',
                             name='softmax')(x)
            self.model = Model(inputs=i.input,
                               outputs=o,
                               name='1d_convolution')
            self.model.compile(optimizer='adam',
                               loss='categorical_crossentropy',
                               metrics=['accuracy'])

        return self.model
Example 25
    def __init__(self):
        super(MyRnn, self).__init__()
        self.hidden_dim = hidden_dim
        self.projection1 = layers.Dense(units=hidden_dim, activation='relu')
        self.projection2 = layers.Dense(units=hidden_dim, activation='relu')
        self.classifier = layers.Dense(1, activation='sigmoid')

    def call(self, inputs):
        outs = []
        states = tf.zeros(shape=[inputs.shape[0], self.hidden_dim])
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection1(x)
            y = h + self.projection2(states)
            states = y
            outs.append(y)
        # print(outs)
        features = tf.stack(outs, axis=1)
        print(features.shape)
        return self.classifier(features)


# Build the network
inputs = keras.Input(batch_shape=(batch_size, time_step, inputs_dim))
x = layers.Conv1D(32, 3)(inputs)
print(x.shape)
outputs = MyRnn()(x)
model = keras.Model(inputs, outputs)

rnn_model = MyRnn()
_ = rnn_model(tf.zeros((1, 10, 5)))
Example 26
    def createModel(self, generateGraph=False):
        """
        Creates a brand new neural network for this model.
        """
        # Should go over minutes, not seconds
        input_layer = layers.Input(shape=(SAMPLES_OF_DATA_TO_LOOK_AT,
                                          self._numberOfInputChannels))
        # print(input_layer.shape)
        # layer = tf.transpose(input_layer, [0, 2, 1])
        print(input_layer.shape)
        layer = layers.Conv1D(
            filters=64,
            kernel_size=9,
            activation='relu',
            input_shape=(SAMPLES_OF_DATA_TO_LOOK_AT,
                         self._numberOfInputChannels),
            kernel_regularizer=regularizers.l2(
                self.hyperparameters.regularization))(input_layer)
        layer = tf.transpose(layer, [0, 2, 1])
        # default number of units: layer.shape[2]
        print(layer.shape)
        forward_lstm = tf.keras.layers.LSTM(
            layer.shape[1],
            return_sequences=True,
            kernel_regularizer=regularizers.l2(
                self.hyperparameters.regularization))
        backward_lstm = tf.keras.layers.LSTM(
            layer.shape[1],
            activation='relu',
            return_sequences=True,
            go_backwards=True,
            kernel_regularizer=regularizers.l2(
                self.hyperparameters.regularization))
        layer = tf.keras.layers.Bidirectional(forward_lstm,
                                              backward_layer=backward_lstm,
                                              input_shape=layer.shape)(layer)
        print(layer.shape)
        layer = layers.Flatten()(layer)
        print(layer.shape)
        layer = layers.Dense(60,
                             activation='relu',
                             kernel_regularizer=regularizers.l2(
                                 self.hyperparameters.regularization))(layer)
        layer = tf.keras.layers.Dropout(self.hyperparameters.dropout)(layer)
        layer = layers.Dense(10,
                             activation='relu',
                             kernel_regularizer=regularizers.l2(
                                 self.hyperparameters.regularization))(layer)
        layer = tf.keras.layers.Dropout(self.hyperparameters.dropout)(layer)
        output = layers.Dense(1,
                              activation='sigmoid',
                              name="output",
                              kernel_regularizer=regularizers.l2(
                                  self.hyperparameters.regularization))(layer)
        self.model = tf.keras.Model(input_layer, outputs=output)

        if self.binary:
            # This compiles the model if we are using binary prediction.
            self.model.compile(loss="binary_crossentropy",
                               optimizer=tf.keras.optimizers.Adam(
                                   lr=self.hyperparameters.learningRate),
                               metrics=self.listOfMetrics)
        else:
            # This compiles the model if we are using percentage prediction.
            self.model.compile(loss="mean_squared_error",
                               optimizer=tf.keras.optimizers.Adam(
                                   lr=self.hyperparameters.learningRate),
                               metrics=self.listOfMetrics)

        if generateGraph:
            tf.keras.utils.plot_model(self.model,
                                      "crypto_model.png",
                                      show_shapes=True)
Example 27
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']  # a font that can render Chinese plot labels

max_features = 10000  # number of words to consider as features
maxlen = 500  # cut texts off after this many words

# Load the data
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad the sequences
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
# x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

model = Sequential()
model.add(layers.Embedding(max_features, 128, input_length=maxlen))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))

model.summary()

model.compile(optimizer=RMSprop(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)
Example 28
def buildNetwork():

    unfreeze = False

    l_in = layers.Input(shape=(None, ))
    l_mask = layers.Input(shape=(None, ))

    l_ymask = []
    for i in range(len(props)):
        l_ymask.append(layers.Input(shape=(1, )))

    #transformer part
    #positional encodings for product and reagents, respectively
    l_pos = PositionLayer(EMBEDDING_SIZE)(l_mask)
    l_left_mask = MaskLayerLeft()(l_mask)

    #encoder
    l_voc = layers.Embedding(input_dim=vocab_size,
                             output_dim=EMBEDDING_SIZE,
                             input_length=None,
                             trainable=unfreeze)
    l_embed = layers.Add()([l_voc(l_in), l_pos])

    for layer in range(n_block):

        #self attention
        l_o = [
            SelfLayer(EMBEDDING_SIZE, KEY_SIZE, trainable=unfreeze)(
                [l_embed, l_embed, l_embed, l_left_mask])
            for i in range(n_self)
        ]

        l_con = layers.Concatenate()(l_o)
        l_dense = layers.TimeDistributed(layers.Dense(EMBEDDING_SIZE,
                                                      trainable=unfreeze),
                                         trainable=unfreeze)(l_con)
        if unfreeze: l_dense = layers.Dropout(rate=0.1)(l_dense)
        l_add = layers.Add()([l_dense, l_embed])
        l_att = LayerNormalization(trainable=unfreeze)(l_add)

        # position-wise feed-forward
        l_c1 = layers.Conv1D(N_HIDDEN,
                             1,
                             activation='relu',
                             trainable=unfreeze)(l_att)
        l_c2 = layers.Conv1D(EMBEDDING_SIZE, 1, trainable=unfreeze)(l_c1)
        if unfreeze: l_c2 = layers.Dropout(rate=0.1)(l_c2)
        l_ff = layers.Add()([l_att, l_c2])
        l_embed = LayerNormalization(trainable=unfreeze)(l_ff)

    # end of the Transformer part
    l_encoder = l_embed
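    # Each block above is the standard post-norm Transformer encoder layer:
    #   x = LayerNorm(x + MultiHeadSelfAttention(x))
    #   x = LayerNorm(x + FFN(x))
    # with the position-wise FFN realized as the two 1x1 convolutions l_c1/l_c2.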

    # text-CNN part
    # https://github.com/deepchem/deepchem/blob/b7a6d3d759145d238eb8abaf76183e9dbd7b683c/deepchem/models/tensorgraph/models/text_cnn.py

    l_in2 = layers.Input(shape=(None, EMBEDDING_SIZE))

    kernel_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
    num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]

    l_pool = []
    for i in range(len(kernel_sizes)):
        l_conv = layers.Conv1D(num_filters[i],
                               kernel_size=kernel_sizes[i],
                               padding='valid',
                               kernel_initializer='normal',
                               activation='relu')(l_in2)
        l_maxpool = layers.Lambda(lambda x: tf.reduce_max(x, axis=1))(l_conv)
        l_pool.append(l_maxpool)

    l_cnn = layers.Concatenate(axis=1)(l_pool)
    l_cnn_drop = layers.Dropout(rate=0.25)(l_cnn)

    # dense part
    l_dense = layers.Dense(N_HIDDEN_CNN, activation='relu')(l_cnn_drop)

    # https://github.com/ParikhKadam/Highway-Layer-Keras
    transform_gate = layers.Dense(
        units=N_HIDDEN_CNN,
        activation="sigmoid",
        bias_initializer=tf.keras.initializers.Constant(-1))(l_dense)

    carry_gate = layers.Lambda(lambda x: 1.0 - x,
                               output_shape=(N_HIDDEN_CNN, ))(transform_gate)
    transformed_data = layers.Dense(units=N_HIDDEN_CNN,
                                    activation="relu")(l_dense)
    transformed_gated = layers.Multiply()([transform_gate, transformed_data])
    identity_gated = layers.Multiply()([carry_gate, l_dense])

    l_highway = layers.Add()([transformed_gated, identity_gated])
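    # Highway layer: y = T(x) * H(x) + (1 - T(x)) * x, where T is the sigmoid
    # transform gate (biased toward the carry path by the -1 bias init) and H
    # is the ReLU-transformed data; the carry path passes l_dense through.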

    # Because of the multitask setup there are several outputs, each with its own custom loss.

    def mse_loss(prop):
        def loss(y_true, y_pred):
            y2 = y_true * l_ymask[prop] + y_pred * (1 - l_ymask[prop])
            return tf.keras.losses.mse(y2, y_pred)

        return loss

    def binary_loss(prop):
        def loss(y_true, y_pred):
            y_pred = tf.clip_by_value(y_pred, K.epsilon(), 1.0 - K.epsilon())
            r = y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred)
            r = -tf.reduce_mean(r * l_ymask[prop])
            return r

        return loss
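
    # Both losses use l_ymask to ignore properties missing for a sample:
    # mse_loss swaps y_true for y_pred wherever the mask is 0, so the squared
    # error vanishes there; binary_loss multiplies the per-sample cross-entropy
    # by the mask before averaging.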

    l_out = []
    losses = []
    for prop in props:
        if props[prop][2] == "regression":
            l_out.append(
                layers.Dense(1,
                             activation='linear',
                             name="Regression-" + props[prop][1])(l_highway))
            losses.append(mse_loss(prop))
        else:
            l_out.append(
                layers.Dense(1,
                             activation='sigmoid',
                             name="Classification-" +
                             props[prop][1])(l_highway))
            losses.append(binary_loss(prop))

    l_input = [l_in2]
    l_input.extend(l_ymask)

    mdl = tf.keras.Model(l_input, l_out)
    mdl.compile(optimizer='adam', loss=losses)

    # mdl.summary()

    K.set_value(mdl.optimizer.learning_rate, 1.0e-4)

    # for now we do not train the encoder part of the model
    encoder = tf.keras.Model([l_in, l_mask], l_encoder)
    encoder.compile(optimizer='adam', loss='mse')
    encoder.set_weights(np.load("embeddings.npy", allow_pickle=True))

    # encoder.summary()

    return mdl, encoder
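
# A minimal usage sketch (hedged: `props`, the tokenized inputs, and the
# pretrained "embeddings.npy" loaded above are assumed to exist):
#   mdl, encoder = buildNetwork()
#   encoded = encoder.predict([token_ids, token_mask])  # frozen Transformer
#   mdl.fit([encoded] + ymask_arrays, target_arrays, epochs=10)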
Example n. 29
0
def DenseNet(number,
             include_top=True,
             weights='hasc',
             input_shape=None,
             pooling=None,
             classes=6,
             classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    # number of blocks
    if number == 121:
        blocks = [6, 12, 24, 16]
    elif number == 169:
        blocks = [6, 12, 32, 32]
    elif number == 201:
        blocks = [6, 12, 48, 32]
    else:
        raise ValueError('`number` should be 121, 169 or 201')

    inputs = layers.Input(shape=input_shape)

    x = layers.ZeroPadding1D(padding=(3, 3))(inputs)
    x = layers.Conv1D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(epsilon=1.001e-5, name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding1D(padding=(1, 1))(x)
    x = layers.MaxPooling1D(3, strides=2, name='pool1')(x)

    x = DenseBlock(blocks[0], name='conv2')(x)
    x = TransitionBlock(0.5, name='pool2')(x)
    x = DenseBlock(blocks[1], name='conv3')(x)
    x = TransitionBlock(0.5, name='pool3')(x)
    x = DenseBlock(blocks[2], name='conv4')(x)
    x = TransitionBlock(0.5, name='pool4')(x)
    x = DenseBlock(blocks[3], name='conv5')(x)

    x = layers.BatchNormalization(epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)

    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes,
                     activation=classifier_activation,
                     name='predictions')(x)

    # Create model.
    model_ = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/densenet{}/densenet{}_hasc_weights_{}_{}.hdf5'.format(
                number, number, int(input_shape[0]), int(input_shape[1]))
        # initialize from HASC pretrained weights or a local weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model_.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the classification top is not included
    if not include_top:
        if pooling is None:
            # strip the classification top
            model_ = Model(inputs=model_.input,
                           outputs=model_.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model_.layers[-3].output)
            model_ = Model(inputs=model_.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model_.layers[-3].output)
            model_ = Model(inputs=model_.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model_ = Model(inputs=model_.input,
                           outputs=model_.layers[-3].output)

    return model_
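
# A hedged usage sketch (not in the original): build an untrained DenseNet-121
# with the default 6-class, (768, 1)-shaped input; weights=None skips the
# pretrained .hdf5 lookup.
model = DenseNet(121, weights=None)
model.summary()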
    print("test data done")

    train_data = Dataset.from_tensor_slices((Xtrain, Ytrain[:train_size]))
    test_data = Dataset.from_tensor_slices((Xtest, Ytest[:test_size]))
    train_data = train_data.batch(batch_size).repeat()
    test_data = test_data.batch(batch_size).repeat()
    readin_time = time() - t0
    print(train_data.element_spec)  # output_types/output_shapes were removed in TF 2.x
    print('read in data time: %0.3fs' % readin_time)

    # build the convolutional neural network model
    model = tf.keras.Sequential()
    model.add(
        layers.Conv1D(input_shape=(200, 100),
                      filters=64,
                      kernel_size=5,
                      activation='relu'))
    model.add(layers.GlobalMaxPool1D())
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dropout(rate=drop_prob))
    model.add(layers.Dense(2, activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.Adam(lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    callbacks = [tf.keras.callbacks.TensorBoard(log_dir='./logs')]
    # train
    model.fit(train_data,
              epochs=epochs,
              steps_per_epoch=20,
              callbacks=callbacks)
    # evaluate on the test set
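    # A hedged completion (the original snippet is truncated here): evaluate on
    # the repeated test pipeline for a fixed number of steps.
    model.evaluate(test_data, steps=test_size // batch_size)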