Example #1
def decoder(*, previous_layer, encoder, dropout_rate, batch_normalization):
    """Return decoder layer for U-Net model."""
    unconvolution_layer = layers.Conv2DTranspose(
        filters=encoder.shape[3] // 2,
        kernel_size=(2, 2),
        strides=(2, 2),
        padding="same",
    )(previous_layer)
    unconvolution_layer = layers.concatenate([unconvolution_layer, encoder])
    unconvolution_layer = layers.Conv2D(
        filters=encoder.shape[3],
        kernel_size=(3, 3),
        activation=activations.relu,
        kernel_initializer=initializers.he_normal(),
        padding="same")(unconvolution_layer)
    if batch_normalization:
        unconvolution_layer = layers.BatchNormalization()(unconvolution_layer)
    if dropout_rate:
        unconvolution_layer = layers.Dropout(dropout_rate)(unconvolution_layer)
    unconvolution_layer = layers.Conv2D(
        filters=encoder.shape[3],
        kernel_size=(3, 3),
        activation=activations.relu,
        kernel_initializer=initializers.he_normal(),
        padding="same")(unconvolution_layer)
    if batch_normalization:
        return layers.BatchNormalization()(unconvolution_layer)
    else:
        return unconvolution_layer
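A minimal usage sketch, not from the original source: it wires the decoder block above into a toy U-Net expanding path, assuming decoder() and its tf.keras imports are in scope; the encoder tensors and the 64x64x3 input size are invented for illustration.

# Usage sketch (assumed setup): toy U-Net expanding path built around decoder().
from tensorflow.keras import Input, Model, activations, layers

inputs = Input(shape=(64, 64, 3))
enc1 = layers.Conv2D(32, (3, 3), padding="same", activation=activations.relu)(inputs)        # 64x64x32
pool1 = layers.MaxPooling2D()(enc1)
enc2 = layers.Conv2D(64, (3, 3), padding="same", activation=activations.relu)(pool1)         # 32x32x64
pool2 = layers.MaxPooling2D()(enc2)
bottleneck = layers.Conv2D(128, (3, 3), padding="same", activation=activations.relu)(pool2)  # 16x16x128

dec2 = decoder(previous_layer=bottleneck, encoder=enc2,
               dropout_rate=0.1, batch_normalization=True)    # upsampled back to 32x32
dec1 = decoder(previous_layer=dec2, encoder=enc1,
               dropout_rate=0.0, batch_normalization=False)   # upsampled back to 64x64
outputs = layers.Conv2D(1, (1, 1), activation="sigmoid")(dec1)
unet = Model(inputs, outputs)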
Example #2
    def build(self):
        new_embed_dim = 100 
        glove_dim = 100 + 50
        n_words = 51
        return_seq = False

        self.embedding_dim_1, self.embedding_dim_2 = self.embedding_dim

        glove_embedding_input = Input(shape=(n_words, glove_dim))

        new_embedding = Dense(new_embed_dim, kernel_initializer=he_normal(seed=11), activation=None)(glove_embedding_input[0])
        
        sensor_input = Input(shape=(self.seq_length, self.embedding_dim_1))
        out_2 = self.shared_module(sensor_input, return_seq)

        search_input = Input(shape=(self.seq_length, self.embedding_dim_2))
        batch_seq = tf.reshape(search_input, [-1, self.embedding_dim_2])
        batch_new_seq = tf.matmul(batch_seq, new_embedding)
        new_search_seq = tf.reshape(batch_new_seq, [-1, self.seq_length, new_embed_dim])

        out_1 = self.shared_module(new_search_seq, return_seq)
        merged_layer = concatenate([out_1, out_2])

        out = Dense(1, activation='sigmoid', kernel_initializer=he_normal(seed=1))(
            merged_layer)

        model = keras_Model(inputs=[glove_embedding_input, sensor_input, search_input], outputs=out)
        return model
def vgg_fc_model(after_rois):
    Xvis = Flatten()(after_rois)
    Xvis = Dense(units=4096,
                 input_shape=(25088, ),
                 kernel_initializer=initializers.he_normal())(Xvis)
    Xvis = Dense(units=4096, kernel_initializer=initializers.he_normal())(Xvis)
    return Xvis
Example #4
def fitnessLearnModel(max_len,
                      features,
                      lr,
                      cells=32,
                      regularization_base=2e-6):
    inp = Input(shape=(max_len, features), name='fitnessModel_inputs1')
    inp2 = Input(shape=(max_len, features), name='fitnessModel_inputs2')

    mult = Multiply()([inp, inp2])

    mask = Masking(0.0)(mult)
    lstm_Layer = LSTM(cells,
                      activation='relu',
                      return_sequences=False,
                      kernel_initializer=he_normal(24353),
                      name='fitnessModel_lstm1',
                      return_state=False,
                      recurrent_regularizer=l1_l2(regularization_base / 20,
                                                  regularization_base / 20),
                      kernel_regularizer=l1_l2(regularization_base,
                                               regularization_base),
                      bias_regularizer=l1_l2(regularization_base * 2,
                                             regularization_base * 2))

    lstm_out = lstm_Layer(mask)
    out = Dense(1,
                activation='linear',
                kernel_initializer=he_normal(53436),
                name='fitnessModel_denseOut')(lstm_out)

    fitnessModel = Model(inputs=[inp, inp2], outputs=out)
    fitnessModel.compile(optimizer=Adam(lr, clipnorm=1.0, clipvalue=0.5),
                         loss='mse')
    return fitnessModel
Example #5
def convolutional_block_small(X, f, filters, stage, block, s=2):
    """
    Implementation of the small convolutional block 
    
    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    s -- Integer, specifying the stride to be used
    
    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    ##### MAIN PATH #####
    # First component of main path
    X = Conv2D(filters=F1,
               kernel_size=(f, f),
               strides=(s, s),
               name=conv_name_base + '2a',
               kernel_initializer=he_normal(seed=None))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=he_normal(seed=None))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)

    ##### SHORTCUT PATH ####
    X_shortcut = Conv2D(filters=F2,
                        kernel_size=(3, 3),
                        strides=(s, s),
                        padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=he_normal(seed=None))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')(X_shortcut)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
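A short usage sketch, added here as an assumption rather than taken from the source repo: it runs the block on a dummy feature map, together with the tf.keras imports the snippet itself relies on.

# Usage sketch: apply the small convolutional block to a dummy 32x32x64 feature map.
# Note the block never uses F3, so only the first two filter counts matter here.
from tensorflow.keras.initializers import he_normal
from tensorflow.keras.layers import Activation, Add, BatchNormalization, Conv2D, Input
from tensorflow.keras.models import Model

X_in = Input(shape=(32, 32, 64))
X_out = convolutional_block_small(X_in, f=3, filters=[32, 64, 128], stage=2, block='a', s=2)
model = Model(X_in, X_out)
model.summary()  # valid 3x3 convs with stride 2 shrink 32x32 to 15x15; output has F2=64 channels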
Example #6
def make_rnns(return_sequences, return_state, go_backwards=False):
    seed = 1
    kernel_initializer = initializers.glorot_uniform(seed)
    recurrent_initializer = initializers.he_normal(seed)
    bias_initializer = initializers.he_normal(seed)

    kwargs = dict(units=3,
                  input_shape=(None, 5),
                  activation='tanh',
                  recurrent_activation='sigmoid',
                  kernel_initializer=kernel_initializer,
                  recurrent_initializer=recurrent_initializer,
                  bias_initializer=bias_initializer,
                  return_sequences=return_sequences,
                  return_state=return_state)

    if go_backwards:
        kwargs.update(dict(direction=Direction(-1)))

    rnn = MDGRU(**kwargs)
    keras_rnn = tf.keras.layers.GRU(
        units=3,
        activation='tanh',
        recurrent_activation='sigmoid',
        implementation=1,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        return_sequences=return_sequences,
        reset_after=False,
        return_state=return_state,
        go_backwards=go_backwards)
    return rnn, keras_rnn
def get_model(num_users, num_items, layers=[20, 10], reg_layers=[0, 0]):
    assert len(layers) == len(reg_layers)
    # number of layers  in MLP
    num_layer = len(layers)
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=layers[0] // 2, name='user_embedding',
                                   embeddings_initializer=initializers.he_normal(),
                                   embeddings_regularizer=l2(reg_layers[0]),
                                   input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=layers[0] // 2, name='item_embedding',
                                   embeddings_initializer=initializers.he_normal(),
                                   embeddings_regularizer=l2(reg_layers[1]),
                                   input_length=1)
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MLP_Embedding_User(user_input))
    item_latent = Flatten()(MLP_Embedding_Item(item_input))
    # The 0-th layer is the concatenation of embedding layers
    vector = tf.concat([user_latent, item_latent], axis=1)

    # MLP layers
    for idx in range(1, num_layer):
        vector = Dense(layers[idx], activation='sigmoid', kernel_initializer='lecun_uniform', name="layer%d" % idx)(
            vector)

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(vector)

    model = Model(inputs=[user_input, item_input],
                  outputs=prediction)

    return model
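A usage sketch under assumed settings (the catalogue sizes, layer widths, and random implicit-feedback labels are all invented): it builds the MLP recommender, compiles it, and fits one epoch on dummy data.

# Usage sketch: reg_layers must match layers in length because of the assert above.
import numpy as np
import tensorflow as tf
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dense, Embedding, Flatten, Input
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

model = get_model(num_users=1000, num_items=500,
                  layers=[64, 32, 16], reg_layers=[1e-6, 1e-6, 1e-6])
model.compile(optimizer='adam', loss='binary_crossentropy')

users = np.random.randint(0, 1000, size=(256, 1))
items = np.random.randint(0, 500, size=(256, 1))
labels = np.random.randint(0, 2, size=(256, 1)).astype('float32')
model.fit([users, items], labels, epochs=1, batch_size=64, verbose=0)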
Example #8
    def _build_network(self):
        network = Sequential()
        network.add(Dense(LAYERS, activation='relu', kernel_initializer=he_normal()))
        network.add(Dense(LAYERS, activation='relu', kernel_initializer=he_normal()))
        network.add(Dense(self._action_size))

        return network
Example #9
    def build_ResNet_model(self):
        inputs = Input(shape=(self.state_size, ))
        h1 = Dense(64,
                   activation="relu",
                   kernel_initializer=he_normal(seed=247))(inputs)  #h1
        h2 = Dense(64,
                   activation="relu",
                   kernel_initializer=he_normal(seed=2407))(h1)  #h2

        # h3 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=2403))(h2) #h3
        # h4 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=24457))(h3) #h4
        # add1 = Add()([h4, h2])

        # h5 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=24657))(add1) #h5
        # h6 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=27567))(h5) #h6
        # add2 = Add()([h6, add1])

        # h7 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=24657))(add2) #h5
        # h8 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=27567))(h7) #h6
        # add3 = Add()([h7, add2])

        outputs = Dense(self.n_actions * self.n_nodes,
                        kernel_initializer=he_normal(seed=27))(h2)
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(loss='mse', optimizer='rmsprop')
        return model
    def build(self):
        new_embedding_dim = 100
        word_embedding_dim = 50 + 100
        n_words = 51
        return_seq = False

        # Model inputs.
        glove_embedding_input = Input(shape=(n_words, word_embedding_dim))
        search_input = Input(shape=(self.seq_length, self.embedding_dim))

        # Transform search interest data to incorporate term embeddings.
        new_embedding = Dense(new_embedding_dim,
                              kernel_initializer=he_normal(seed=11),
                              activation='relu')(glove_embedding_input[0])
        batch_seq = tf.reshape(search_input, [-1, self.embedding_dim])
        batch_new_seq = tf.matmul(batch_seq, new_embedding)
        batch_new_seq = Dense(new_embedding_dim,
                              kernel_initializer=he_normal(seed=11),
                              activation='relu')(batch_new_seq)
        new_search_seq = tf.reshape(batch_new_seq,
                                    [-1, self.seq_length, new_embedding_dim])

        net = self.shared_module(new_search_seq, return_seq)

        out = Dense(1,
                    activation='sigmoid',
                    kernel_initializer=he_normal(seed=1))(net)

        model = keras_Model(inputs=[glove_embedding_input, search_input],
                            outputs=out)

        return model
Example #11
def fusing_model(model_typ, Xvis, Xp):
    if model_typ == 'vgg':
        vis_dim = 4096
    elif model_typ == 'densenet':
        vis_dim = 1024
    else:
        raise ValueError("model_typ must be 'vgg' or 'densenet'")

    dense_Xvis = Dense(units=1000,
                       input_shape=(vis_dim, ),
                       kernel_initializer=initializers.he_normal())
    dense_Xfuse_v = Dense(units=1000,
                          input_shape=(1000, ),
                          kernel_initializer=initializers.he_normal())
    dense_Xfuse_p = Dense(units=1000,
                          input_shape=(300, ),
                          kernel_initializer=initializers.he_normal())
    dense_fuse = Dense(units=300,
                       input_shape=(1000, ),
                       kernel_initializer=initializers.he_normal())

    Xvis = Activation('relu')(BatchNormalization()((dense_Xvis(Xvis))))
    fuse = Add()([dense_Xfuse_v(Xvis), dense_Xfuse_p(Xp)])

    fuse = Activation('relu')(BatchNormalization()(fuse))
    fuse = Activation('relu')(BatchNormalization()(dense_fuse(fuse)))
    return fuse
Example #12
def make_autoencoder(data, lr=0.001, enc_dim=100):
    # Auto encoder layers
    ae0 = Input(shape=products_shape, name='FeaturesInput')
    encode = Dense(enc_dim,
                   activation='relu',
                   kernel_initializer=he_normal(1),
                   name='AE_feature_reduction')(ae0)
    decode = Dense(products_shape[0], activation='relu', name='AE_3')(encode)

    # inspired by https://www.frontiersin.org/articles/10.3389/fgene.2018.00585/full
    # clustering layers (will work with the help of OPTICS)
    # we want to find the probability of one product to be in 1 of total found clusters
    opt = OPTICS()
    opt.fit(minmax.fit_transform(data))
    clusters = len(np.unique(opt.labels_))
    print('Optimal number of cluster:', clusters)
    prob0 = Dense(enc_dim // 2,
                  activation='relu',
                  kernel_initializer=he_normal(1))(encode)
    prob1 = BatchNormalization()(prob0)
    prob = Dense(clusters, activation='softmax',
                 name='Probability_Product')(prob1)

    autoencoder_ = Model(inputs=ae0, outputs=decode)
    encoder_ = Model(inputs=ae0, outputs=encode)
    p_prob = Model(inputs=ae0, outputs=prob)

    autoencoder_.compile(optimizer=Adam(learning_rate=lr),
                         loss='mae',
                         metrics=['mse'])

    return autoencoder_, encoder_, p_prob, opt
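A usage sketch, assuming the module-level globals products_shape and minmax that the function reads; the random product matrix only exercises the code path and is not real data.

# Usage sketch: define the globals the function expects, then build and fit briefly.
import numpy as np
from sklearn.cluster import OPTICS
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import Input, Model
from tensorflow.keras.initializers import he_normal
from tensorflow.keras.layers import BatchNormalization, Dense
from tensorflow.keras.optimizers import Adam

products_shape = (20,)              # length of each product feature vector
minmax = MinMaxScaler()
data = np.random.rand(200, 20)

autoencoder, encoder, p_prob, opt = make_autoencoder(data, lr=1e-3, enc_dim=10)
autoencoder.fit(data, data, epochs=1, batch_size=32, verbose=0)
reduced = encoder.predict(data, verbose=0)   # 200 x 10 compressed representation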
Example #13
def identity_block_large(X, f, filters, stage, block):
    """
    Implementation of the large identity block 
    
    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    
    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=he_normal(seed=None))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path (≈3 lines)
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=he_normal(seed=None))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path (≈2 lines)
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=he_normal(seed=None))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
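A usage sketch (my addition, not from the source) stacking the identity block as in a ResNet stage; the shortcut addition only works when the input channel count equals F3.

# Usage sketch: chain two identity blocks on a 28x28x256 tensor. Because the block
# adds the unmodified shortcut back in, the input must already carry F3=256 channels.
from tensorflow.keras.initializers import he_normal
from tensorflow.keras.layers import Activation, Add, BatchNormalization, Conv2D, Input
from tensorflow.keras.models import Model

X_in = Input(shape=(28, 28, 256))
X = identity_block_large(X_in, f=3, filters=[64, 64, 256], stage=3, block='b')
X = identity_block_large(X, f=3, filters=[64, 64, 256], stage=3, block='c')
stage3 = Model(X_in, X)   # shape is preserved: the output is again 28x28x256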
def Phrase_Projection_Net(Xp):
    fuse_p = Dense(1000, kernel_initializer=initializers.he_normal())
    fuse = Dense(300, kernel_initializer=initializers.he_normal())

    x = Activation('relu')(BatchNormalization()(fuse_p(Xp)))
    x = Activation('relu')(BatchNormalization()(fuse(x)))

    return x
    def __init__(self, filters, kernel_size, slope=0.2):
        super(DNetEncoderBlock, self).__init__()

        self.block = Sequential()
        self.block.add(Conv2D(filters, kernel_size=kernel_size, kernel_initializer=he_normal(seed=10000), padding="SAME"))
        self.block.add(LeakyReLU(alpha=slope))
        self.block.add(Conv2D(filters, kernel_size=kernel_size, kernel_initializer=he_normal(seed=10000), padding="SAME"))
        self.block.add(LeakyReLU(alpha=slope))
Example #16
def multimodal_gate_model(Xv, Xp):
    dense_phr = Dense(300, kernel_initializer=initializers.he_normal())
    dense_vis = Dense(300, kernel_initializer=initializers.he_normal())

    x = Concatenate()([Xv, Xp])
    g_phr = Activation('sigmoid')(dense_phr(x))
    g_vis = Activation('sigmoid')(dense_vis(x))
    return g_phr, g_vis
Example #17
def createResNetV1(inputShape=(128, 128, 3), numClasses=3):
    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Input')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=16,
                 numBlocks=7,
                 downsampleOnFirst=False,
                 names='Stg1')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=32,
                 numBlocks=7,
                 downsampleOnFirst=True,
                 names='Stg2')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=64,
                 numBlocks=7,
                 downsampleOnFirst=True,
                 names='Stg3')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=128,
                 numBlocks=7,
                 downsampleOnFirst=True,
                 names='Stg4')

    v = Dropout(0.2)(v)

    v = AveragePooling2D(pool_size=8, name='AvgPool')(v)

    v = Dropout(0.2)(v)

    v = Flatten()(v)

    v = Dense(256, activation='relu', kernel_initializer=he_normal(33))(v)

    v = Dropout(0.2)(v)

    outputs = Dense(numClasses,
                    activation='softmax',
                    kernel_initializer=he_normal(33))(v)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmz,
                  metrics=['accuracy'])

    return model
Example #18
    def build_ResNet_model(self):
        inputs = Input(shape=(self.state_size, ))
        h1 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=247))(inputs) #h1
        h2 = Dense(64, activation="relu", kernel_initializer=he_normal(seed=2407))(h1) #h2

        outputs =  Dense(self.n_actions*self.n_nodes, kernel_initializer=he_normal(seed=27))(h2)
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(loss='mse', optimizer='rmsprop')
        return model
def expand_convo(inputs: tf.Tensor, filters: int, multiplier: int, is_training: bool, stride: int) -> tf.Tensor:
    x = convo_bn(inputs, filters * multiplier, 3, is_training, stride)
    x = tf.nn.relu(x)
    x = tf.layers.conv2d(x, filters * multiplier, 3, strides=(1, 1), padding="same", use_bias=False,
                         kernel_initializer=he_normal(), kernel_regularizer=l2(weight_regularizer))

    skip = tf.layers.conv2d(inputs, filters * multiplier, 1, strides=(stride, stride), padding="same", use_bias=False,
                            kernel_initializer=he_normal(), kernel_regularizer=l2(weight_regularizer))
    return x + skip
Example #20
def construct_actor_network(bandits):
    """Construct the actor network with mu and sigma as output"""
    inputs = layers.Input(shape=(1,)) #input dimension
    hidden1 = layers.Dense(5, activation="relu",kernel_initializer=initializers.he_normal())(inputs)
    hidden2 = layers.Dense(5, activation="relu",kernel_initializer=initializers.he_normal())(hidden1)
    probabilities = layers.Dense(len(bandits), kernel_initializer=initializers.Ones(),activation="softmax")(hidden2)

    actor_network = keras.Model(inputs=inputs, outputs=[probabilities]) 
    
    return actor_network
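A usage sketch with an assumed bandit setup (the arm probabilities and the zero "state" input are placeholders): it draws an arm from the actor's softmax policy.

# Usage sketch: sample a bandit arm from the actor's softmax output for a dummy input.
import numpy as np
from tensorflow import keras
from tensorflow.keras import initializers, layers

bandits = [0.2, 0.5, 0.8]                      # hypothetical win probability per arm
actor = construct_actor_network(bandits)
probs = actor.predict(np.zeros((1, 1), dtype=np.float32), verbose=0)
probs = np.asarray(probs).reshape(-1)
probs = probs / probs.sum()                    # guard against float32 rounding
action = int(np.random.choice(len(bandits), p=probs))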
def _residual_block(layer, n_out_channels, stride=1, nonlinearity='relu'):
    """Crea un bloque residual de la red.

    :param layer: Capa de anterior al bloque residual a crear.
    :param n_out_channels: Número de filtros deseados para las convoluciones realizadas en el bloque.
    :param stride: Stride para la primera convolución realizada y en el caso de ser mayor a 1, el usado en un primer AveragePooling.
    :param nonlinearity: No linealidad aplicada a las salidas de las BatchNormalization aplicadas.
    :return: La última capa del bloque residual.
    """
    conv = layer
    if stride > 1:
        # padding: https://stackoverflow.com/a/47213171
        layer = AveragePooling2D(pool_size=1, strides=stride,
                                 padding="same")(layer)
    # If the channel counts of the layers do not match, zero-pad the channel dimension
    if (n_out_channels != int(layer.get_shape()[-1])):
        diff = n_out_channels - int(layer.get_shape()[-1])
        diff_2 = int(diff / 2)
        if diff % 2 == 0:
            width_tp = ((0, 0), (diff_2, diff_2))
        else:
            width_tp = ((0, 0), ((diff_2) + 1, diff_2))
        # To pad along the intended (channel) dimension (Keras has no batch_ndim
        # option like Lasagne), data_format='channels_first' is used as a workaround
        layer = ZeroPadding2D(padding=(width_tp),
                              data_format='channels_first')(layer)
    conv = Conv2D(filters=n_out_channels,
                  kernel_size=(3, 3),
                  strides=(stride, stride),
                  padding='same',
                  activation='linear',
                  kernel_initializer=he_normal(seed=1),
                  bias_initializer=Constant(0.),
                  kernel_regularizer=l2(1e-4),
                  bias_regularizer=l2(1e-4))(conv)
    conv = BatchNormalization(beta_initializer=Constant(0.),
                              gamma_initializer=Constant(1.),
                              beta_regularizer=l2(1e-4),
                              gamma_regularizer=l2(1e-4))(conv)
    conv = Activation(nonlinearity)(conv)
    conv = Conv2D(filters=n_out_channels,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  padding='same',
                  activation='linear',
                  kernel_initializer=he_normal(seed=1),
                  bias_initializer=Constant(0.),
                  kernel_regularizer=l2(1e-4),
                  bias_regularizer=l2(1e-4))(conv)
    conv = BatchNormalization(beta_initializer=Constant(0.),
                              gamma_initializer=Constant(1.),
                              beta_regularizer=l2(1e-4),
                              gamma_regularizer=l2(1e-4))(conv)
    sum_ = Add()([conv, layer])
    return Activation(nonlinearity)(sum_)
def make_ae_rnn(lr=0.001, enc_dim=200):
    # Auto encoder layers
    # TODO allow the use of generator
    ae0 = Input(shape=(prod_features, ), name='FeaturesInput')
    encode0 = Dense(enc_dim,
                    activation='relu',
                    kernel_initializer=he_normal(1))(ae0)
    encode = Dense(enc_dim,
                   activation='relu',
                   kernel_initializer=he_normal(1),
                   name='AE_feature_reduction')(encode0)
    decode0 = Dense(enc_dim,
                    activation='relu',
                    kernel_initializer=he_normal(1))(encode)
    decode = Dense(prod_features, activation='relu', name='AE_3')(decode0)
    shape_re = Reshape((train_prod_feat.shape[0], enc_dim))(encode)
    perm = Permute((2, 1))(shape_re)

    # Simple RNN layers
    # inspired by https://dlpm2016.fbk.eu/docs/esteban_combining.pdf,
    # https://stackoverflow.com/questions/52474403/keras-time-series-suggestion-for-including-static-and-dynamic-variables-in-lstm,
    # https://blog.nirida.ai/predicting-e-commerce-consumer-behavior-using-recurrent-neural-networks-36e37f1aed22
    # https://www.affineanalytics.com/blog/new-product-forecasting-using-deep-learning-a-unique-way/
    # https://lilianweng.github.io/lil-log/2017/07/22/predict-stock-prices-using-RNN-part-2.html
    n_neurons = length  # we want the model to predict with length of output == to length of timesteps inputted
    seq_input = Input(
        shape=(length // step,  # integer division keeps the timestep count an int
               train_ts.shape[-1]))  # Shape: (timesteps, data dimensions)
    concat0 = Concatenate(axis=1)([perm, seq_input])
    # the number of units is the number of sequential months to predict
    rnn0 = SimpleRNN(n_neurons, activation='tanh',
                     return_sequences=True)(concat0)
    out = TimeDistributed(Dense(train_ts.shape[-1]))(rnn0)

    encoder_ = Model(inputs=ae0, outputs=encode)
    autoencoder_ = Model(inputs=ae0, outputs=decode)
    autoencoder_.compile(optimizer=Adam(learning_rate=lr),
                         loss='mae',
                         metrics=['mse', 'cosine_similarity'])

    model_rnn_ = Model(inputs=[ae0, seq_input], outputs=out)
    model_rnn_.compile(optimizer=Adam(learning_rate=lr),
                       loss='mae',
                       metrics=['mse'])

    # new_prod_predictor_ = Model(inputs=ae0, outputs=out)

    model_full_ = Model(inputs=[ae0, seq_input], outputs=[out, decode])
    model_full_.compile(optimizer=Adam(learning_rate=lr),
                        loss='mae',
                        metrics=['mse'])

    return autoencoder_, encoder_, model_full_, model_rnn_
    def __init__(self, channels, filters=64, slope=0.2):
        super(SigmaNetwork, self).__init__()
        self.block = Sequential()
        self.block.add(Conv2D(filters, kernel_size=3, kernel_initializer=he_normal(seed=10000), padding="SAME"))
        self.block.add(LeakyReLU(alpha=slope))
        self.block.add(Conv2D(filters, kernel_size=3, kernel_initializer=he_normal(seed=10000), padding="SAME"))
        self.block.add(LeakyReLU(alpha=slope))
        self.block.add(Conv2D(filters, kernel_size=3, kernel_initializer=he_normal(seed=10000), padding="SAME"))
        self.block.add(LeakyReLU(alpha=slope))
        self.block.add(Conv2D(filters, kernel_size=3, kernel_initializer=he_normal(seed=10000), padding="SAME"))
        self.block.add(LeakyReLU(alpha=slope))
        self.block.add(Conv2D(channels, kernel_size=3, kernel_initializer=he_normal(seed=10000), padding="SAME"))
def depthwise_convo_bn(inputs: tf.Tensor, features: int, kernel_size: int,
                       is_training: bool, stride: int) -> tf.Tensor:
    x = tf.layers.separable_conv2d(inputs,
                                   features,
                                   kernel_size,
                                   strides=(stride, stride),
                                   padding="same",
                                   depthwise_initializer=he_normal(),
                                   pointwise_initializer=he_normal(),
                                   depthwise_regularizer=l2(L2_REGULARIZATION),
                                   pointwise_regularizer=l2(L2_REGULARIZATION))
    x = tf.layers.batch_normalization(x, training=is_training)
    return x
Example #25
def lowrank_linear(input_, dim_bottle, dim_out, name="lowrank_linear"):
    with tf.variable_scope(name):
        weights1 = tf.get_variable("fc_weights1",
                                   [input_.get_shape()[-1], dim_bottle],
                                   initializer=he_normal())
        weights2 = tf.get_variable("fc_weights2", [dim_bottle, dim_out],
                                   initializer=he_normal())
        biases = tf.get_variable("biases", [dim_out],
                                 initializer=tf.constant_initializer(0.01))

        activation = tf.add(tf.matmul(tf.matmul(input_, weights1), weights2),
                            biases)
    return activation
Example #26
def residual_convLSTM2D_block(x, filters, num_class, rd=0.1):

    x = Conv2D(num_class,
               kernel_size=(1, 1),
               padding="same",
               strides=1,
               kernel_initializer=he_normal(seed=5),
               bias_initializer='zeros')(x)
    x = LeakyReLU(alpha=0.1)(x)

    o2 = Lambda(lambda x: x[:, :, :, :, tf.newaxis])(x)
    o3 = Bidirectional(
        ConvLSTM2D(filters=filters,
                   kernel_size=(3, num_class),
                   padding='same',
                   kernel_initializer=he_normal(seed=5),
                   recurrent_initializer=orthogonal(gain=1.0, seed=5),
                   activation='tanh',
                   return_sequences=True,
                   recurrent_dropout=rd))(o2)

    o2t = tf.transpose(o2, perm=[0, 2, 1, 3, 4])

    o3t = Bidirectional(
        ConvLSTM2D(filters=filters,
                   kernel_size=(3, num_class),
                   padding='same',
                   kernel_initializer=he_normal(seed=5),
                   recurrent_initializer=orthogonal(gain=1.0, seed=5),
                   activation='tanh',
                   return_sequences=True,
                   recurrent_dropout=rd))(o2t)

    o3t = tf.transpose(o3t, perm=[0, 2, 1, 3, 4])

    o4 = Add()([o3, o3t])
    res = tf.reduce_sum(o4, axis=-1)

    shortcut = Conv2D(num_class,
                      kernel_size=(1, 1),
                      padding='same',
                      strides=1,
                      kernel_initializer=he_normal(seed=5),
                      bias_initializer='zeros')(x)

    shortcut = LeakyReLU(alpha=0.1)(shortcut)

    output = Add()([x, res])

    return output
def Classifier_Net(x1, x2):

    dense_1 = Dense(128, kernel_initializer=initializers.he_normal())
    dense_2 = Dense(128, kernel_initializer=initializers.he_normal())
    dense_final = Dense(1,
                        kernel_initializer=initializers.lecun_normal(),
                        activation='sigmoid')

    x = Add()([dense_1(x1), dense_2(x2)])
    x = Activation('relu')(BatchNormalization()(x))
    x = dense_final(Dropout(0.4)(x))
    # x = dense_final(x)

    return x
Example #28
    def build_RNN_model(self):
        inputs = Input(shape=(self.state_length, self.features))
        # h1 = GRU(32, activation='relu', kernel_initializer=he_normal(seed=215247), return_sequences=True)(inputs)
        # h2 = GRU(256, activation='relu', kernel_initializer=he_normal(seed=87), return_sequences=True)(inputs)
        h3 = GRU(64, activation='relu', kernel_initializer=he_normal(seed=56))(inputs)
        h4 = Dense(64, activation='relu', kernel_initializer=he_normal(seed=524))(h3)
        # h4 = Dense(256, activation='relu', kernel_initializer=he_normal(seed=217))(h4)
        # h4 = Dense(128, activation='relu', kernel_initializer=he_normal(seed=217))(h4)
        h4 = Dense(64, activation='relu', kernel_initializer=he_normal(seed=50))(h4)
        # h4 = Dense(32, activation='relu', kernel_initializer=he_normal(seed=527))(h4)
        # h4 = Dense(16, activation='relu', kernel_initializer=he_normal(seed=9005247))(h4)
        outputs = Dense(self.n_actions * self.n_nodes, kernel_initializer=he_normal(seed=89))(h4)
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(loss='mse', optimizer='rmsprop')
        return model
Example #29
def gated_classifier_model(Xv, Xp):
    mlp_phr = Dense(300, kernel_initializer=initializers.he_normal())
    mlp_vis = Dense(300, kernel_initializer=initializers.he_normal())
    final_mlp = Dense(1,
                      kernel_initializer=initializers.lecun_normal(),
                      activation='sigmoid',
                      name='final')

    g_phr, g_vis = multimodal_gate_model(Xv, Xp)
    h_phr = Activation('tanh')(BatchNormalization()(mlp_phr(Xp)))
    h_vis = Activation('tanh')(BatchNormalization()(mlp_vis(Xv)))

    h = Add()([Multiply()([g_phr, h_phr]), Multiply()([g_vis, h_vis])])
    h = final_mlp(Dropout(0.4)(h))
    h = Flatten()(h)
    return h
def construct_model(inputs: tf.Tensor, is_training: bool,
                    num_classes: int) -> tf.Tensor:
    strides = [1, 2, 2, 2, 1, 2, 1]
    channels = [16, 24, 32, 64, 96, 160, 320]
    repetitions = [1, 2, 3, 4, 3, 3, 1]
    multipliers = [1, 6, 6, 6, 6, 6, 6]

    x = convo_bn_relu(inputs, 32, 3, is_training, 2)
    for i in range(len(channels)):
        for repetition_num in range(repetitions[i]):
            stride = min(int(repetition_num == 0) + 1, strides[i])
            x = bottleneck_residual_block(x,
                                          multipliers[i],
                                          output_channels=channels[i],
                                          is_training=is_training,
                                          stride=stride)

    x = tf.layers.conv2d(x,
                         1280,
                         1,
                         kernel_initializer=he_normal(),
                         kernel_regularizer=l2(L2_REGULARIZATION))
    x = tf.reduce_mean(x, axis=[1, 2])
    x = tf.layers.dense(x,
                        num_classes,
                        kernel_initializer=glorot_uniform(),
                        kernel_regularizer=l2(L2_REGULARIZATION))
    return x