Example #1
def attention_3d_block(inputs, seq_len=21):
    # input_dim = int(inputs.shape[2])
    a = Permute((2, 1))(inputs)
    a = Dense(seq_len, activation='softmax')(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    # output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')
    output_attention_mul = multiply([inputs, a_probs], name='attention_mul')
    return output_attention_mul
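A minimal usage sketch (an addition, assuming TensorFlow 2.x tf.keras and the attention_3d_block above with its layer imports in scope): wire the block between a sequence-returning LSTM and a classifier head.

import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, Dense, Flatten
from tensorflow.keras.models import Model

SEQ_LEN, FEATURES = 21, 8
inp = Input(shape=(SEQ_LEN, FEATURES))
lstm_out = LSTM(32, return_sequences=True)(inp)           # (batch, 21, 32)
attended = attention_3d_block(lstm_out, seq_len=SEQ_LEN)  # same shape, re-weighted over time
out = Dense(1, activation='sigmoid')(Flatten()(attended))
model = Model(inp, out)
model.summary()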
Example #2
 def __attention_3d_block(self, _lstm_output, _time_steps) -> Layer:
     """https://github.com/philipperemy/keras-attention-mechanism/blob/master/attention_lstm.py
     """
     att = Permute((2, 1))(_lstm_output)
     att = Reshape((_lstm_output.shape[2].value, _time_steps))(att)
     att = Dense(_time_steps, activation='softmax')(att)
     att_probs = Permute((2, 1), name='attention_vec')(att)
     return Multiply(name='attention_mul')([_lstm_output, att_probs])
Example #3
    def layer(input_tensor):

        h, w, c = int_shape(input_tensor)[1:]
        H = h * factor
        W = w * factor

        x = Conv2DBlock(c * factor**2, (1, 1),
                        padding='same',
                        name='duc_{}'.format(factor))(input_tensor)
        x = Permute((3, 1, 2))(x)
        x = Reshape((c, factor, factor, h, w))(x)
        x = Permute((1, 4, 2, 5, 3))(x)
        x = Reshape((c, H, W))(x)
        x = Permute((2, 3, 1))(x)
        return x
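The Permute/Reshape chain above is a hand-rolled depth-to-space (subpixel) upsampling. A shape trace on dummy data (a sketch, assuming stock Keras layers and substituting a plain Conv2D for the project's Conv2DBlock):

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Permute, Reshape

factor, h, w, c = 2, 4, 4, 3
x = tf.random.normal((1, h, w, c))
x = Conv2D(c * factor**2, (1, 1), padding='same')(x)  # (1, 4, 4, 12)
x = Permute((3, 1, 2))(x)                             # (1, 12, 4, 4)
x = Reshape((c, factor, factor, h, w))(x)             # (1, 3, 2, 2, 4, 4)
x = Permute((1, 4, 2, 5, 3))(x)                       # (1, 3, 4, 2, 4, 2)
x = Reshape((c, h * factor, w * factor))(x)           # (1, 3, 8, 8)
x = Permute((2, 3, 1))(x)                             # (1, 8, 8, 3)
print(x.shape)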
Example #4
def attention_3d_block(inputs, SINGLE_ATTENTION_VECTOR=False):
    # inputs.shape = (batch_size, time_steps, input_dim)
    input_dim = int(inputs.shape[2])
    TIME_STEPS = int(inputs.shape[1])
    a = Permute((2, 1))(inputs)
    a = Reshape(
        (input_dim, TIME_STEPS)
    )(a)  # this line is not useful. It's just to know which dimension is what.
    a = Dense(TIME_STEPS, activation='softmax')(a)
    if SINGLE_ATTENTION_VECTOR:
        a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
        a = RepeatVector(input_dim)(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    output_attention_mul = multiply([inputs, a_probs], name='attention_mul')  # element-wise attention weighting
    return output_attention_mul
Example #5
def vgg_face_blank():
    # Model initialization
    mdl = Sequential()
    # First layer is a dummy-permutation = Identity to specify input shape
    mdl.add(Permute((1, 2, 3), input_shape=(224, 224, 3)))  # WARNING : 0 is the sample dim

    # Model body
    for l in convblock(64, 1, bits=2):
        mdl.add(l)

    for l in convblock(128, 2, bits=2):
        mdl.add(l)

    for l in convblock(256, 3, bits=3):
        mdl.add(l)

    for l in convblock(512, 4, bits=3):
        mdl.add(l)

    for l in convblock(512, 5, bits=3):
        mdl.add(l)

    # Model head
    mdl.add(Convolution2D(4096, kernel_size=(7, 7), activation='relu', name='fc6'))
    mdl.add(Dropout(0.5))

    mdl.add(Convolution2D(4096, kernel_size=(1, 1), activation='relu', name='fc7'))
    mdl.add(Dropout(0.5))

    mdl.add(Convolution2D(2622, kernel_size=(1, 1), activation='relu', name='fc8'))
    mdl.add(Flatten())
    mdl.add(Activation('softmax'))

    return mdl
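convblock is defined elsewhere in the project; a plausible reconstruction (hypothetical, VGG-16 style: `bits` 3x3 conv+ReLU layers followed by a 2x2 max-pool) would be:

from tensorflow.keras.layers import Convolution2D, MaxPooling2D

def convblock(cdim, nb, bits=3):
    # hypothetical sketch: `bits` conv+ReLU layers, then a 2x2 max-pool
    layers = []
    for k in range(1, bits + 1):
        name = 'conv{}_{}'.format(nb, k)  # names in the style of VGG-Face weight files
        layers.append(Convolution2D(cdim, kernel_size=(3, 3), padding='same',
                                    activation='relu', name=name))
    layers.append(MaxPooling2D((2, 2), strides=(2, 2)))
    return layers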
Example #6
def construct_model(windowSize, numChannels):
    # construct sequential model
    model = Sequential()
    # permute the input so that it matches the layout in the EEGNet paper
    model.add(Permute((3, 2, 1), input_shape=(windowSize, numChannels, 1)))
    # layer1
    model.add(Conv2D(16, kernel_size=(numChannels, 1), padding='valid',
                     strides=(1, 1), data_format='channels_first', activation='relu'))
    model.add(BatchNormalization(axis=1, scale=False, center=False))
    # model.add(Permute((2, 1, 3)))
    model.add(Activation('relu'))
    # pooling kept in channels_first to match the conv layers
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                           data_format='channels_first'))
    # layer2
    model.add(Conv2D(8, kernel_size=(1, 64), data_format='channels_first', padding='same'))
    model.add(BatchNormalization(axis=1, scale=False, center=False))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                           data_format='channels_first'))
    model.add(Dropout(0.5))
    # layer3
    model.add(Conv2D(4, kernel_size=(5, 5), data_format='channels_first', padding='same'))
    model.add(BatchNormalization(axis=1, scale=False, center=False))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first', padding='same'))
    model.add(Dropout(0.5))
    # layer4
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    # layer5
    model.add(Dense(2, activation='softmax'))
    # print(model.summary())
    return model
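A minimal usage sketch (hypothetical sizes: 22-channel EEG with 256-sample windows):

model = construct_model(windowSize=256, numChannels=22)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()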
Example #7
File: se.py Project: aayushARM/planet-cv
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio of the squeeze (bottleneck) Dense layer

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
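Usage sketch, assuming the squeeze_excite_block above with its Keras imports is in scope (channels_last data format):

import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D

inp = Input(shape=(32, 32, 64))
x = Conv2D(64, 3, padding='same', activation='relu')(inp)
x = squeeze_excite_block(x, ratio=16)  # re-scales the 64 channels per sample
print(x.shape)                         # (None, 32, 32, 64)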
Example #8
def default_classification_model(num_classes,
                                 num_anchors,
                                 pyramid_feature_size=256,
                                 prior_probability=0.01,
                                 classification_feature_size=256,
                                 name='classification_submodel'):
    """Creates the default regression submodel.

    Args:
        num_classes: Number of classes to predict a score for at each feature level.
        num_anchors: Number of anchors to predict classification
            scores for at each feature level.
        pyramid_feature_size: The number of filters to expect from the
            feature pyramid levels.
        classification_feature_size: The number of filters to use in the layers
            in the classification submodel.
        name: The name of the submodel.

    Returns:
        A keras.models.Model that predicts classes for each anchor.
    """
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
    }

    if K.image_data_format() == 'channels_first':
        inputs = Input(shape=(pyramid_feature_size, None, None, None))
    else:
        inputs = Input(shape=(None, None, None, pyramid_feature_size))
    outputs = inputs
    for i in range(4):
        outputs = Conv3D(filters=classification_feature_size,
                         activation='relu',
                         name='pyramid_classification_{}'.format(i),
                         kernel_initializer=RandomNormal(mean=0.0,
                                                         stddev=0.01,
                                                         seed=None),
                         bias_initializer='zeros',
                         **options)(outputs)

    outputs = Conv3D(
        filters=num_classes * num_anchors,
        kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),
        bias_initializer=PriorProbability(probability=prior_probability),
        name='pyramid_classification',
        **options)(outputs)

    # reshape output and apply sigmoid
    if K.image_data_format() == 'channels_first':
        # move channels last: (C, D, H, W) -> (D, H, W, C)
        outputs = Permute((2, 3, 4, 1),
                          name='pyramid_classification_permute')(outputs)
    outputs = Reshape((-1, num_classes),
                      name='pyramid_classification_reshape')(outputs)
    outputs = Activation('sigmoid',
                         name='pyramid_classification_sigmoid')(outputs)

    return Model(inputs=inputs, outputs=outputs, name=name)
Example #9
def EEGNet_old(nb_classes, Chans = 64, Samples = 128, regRate = 0.0001,
           dropoutRate = 0.25, kernels = [(2, 32), (8, 4)], strides = (2, 4)):
    """ Keras Implementation of EEGNet_v1 (https://arxiv.org/abs/1611.08024v2)
    This model is the original EEGNet model proposed on arxiv
            https://arxiv.org/abs/1611.08024v2
    
    with a few modifications: we use striding instead of max-pooling as this 
    helped slightly in classification performance while also providing a 
    computational speed-up. 
    
    Note that we no longer recommend the use of this architecture, as the new
    version of EEGNet performs much better overall and has nicer properties.
    
    Inputs:
        
        nb_classes     : total number of final categories
        Chans, Samples : number of EEG channels and samples, respectively
        regRate        : regularization rate for L1 and L2 regularizations
        dropoutRate    : dropout fraction
        kernels        : the 2nd and 3rd layer kernel dimensions (default is 
                         the [2, 32] x [8, 4] configuration)
        strides        : the stride size (note that this replaces the max-pool
                         used in the original paper)
    
    """

    # start the model
    input_main   = Input((1, Chans, Samples))
    layer1       = Conv2D(16, (Chans, 1), input_shape=(1, Chans, Samples),
                                 kernel_regularizer = l1_l2(l1=regRate, l2=regRate))(input_main)
    layer1       = BatchNormalization(axis=1)(layer1)
    layer1       = Activation('elu')(layer1)
    layer1       = Dropout(dropoutRate)(layer1)
    
    permute_dims = 2, 1, 3
    permute1     = Permute(permute_dims)(layer1)
    
    layer2       = Conv2D(4, kernels[0], padding = 'same', 
                            kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                            strides = strides)(permute1)
    layer2       = BatchNormalization(axis=1)(layer2)
    layer2       = Activation('elu')(layer2)
    layer2       = Dropout(dropoutRate)(layer2)
    
    layer3       = Conv2D(4, kernels[1], padding = 'same',
                            kernel_regularizer=l1_l2(l1=0.0, l2=regRate),
                            strides = strides)(layer2)
    layer3       = BatchNormalization(axis=1)(layer3)
    layer3       = Activation('elu')(layer3)
    layer3       = Dropout(dropoutRate)(layer3)
    
    flatten      = Flatten(name = 'flatten')(layer3)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input_main, outputs=softmax)
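Note that the (1, Chans, Samples) input and BatchNormalization(axis=1) assume the channels_first image format; a minimal usage sketch (assuming the function above with its imports is in scope):

from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')  # required for the (1, Chans, Samples) layout
model = EEGNet_old(nb_classes=4)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()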
Example #10
def mlstm_fcn(config: Union[str,
                            SoccerSageConfig] = SoccerSageConfig()) -> Model:
    '''Load and train the MLSTM-FCN model.
    Source: https://github.com/titu1994/MLSTM-FCN/blob/master/eeg_model.py

    Args:
        config: The configuration data.

    Returns:
        Model: A model in TensorFlow.
    '''
    if isinstance(config, str):
        config = SoccerSageConfig.from_yaml(config)

    n_classes = config.num_results
    if config.classifier_type == 'total_goals':
        n_classes = config.num_goals

    ip = Input(shape=(config.time_steps, config.feature_size))

    x = Permute((2, 1))(ip)
    #x = Masking(mask_value=config.masking_value)(x)
    x = LSTM(64)(x)
    x = Dropout(0.8)(x)

    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(ip)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(n_classes, activation='softmax')(x)

    classifier = Model(ip, out)

    classifier.compile(
        loss='categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(lr=config.learning_rate),
        metrics=['accuracy', ranked_probability_score])

    if config.pretrained_classifier:
        files = SoccerSageFiles(config)
        classifier.load_weights(files.model_weights)

    return classifier
Example #11
def get_cnn1d_tx_selu(output_size, img_height, img_width, show=True):
    model_input = Input(shape=(img_height * img_width, ), name='Main_input')
    x = Reshape((img_height, img_width), name='Reshape_1')(model_input)
    x = Permute((2, 1), name='Permute_1')(x)
    x = Conv1D(filters=256,
               kernel_size=48,
               strides=1,
               padding='same',
               activation='selu',
               name='Conv1D_selu_1')(x)
    x = Conv1D(filters=128,
               kernel_size=48,
               strides=1,
               padding='same',
               activation='selu',
               name='Conv1D_selu_2')(x)
    x = Conv1D(filters=64,
               kernel_size=48,
               strides=1,
               padding='same',
               activation='selu',
               name='Conv1D_selu_3')(x)
    x = Conv1D(filters=32,
               kernel_size=48,
               strides=1,
               padding='same',
               activation='selu',
               name='Conv1D_selu_4')(x)
    x = Conv1D(filters=16,
               kernel_size=48,
               strides=1,
               padding='same',
               activation='selu',
               name='Conv1D_selu_5')(x)
    x = Conv1D(filters=8,
               kernel_size=48,
               strides=1,
               padding='same',
               activation='selu',
               name='Conv1D_selu_6')(x)
    x = Flatten(name='Flatten_1')(x)
    x = Dense(512, activation='selu', name='Dense_selu_1')(x)
    x = Dense(512, activation='selu', name='Dense_selu_2')(x)
    x = Dense(512, activation='selu', name='Dense_selu_3')(x)
    cnn1d_output = Dense(output_size,
                         activation='linear',
                         name='Output_Dense_linear')(x)
    cnn1d = Model(inputs=model_input,
                  outputs=cnn1d_output,
                  name='CNN1D_Tx_selu')

    if show:
        print('CNN1D Tx summary:')
        cnn1d.summary()
        print()

    return cnn1d
Example #12
def T_trans(T, T_F, H, W):
    T_in = Input(shape=(T + 7, H, W))
    T_in_p = Permute((2, 3, 1))(T_in)
    T_mid = Conv2D(filters=T_F, kernel_size=(1, 1), padding="same")(T_in_p)
    T_act = Activation('relu')(T_mid)
    T_fin = Conv2D(filters=1, kernel_size=(1, 1), padding="same")(T_act)
    T_mul = Activation('relu')(T_fin)
    T_model = Model(inputs=T_in, outputs=T_mul)

    return T_model
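Shape check (hypothetical sizes; assumes the Keras imports used above): the model fuses T+7 stacked maps down to a single map per location.

model = T_trans(T=24, T_F=32, H=16, W=8)
model.summary()  # input (None, 31, 16, 8) -> output (None, 16, 8, 1)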
Example #13
def default_n_linear(num_outputs, input_shape=(120, 160, 3), roi_crop=(0, 0)):

    drop = 0.1

    # we now expect that cropping is done elsewhere; adjust our expected image size here:
    input_shape = adjust_input_shape(input_shape, roi_crop)

    img_in = Input(shape=input_shape, name='img_in')
    x = img_in
    x = Convolution2D(24, (5, 5),
                      strides=(2, 2),
                      activation='relu',
                      name="conv2d_1")(x)
    x = Dropout(drop)(x)
    x = Convolution2D(32, (5, 5),
                      strides=(2, 2),
                      activation='relu',
                      name="conv2d_2")(x)
    x = Dropout(drop)(x)
    x = Convolution2D(64, (5, 5),
                      strides=(2, 2),
                      activation='relu',
                      name="conv2d_3")(x)
    x = Dropout(drop)(x)
    x = Convolution2D(64, (3, 3),
                      strides=(1, 1),
                      activation='relu',
                      name="conv2d_4")(x)
    x = Dropout(drop)(x)
    x = Convolution2D(64, (3, 3),
                      strides=(1, 1),
                      activation='relu',
                      name="conv2d_5")(x)
    x = Dropout(drop)(x)

    # x = Flatten(name='flattened')(x)
    a, b, c, d = x.shape  # returns dimensions (batch, height, width, channels)
    a = b * c * d
    x = Permute([1, 2, 3])(x)  # identity permute; with the Reshape below it stands in for Flatten
    x = Reshape((int(a), ))(x)  # convert dim -> int
    x = Dense(100, activation='relu')(x)
    x = Dropout(drop)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(drop)(x)

    outputs = []

    for i in range(num_outputs):
        outputs.append(
            Dense(1, activation='linear', name='n_outputs' + str(i))(x))

    model = Model(inputs=[img_in], outputs=outputs)

    return model
Example #14
def horizontal_axis_convolution(input, filters, l2):
    shared_squash = Conv3D(filters, [4, 1, 1], kernel_regularizer=l2)

    squash_x = shared_squash(input)
    box_x = spread_plane(squash_x, filters, [1, 2, 3, 4])  # x, y, z, f -> x, y, z, f

    rotate = Permute([2, 1, 3, 4])(input)
    squash_y = shared_squash(rotate)
    box_y = spread_plane(squash_y, filters, [2, 1, 3, 4])  # y, x, z, f -> x, y, z, f

    return box_x, box_y
Example #15
File: model.py Project: ruihwang/gdynet
 def build_cgcnn_layer(self, atom_fea, bond_fea, nbr_list):
     total_fea = Lambda(_concat_nbrs,
                        output_shape=_concat_nbrs_output_shape)(
                            [atom_fea, bond_fea, nbr_list])
     # total_fea shape (None, N, M, 2 * atom_fea_len + bond_fea_len)
     nbr_core = Dense(self.atom_fea_len)(total_fea)
     nbr_filter = Dense(1)(total_fea)
     if self.use_bn:
         nbr_core = BatchNormalization(axis=-1)(nbr_core)
     nbr_filter = Permute((1, 3, 2))(nbr_filter)
     nbr_filter = Activation('softmax')(nbr_filter)
     nbr_filter = Permute((1, 3, 2))(nbr_filter)
     # nbr_filter = keras.activations.softmax(nbr_filter, axis=-2)
     nbr_core = Activation('relu')(nbr_core)
     nbr_sumed = Lambda(lambda x: tf.reduce_mean(x[0] * x[1], axis=2))(
         [nbr_filter, nbr_core])
     if self.use_bn:
         nbr_sumed = BatchNormalization(axis=-1)(nbr_sumed)
     out = Activation('relu')(Add()([atom_fea, nbr_sumed]))
     return out
Example #16
def reg_mlstm_fcn(config: Union[
    str, SoccerSageConfig] = SoccerSageConfig()) -> Model:
    '''Load and train the regression MLSTM-FCN model.
    Source: https://github.com/titu1994/MLSTM-FCN/blob/master/eeg_model.py

    Args:
        config: The configuration data.

    Returns:
        Model: A model in TensorFlow.
    '''
    if isinstance(config, str):
        config = SoccerSageConfig.from_yaml(config)

    ip = Input(shape=(config.time_steps, config.feature_size))

    x = Permute((2, 1))(ip)
    # x = Masking(mask_value=config.masking_value)(x)
    x = LSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(ip)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(1, activation='linear')(x)

    classifier = Model(ip, out)

    classifier.compile(
        loss='mse',
        optimizer=tf.keras.optimizers.Adam(lr=config.learning_rate),
        metrics=['mse', 'mae'])

    if config.pretrained_classifier:
        files = SoccerSageFiles(config)
        classifier.load_weights(files.regressor_weights)

    return classifier
Example #17
def vertical_plane_convolution(input, plane_filters, l2):
    reduce = Conv3D(plane_filters, 1, kernel_regularizer=l2)(input)
    shared_squash = Conv3D(plane_filters, [4, 1, 4], kernel_regularizer=l2)

    squash_xz = shared_squash(reduce)
    box_xz = spread_axis(squash_xz, plane_filters, [1, 3, 2, 4])  # x, z, y, f -> x, y, z, f

    rotate = Permute([2, 1, 3, 4])(reduce)
    squash_yz = shared_squash(rotate)
    box_yz = spread_axis(squash_yz, plane_filters, [3, 1, 2, 4])  # y, z, x, f -> x, y, z, f

    return box_xz, box_yz
Example #18
def default_regression_model(num_values,
                             num_anchors,
                             pyramid_feature_size=256,
                             regression_feature_size=256,
                             name='regression_submodel'):
    """Creates the default regression submodel.

    Args:
        num_values (int): Number of values to regress.
        num_anchors (int): Number of anchors to regress for each feature level.
        pyramid_feature_size (int): The number of filters to expect from the
            feature pyramid levels.
        regression_feature_size (int): The number of filters to use in the layers
            in the regression submodel.
        name (str): The name of the submodel.

    Returns:
        tensorflow.keras.Model: A model that predicts regression values
            for each anchor.
    """
    # All new conv layers except the final one in the
    # RetinaNet (classification) subnets are initialized
    # with bias b = 0 and a Gaussian weight fill with stddev = 0.01.
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'kernel_initializer': RandomNormal(mean=0.0, stddev=0.01, seed=None),
        'bias_initializer': 'zeros'
    }

    if K.image_data_format() == 'channels_first':
        inputs = Input(shape=(pyramid_feature_size, None, None))
    else:
        inputs = Input(shape=(None, None, pyramid_feature_size))
    outputs = inputs
    for i in range(4):
        outputs = Conv2D(filters=regression_feature_size,
                         activation='relu',
                         name='pyramid_regression_{}'.format(i),
                         **options)(outputs)

    outputs = Conv2D(num_anchors * num_values,
                     name='pyramid_regression',
                     **options)(outputs)
    if K.image_data_format() == 'channels_first':
        outputs = Permute((2, 3, 1),
                          name='pyramid_regression_permute')(outputs)
    outputs = Reshape((-1, num_values),
                      name='pyramid_regression_reshape')(outputs)

    return Model(inputs=inputs, outputs=outputs, name=name)
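Why the Permute precedes the Reshape in the channels_first branch: Reshape((-1, num_values)) must see all num_anchors * num_values predictions for one spatial position contiguously in the last axis. A small trace with hypothetical sizes:

import tensorflow as tf
from tensorflow.keras.layers import Permute, Reshape

A, V, H, W = 9, 4, 2, 3                 # anchors, values, height, width
x = tf.random.normal((1, A * V, H, W))  # channels_first conv output
x = Permute((2, 3, 1))(x)               # (1, H, W, A*V): channels moved last
x = Reshape((-1, V))(x)                 # (1, H*W*A, V) = (1, 54, 4)
print(x.shape)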
Example #19
def attention_3d_block(inputs):
    # inputs.shape = (batch_size, time_steps, input_dim)
    input_dim = int(inputs.shape[2])
    a = inputs
    AveragePooling = pooling.GlobalAveragePooling1D(
        data_format='channels_last')(a)
    den1 = Dense(input_dim, activation='relu')(AveragePooling)
    den2 = Dense(input_dim, activation='hard_sigmoid')(den1)
    if SINGLE_ATTENTION_VECTOR:
        a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(den2)
        a = RepeatVector(input_dim)(a)
    a_probs = Permute((1, 2), name='attention_vec')(a)

    output_attention_mul = merge.multiply([inputs, a_probs],
                                          name='attention_mul')
    # output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')  # old Keras API
    return output_attention_mul
Example #20
def default_bhv(num_outputs, num_bvh_inputs, input_shape):
    '''
    Notes: this model depends on concatenate which failed on keras < 2.0.8
    '''

    img_in = Input(shape=input_shape, name='img_in')
    bvh_in = Input(shape=(num_bvh_inputs, ), name="behavior_in")

    x = img_in
    #x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
    x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    # x = Flatten(name='flattened')(x)
    a, b, c, d = x.shape  # returns dimension
    a = b * c * d
    x = Permute([1, 2, 3])(x)
    x = Reshape((int(a), ))(x)  # convert dim -> int
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)

    y = bvh_in
    y = Dense(num_bvh_inputs * 2, activation='relu')(y)
    y = Dense(num_bvh_inputs * 2, activation='relu')(y)
    y = Dense(num_bvh_inputs * 2, activation='relu')(y)

    z = concatenate([x, y])
    z = Dense(100, activation='relu')(z)
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)

    # categorical output of the angle: softmax over 15 steering bins,
    # the prediction is the bin with the highest probability (0.0-1.0)
    angle_out = Dense(15, activation='softmax', name='angle_out')(z)

    # categorical output of the throttle: softmax over 20 throttle bins
    throttle_out = Dense(20, activation='softmax', name='throttle_out')(z)

    model = Model(inputs=[img_in, bvh_in], outputs=[angle_out, throttle_out])

    return model
Example #21
File: dien_udg.py Project: UDG-MM/UDG
def interest_evolution(concat_behavior, deep_input_item, user_behavior_length, gru_type="GRU", use_neg=False,
                       neg_concat_behavior=None, att_hidden_size=(64, 16), att_activation='sigmoid',
                       att_weight_normalization=False, ):
    if gru_type not in ["GRU", "AIGRU", "AGRU", "AUGRU"]:
        raise ValueError("gru_type error ")
    aux_loss_1 = None
    embedding_size = None
    rnn_outputs = DynamicGRU(embedding_size, return_sequence=True,
                             name="gru1")([concat_behavior, user_behavior_length])
    print(concat_behavior)
    print(neg_concat_behavior)
    if gru_type == "AUGRU" and use_neg:
        aux_loss_1 = auxiliary_loss(rnn_outputs[:, :-1, :], concat_behavior[:, 1:, :],

                                    neg_concat_behavior[:, 1:, :],

                                    tf.subtract(user_behavior_length, 1), stag="gru")  # [:, 1:]

    if gru_type == "GRU":
        rnn_outputs2 = DynamicGRU(embedding_size, return_sequence=True,
                                  name="gru2")([rnn_outputs, user_behavior_length])
        # attention_score = AttentionSequencePoolingLayer(hidden_size=att_hidden_size, activation=att_activation, weight_normalization=att_weight_normalization, return_score=True)([
        #     deep_input_item, rnn_outputs2, user_behavior_length])
        # outputs = Lambda(lambda x: tf.matmul(x[0], x[1]))(
        #     [attention_score, rnn_outputs2])
        # hist = outputs
        hist = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, att_activation=att_activation,
                                             weight_normalization=att_weight_normalization, return_score=False)([
            deep_input_item, rnn_outputs2, user_behavior_length])

    else:  # AIGRU AGRU AUGRU

        scores = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, att_activation=att_activation,
                                               weight_normalization=att_weight_normalization, return_score=True)([
            deep_input_item, rnn_outputs, user_behavior_length])

        if gru_type == "AIGRU":
            hist = multiply([rnn_outputs, Permute([2, 1])(scores)])
            final_state2 = DynamicGRU(embedding_size, gru_type="GRU", return_sequence=False, name='gru2')(
                [hist, user_behavior_length])
        else:  # AGRU AUGRU
            final_state2 = DynamicGRU(embedding_size, gru_type=gru_type, return_sequence=False,
                                      name='gru2')([rnn_outputs, user_behavior_length, Permute([2, 1])(scores)])
        hist = final_state2
    return hist, aux_loss_1
Example #22
def default_avcnetCV(input_shape=(120, 160, 3), roi_crop=(0, 0)):

    # we now expect that cropping is done elsewhere; adjust our expected image size here:
    input_shape = adjust_input_shape(input_shape, roi_crop)
    opt = keras.optimizers.Adam()
    cfg = dk.load_config()
    drop = 0.2

    # Input
    img_in = Input(
        shape=input_shape, name='img_in'
    )  # First layer, input layer, Shape comes from camera.py resolution, RGB
    x = img_in

    x1 = Conv2D(24, (5, 5), padding='same')(x)
    x1 = Activation('relu')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), strides=[2, 2])(x1)
    x1 = Dropout(drop)(x1)

    x2 = Conv2D(48, (5, 5), padding='same')(x1)
    x2 = Activation('relu')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), strides=[2, 2])(x2)
    x2 = Dropout(drop)(x2)

    x3 = Conv2D(96, (5, 5), padding='same')(x2)
    x3 = Activation('relu')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), strides=[2, 2])(x3)
    x3 = Dropout(drop)(x3)

    # x4 = Flatten()(x3) # OpenCV/dnn does not support flatten
    a, b, c, d = x3.shape  # returns dimension
    a = b * c * d
    x4 = Permute([1, 2, 3])(x3)
    x4 = Reshape((int(a), ))(x4)  # convert dim -> int
    x4 = Dense(500)(x4)
    x4 = Activation('relu')(x4)
    x4 = Dropout(drop)(x4)

    angle_out = Dense(4, activation='softmax', name='angle_out')(x4)
    model = Model(inputs=[img_in], outputs=[angle_out])

    return model
Example #23
def self_attn_block(inp, n_c, squeeze_factor=8):
    """ GAN Self Attention Block
    Code borrows from https://github.com/taki0112/Self-Attention-GAN-Tensorflow
    """
    msg = "Input channels must be >= {}, recieved nc={}".format(
        squeeze_factor, n_c)
    assert n_c // squeeze_factor > 0, msg
    var_x = inp
    shape_x = var_x.get_shape().as_list()

    var_f = Conv2D(
        n_c // squeeze_factor,
        1,
        kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_g = Conv2D(
        n_c // squeeze_factor,
        1,
        kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_h = Conv2D(
        n_c, 1, kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)

    shape_f = var_f.get_shape().as_list()
    shape_g = var_g.get_shape().as_list()
    shape_h = var_h.get_shape().as_list()
    flat_f = Reshape((-1, shape_f[-1]))(var_f)
    flat_g = Reshape((-1, shape_g[-1]))(var_g)
    flat_h = Reshape((-1, shape_h[-1]))(var_h)

    var_s = Lambda(lambda var_x: K.batch_dot(var_x[0],
                                             Permute((2, 1))(var_x[1])))(
                                                 [flat_g, flat_f])

    beta = Softmax(axis=-1)(var_s)
    var_o = Lambda(lambda var_x: K.batch_dot(var_x[0], var_x[1]))(
        [beta, flat_h])
    var_o = Reshape(shape_x[1:])(var_o)
    var_o = Scale()(var_o)

    out = add([var_o, inp])
    return out
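A standalone shape trace of the two batch_dot steps above (hypothetical sizes; HW is the flattened spatial extent):

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Permute

B, HW, C_sq, C = 2, 16, 4, 32
flat_f = tf.random.normal((B, HW, C_sq))
flat_g = tf.random.normal((B, HW, C_sq))
flat_h = tf.random.normal((B, HW, C))
var_s = K.batch_dot(flat_g, Permute((2, 1))(flat_f))  # (2, 16, 16) attention map
beta = tf.nn.softmax(var_s, axis=-1)
var_o = K.batch_dot(beta, flat_h)                     # (2, 16, 32) attended features
print(var_s.shape, var_o.shape)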
Example #24
def get_cnn1d_tx(output_size,
                 img_height,
                 img_width,
                 weight_decay=1e-4,
                 show=True):

    # values with the same Tx end up in the same channel
    model_input = Input(shape=(img_height * img_width, ), name='Main_input')
    x = Reshape((img_height, img_width), name='Reshape_1')(model_input)
    x = Permute((2, 1), name='Permute_1')(x)
    x = Conv1D(filters=256,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='selu',
               kernel_initializer='lecun_normal',
               bias_initializer='zeros',
               kernel_regularizer=l2(weight_decay),
               name='Conv1D_selu_1')(x)
    x = Conv1D(filters=128,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='selu',
               kernel_initializer='lecun_normal',
               bias_initializer='zeros',
               kernel_regularizer=l2(weight_decay),
               name='Conv1D_selu_2')(x)
    x = Conv1D(filters=64,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='selu',
               kernel_initializer='lecun_normal',
               bias_initializer='zeros',
               kernel_regularizer=l2(weight_decay),
               name='Conv1D_selu_3')(x)
    x = Conv1D(filters=32,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='selu',
               kernel_initializer='lecun_normal',
               bias_initializer='zeros',
               kernel_regularizer=l2(weight_decay),
               name='Conv1D_selu_4')(x)
    x = Conv1D(filters=16,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='selu',
               kernel_initializer='lecun_normal',
               bias_initializer='zeros',
               kernel_regularizer=l2(weight_decay),
               name='Conv1D_selu_5')(x)
    x = Flatten(name='Flatten_1')(x)
    x = Dense(512,
              activation='selu',
              kernel_initializer='lecun_normal',
              bias_initializer='zeros',
              kernel_regularizer=l2(weight_decay),
              name='Dense_selu_1')(x)
    x = Dense(512,
              activation='selu',
              kernel_initializer='lecun_normal',
              bias_initializer='zeros',
              kernel_regularizer=l2(weight_decay),
              name='Dense_selu_2')(x)
    cnn1d_output = Dense(output_size,
                         activation='linear',
                         name='Output_Dense_linear')(x)
    cnn1d = Model(inputs=model_input, outputs=cnn1d_output, name='CNN1D_Tx')

    if show:
        print('CNN1D_Tx summary:')
        cnn1d.summary()
        print()

    return cnn1d
Example #25
def Unet(inputs, weights, n_classes, input_height, input_width):
    x = tf.reshape(inputs, shape=[-1, input_height, input_width, 3])

    w, b, s = get_weights_biases_scale(weights, 'conv2d_1/kernel:0',
                                       'conv2d_1/bias:0')
    conv1 = conv_2d(x, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_2/kernel:0',
                                       'conv2d_2/bias:0')
    conv1 = conv_2d(conv1, w, b, s, activation='relu')
    pool1 = maxpool_2d(conv1)

    w, b, s = get_weights_biases_scale(weights, 'conv2d_3/kernel:0',
                                       'conv2d_3/bias:0')
    conv2 = conv_2d(pool1, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_4/kernel:0',
                                       'conv2d_4/bias:0')
    conv2 = conv_2d(conv2, w, b, s, activation='relu')
    pool2 = maxpool_2d(conv2)

    w, b, s = get_weights_biases_scale(weights, 'conv2d_5/kernel:0',
                                       'conv2d_5/bias:0')
    conv3 = conv_2d(pool2, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_6/kernel:0',
                                       'conv2d_6/bias:0')
    conv3 = conv_2d(conv3, w, b, s, activation='relu')
    pool3 = maxpool_2d(conv3)

    w, b, s = get_weights_biases_scale(weights, 'conv2d_7/kernel:0',
                                       'conv2d_7/bias:0')
    conv4 = conv_2d(pool3, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_8/kernel:0',
                                       'conv2d_8/bias:0')
    conv4 = conv_2d(conv4, w, b, s, activation='relu')
    pool4 = maxpool_2d(conv4)

    w, b, s = get_weights_biases_scale(weights, 'conv2d_9/kernel:0',
                                       'conv2d_9/bias:0')
    conv5 = conv_2d(pool4, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_10/kernel:0',
                                       'conv2d_10/bias:0')
    conv5 = conv_2d(conv5, w, b, s, activation='relu')
    pool5 = maxpool_2d(conv5)

    up6 = UpSampling2D(size=(2, 2))(pool5)
    up6 = tf.concat([up6, conv5], axis=-1)
    w, b, s = get_weights_biases_scale(weights, 'conv2d_11/kernel:0',
                                       'conv2d_11/bias:0')
    conv6 = conv_2d(up6, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_12/kernel:0',
                                       'conv2d_12/bias:0')
    conv6 = conv_2d(conv6, w, b, s, activation='relu')

    up7 = UpSampling2D(size=(2, 2))(conv6)
    up7 = tf.concat([up7, conv4], axis=-1)
    w, b, s = get_weights_biases_scale(weights, 'conv2d_13/kernel:0',
                                       'conv2d_13/bias:0')
    conv7 = conv_2d(up7, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_14/kernel:0',
                                       'conv2d_14/bias:0')
    conv7 = conv_2d(conv7, w, b, s, activation='relu')

    up8 = UpSampling2D(size=(2, 2))(conv7)
    up8 = tf.concat([up8, conv3], axis=-1)
    w, b, s = get_weights_biases_scale(weights, 'conv2d_15/kernel:0',
                                       'conv2d_15/bias:0')
    conv8 = conv_2d(up8, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_16/kernel:0',
                                       'conv2d_16/bias:0')
    conv8 = conv_2d(conv8, w, b, s, activation='relu')

    up9 = UpSampling2D(size=(2, 2))(conv8)
    up9 = tf.concat([up9, conv2], axis=-1)
    w, b, s = get_weights_biases_scale(weights, 'conv2d_17/kernel:0',
                                       'conv2d_17/bias:0')
    conv9 = conv_2d(up9, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_18/kernel:0',
                                       'conv2d_18/bias:0')
    conv9 = conv_2d(conv9, w, b, s, activation='relu')

    up10 = UpSampling2D(size=(2, 2))(conv9)
    up10 = tf.concat([up10, conv1], axis=-1)
    w, b, s = get_weights_biases_scale(weights, 'conv2d_19/kernel:0',
                                       'conv2d_19/bias:0')
    conv10 = conv_2d(up10, w, b, s, activation='relu')
    w, b, s = get_weights_biases_scale(weights, 'conv2d_20/kernel:0',
                                       'conv2d_20/bias:0')
    conv10 = conv_2d(conv10, w, b, s, activation='relu')

    w, b, s = get_weights_biases_scale(weights, 'conv2d_21/kernel:0',
                                       'conv2d_21/bias:0')
    conv11 = conv_2d(conv10, w, b, s, activation='relu')
    conv11 = tf.reshape(conv11, shape=[-1, n_classes, input_height * input_width])
    conv11 = Permute((2, 1))(conv11)  # keep the batch dim so Permute sees (n_classes, H*W)
    return conv11
Example #26
def spread_axis(input, filters, permute_dims):
    gather = Reshape((FOUR * filters,))(input)
    repeat = RepeatVector(FOUR * FOUR)(gather)
    spread = Reshape((FOUR, FOUR, FOUR, filters))(repeat)
    permute = Permute(permute_dims)(spread)
    return permute
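Shape trace (assuming FOUR = 4, i.e. a 4x4x4 volume, as the arithmetic implies): broadcasting a squashed plane back over the missing axes.

import tensorflow as tf
from tensorflow.keras.layers import Permute, RepeatVector, Reshape

FOUR, filters = 4, 8
squashed = tf.random.normal((1, 1, FOUR, 1, filters))  # e.g. a Conv3D([4, 1, 4]) output
x = Reshape((FOUR * filters,))(squashed)               # (1, 32)
x = RepeatVector(FOUR * FOUR)(x)                       # (1, 16, 32)
x = Reshape((FOUR, FOUR, FOUR, filters))(x)            # (1, 4, 4, 4, 8)
x = Permute([1, 3, 2, 4])(x)                           # reorder spatial axes
print(x.shape)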
Example #27
    def build_model(self):
        """Helper method for creating the model"""
        vocab = set()
        for story, q, answer in self.train_stories + self.test_stories:
            vocab |= set(story + q + [answer])
        vocab = sorted(vocab)

        # Reserve 0 for masking via pad_sequences
        vocab_size = len(vocab) + 1
        story_maxlen = max(
            len(x) for x, _, _ in self.train_stories + self.test_stories)
        query_maxlen = max(
            len(x) for _, x, _ in self.train_stories + self.test_stories)

        word_idx = {c: i + 1 for i, c in enumerate(vocab)}
        self.inputs_train, self.queries_train, self.answers_train = (
            vectorize_stories(word_idx, story_maxlen, query_maxlen,
                              self.train_stories))
        self.inputs_test, self.queries_test, self.answers_test = (
            vectorize_stories(word_idx, story_maxlen, query_maxlen,
                              self.test_stories))

        # placeholders
        input_sequence = Input((story_maxlen, ))
        question = Input((query_maxlen, ))

        # encoders
        # embed the input sequence into a sequence of vectors
        input_encoder_m = Sequential()
        input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
        input_encoder_m.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, embedding_dim)

        # embed the input into a sequence of vectors of size query_maxlen
        input_encoder_c = Sequential()
        input_encoder_c.add(
            Embedding(input_dim=vocab_size, output_dim=query_maxlen))
        input_encoder_c.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, query_maxlen)

        # embed the question into a sequence of vectors
        question_encoder = Sequential()
        question_encoder.add(
            Embedding(input_dim=vocab_size,
                      output_dim=64,
                      input_length=query_maxlen))
        question_encoder.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, query_maxlen, embedding_dim)

        # encode input sequence and questions (which are indices)
        # to sequences of dense vectors
        input_encoded_m = input_encoder_m(input_sequence)
        input_encoded_c = input_encoder_c(input_sequence)
        question_encoded = question_encoder(question)

        # compute a "match" between the first input vector sequence
        # and the question vector sequence
        # shape: `(samples, story_maxlen, query_maxlen)`
        match = dot([input_encoded_m, question_encoded], axes=(2, 2))
        match = Activation("softmax")(match)

        # add the match matrix with the second input vector sequence
        response = add([match, input_encoded_c])  # (samples, story_maxlen, query_maxlen)
        response = Permute((2, 1))(response)  # (samples, query_maxlen, story_maxlen)

        # concatenate the match matrix with the question vector sequence
        answer = concatenate([response, question_encoded])

        # the original paper uses a matrix multiplication.
        # we choose to use a RNN instead.
        answer = LSTM(32)(answer)  # (samples, 32)

        # one regularization layer -- more would probably be needed.
        answer = Dropout(self.config.get("dropout", 0.3))(answer)
        answer = Dense(vocab_size)(answer)  # (samples, vocab_size)
        # we output a probability distribution over the vocabulary
        answer = Activation("softmax")(answer)

        # build the final model
        model = Model([input_sequence, question], answer)
        return model
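A standalone shape trace of the match/response steps above (hypothetical sizes):

import tensorflow as tf
from tensorflow.keras.layers import Activation, Permute, add, dot

story_maxlen, query_maxlen, emb = 10, 4, 64
input_encoded_m = tf.random.normal((2, story_maxlen, emb))
input_encoded_c = tf.random.normal((2, story_maxlen, query_maxlen))
question_encoded = tf.random.normal((2, query_maxlen, emb))
match = Activation('softmax')(dot([input_encoded_m, question_encoded], axes=(2, 2)))
response = add([match, input_encoded_c])  # (2, 10, 4)
response = Permute((2, 1))(response)      # (2, 4, 10)
print(match.shape, response.shape)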
Example #28
def default_classification_model(num_classes,
                                 num_anchors,
                                 pyramid_feature_size=256,
                                 prior_probability=0.01,
                                 classification_feature_size=256,
                                 frames_per_batch=1,
                                 name='classification_submodel'):
    """Creates the default regression submodel.

    Args:
        num_classes (int): Number of classes to predict a score
            for at each feature level.
        num_anchors (int): Number of anchors to predict classification
            scores for at each feature level.
        pyramid_feature_size (int): The number of filters to expect from the
            feature pyramid levels.
        prior_probability (float): the prior probability
        classification_feature_size (int): The number of filters to use in the
            layers in the classification submodel.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        name (str): The name of the submodel.

    Returns:
        tensorflow.keras.Model: A model that predicts classes for
            each anchor.
    """
    time_distributed = frames_per_batch > 1

    options = {
        'kernel_size': (3, 3, 3) if time_distributed else 3,
        'strides': 1,
        'padding': 'same',
    }

    shape = [None] * (4 if time_distributed else 3)
    if K.image_data_format() == 'channels_first':
        shape[0] = pyramid_feature_size
    else:
        shape[-1] = pyramid_feature_size
    inputs = Input(shape=shape)
    outputs = inputs
    conv = Conv3D if time_distributed else Conv2D
    for i in range(4):
        outputs = conv(filters=classification_feature_size,
                       activation='relu',
                       name='pyramid_classification_{}'.format(i),
                       kernel_initializer=RandomNormal(mean=0.0,
                                                       stddev=0.01,
                                                       seed=None),
                       bias_initializer='zeros',
                       **options)(outputs)

    outputs = conv(
        filters=num_classes * num_anchors,
        kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),
        bias_initializer=PriorProbability(probability=prior_probability),
        name='pyramid_classification',
        **options)(outputs)

    # reshape output and apply sigmoid
    if K.image_data_format() == 'channels_first':
        rank = 4 if time_distributed else 3
        perm = tuple(list(range(2, rank + 1)) + [1])
        outputs = Permute(perm, name='pyramid_classification_permute')(outputs)

    new_shape = (frames_per_batch, -1, num_classes)
    if not time_distributed:
        new_shape = new_shape[1:]

    outputs = Reshape(new_shape,
                      name='pyramid_classification_reshape')(outputs)
    outputs = Activation('sigmoid',
                         name='pyramid_classification_sigmoid')(outputs)

    return Model(inputs=inputs, outputs=outputs, name=name)
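The generated permutation covers both the 2D and 3D cases; a quick check:

for rank in (3, 4):
    perm = tuple(list(range(2, rank + 1)) + [1])
    print(rank, perm)  # 3 -> (2, 3, 1) for 2D data, 4 -> (2, 3, 4, 1) for 3D data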
Example #29
def default_regression_model(num_values,
                             num_anchors,
                             pyramid_feature_size=256,
                             regression_feature_size=256,
                             frames_per_batch=1,
                             name='regression_submodel'):
    """Creates the default regression submodel.

    Args:
        num_values (int): Number of values to regress.
        num_anchors (int): Number of anchors to regress for each feature level.
        pyramid_feature_size (int): The number of filters to expect from the
            feature pyramid levels.
        regression_feature_size (int): The number of filters to use in the layers
            in the regression submodel.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        name (str): The name of the submodel.

    Returns:
        tensorflow.keras.Model: A model that predicts regression values
            for each anchor.
    """
    # All new conv layers except the final one in the
    # RetinaNet (classification) subnets are initialized
    # with bias b = 0 and a Gaussian weight fill with stddev = 0.01.
    time_distributed = frames_per_batch > 1

    options = {
        'kernel_size': (3, 3, 3) if time_distributed else 3,
        'strides': 1,
        'padding': 'same',
        'kernel_initializer': RandomNormal(mean=0.0, stddev=0.01, seed=None),
        'bias_initializer': 'zeros'
    }

    shape = [None] * (4 if time_distributed else 3)
    if K.image_data_format() == 'channels_first':
        shape[0] = pyramid_feature_size
    else:
        shape[-1] = pyramid_feature_size
    inputs = Input(shape=shape)
    outputs = inputs
    conv = Conv3D if time_distributed else Conv2D
    for i in range(4):
        outputs = conv(filters=regression_feature_size,
                       activation='relu',
                       name='pyramid_regression_{}'.format(i),
                       **options)(outputs)

    outputs = conv(num_anchors * num_values,
                   name='pyramid_regression',
                   **options)(outputs)

    if K.image_data_format() == 'channels_first':
        rank = 4 if time_distributed else 3
        perm = tuple(list(range(2, rank + 1)) + [1])
        outputs = Permute(perm, name='pyramid_regression_permute')(outputs)

    new_shape = (frames_per_batch, -1, num_values)
    if not time_distributed:
        new_shape = new_shape[1:]

    outputs = Reshape(new_shape, name='pyramid_regression_reshape')(outputs)

    return Model(inputs=inputs, outputs=outputs, name=name)
Example #30
sequence_input = Input(shape=(data.shape[1], ), dtype='int32')  # (batch_size, timesteps)
embedded_sequences = embedding_layer(sequence_input)
x = Bidirectional(LSTM(UNITS,
                       return_sequences=True,
                       dropout=DROP,
                       activity_regularizer=k.regularizers.l2(REG)),
                  merge_mode='concat')(
                      embedded_sequences)  # (batch_size, timesteps, units)
a = TimeDistributed(Dense(UNITS,
                          activity_regularizer=k.regularizers.l2(REG)))(x)
attention = TimeDistributed(Dense(1, activation='tanh', name='timeDense'))(
    a)  # (batch_size, timesteps, 1)
attention = Flatten()(attention)  # (batch size, timesteps)
attention = Activation('softmax')(attention)  # (batch, timesteps)
attention = RepeatVector(UNITS * 2)(attention)  # (batch, units, timesteps)
attention = Permute([2, 1])(attention)  #(batch, timesteps, units)
rejoined = multiply([x, attention])
# rejoined = k.backend.sum(rejoined, axis=-2 , keepdims=False)(rejoined)
# x = LSTM(UNITS, return_sequences=True, dropout=DROP, activity_regularizer=k.regularizers.l2(REG))(rejoined) # (batch_size, timesteps, units)
# x = TimeDistributed(Dense(UNITS, activation='relu', activity_regularizer=k.regularizers.l2(REG)))(x)
# attention = TimeDistributed(Dense(1, activation='tanh', name='timeDense'))(x) # (batch_size, timesteps, 1)
# attention = Flatten()(attention) # (batch size, timesteps)
# attention = Activation('softmax')(attention) # (batch, timesteps)
# attention = RepeatVector(UNITS)(attention) # (batch, units, timesteps)
# attention = Permute([2,1])(attention) #(batch, timesteps, units)
# rejoined = multiply([x, attention])

interm = LSTM(UNITS, activity_regularizer=k.regularizers.l2(REG),
              dropout=DROP)(rejoined)
interm = Dense(UNITS,
               activation='relu',