Code example #1
def residual_block(x):
    """    Residual block    """
    filters = [64, 64]
    kernel_size = 3
    strides = 1
    padding = "same"
    momentum = 0.8
    activation = "relu"
    res = Conv2D(filters=filters[0],
                 kernel_size=kernel_size,
                 strides=strides,
                 padding=padding,
                 activation='relu')(x)

    res = BatchNormalization(momentum=momentum)(res)
    res = PReLU(shared_axes=[1, 2])(res)
    res = Conv2D(filters=filters[1],
                 kernel_size=kernel_size,
                 strides=strides,
                 padding=padding)(res)
    res = BatchNormalization(momentum=momentum)(res)
    # Add res and x
    res = Add()([res, x])
    return res
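This snippet, like most on this page, omits its imports. A minimal usage sketch, assuming the TensorFlow-bundled Keras API and a 64-channel input so the shapes in the final Add() match:

# Sketch only: the imports and the wiring below are assumptions, not part of the original snippet.
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, PReLU, Add
from tensorflow.keras.models import Model

inputs = Input(shape=(None, None, 64))   # channel count must equal filters[0] for the skip Add()
outputs = residual_block(inputs)
model = Model(inputs, outputs)
model.summary()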
Code example #2
File: neural_net.py  Project: andrewchen353/SISRD
def ResNet(lr):
    depth = 16  #v2, v1 = 4
    x_input = Input((64, 64, 1))

    conv1 = Conv2D(1, (3, 3), padding='same', use_bias=True,
                   activation='relu')(x_input)
    # rec = PReLU(alpha_initializer='zeros')(conv1) #v1
    rec = PReLU(alpha_initializer='zeros')(conv1)  #v2

    # DnCNN network
    for i in range(depth):
        rec1 = Conv2D(64, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='relu')(rec)
        rec1 = BatchNormalization(axis=3)(rec1)
        # rec = PReLU(alpha_initializer='zeros')(rec) #v1
        rec1 = LeakyReLU(alpha=0.3)(rec1)  #v2
        if i % 2 == 0:
            rec = Add()([rec, rec1])  # on even iterations the skip connection is added twice
        rec = Add()([rec1, rec])

    conv2 = Conv2D(1, (3, 3), padding='same', use_bias=True,
                   activation='relu')(rec)
    sub = Add()([x_input, conv2])

    conv3 = Conv2D(4, (3, 3), padding='same', use_bias=True,
                   activation='relu')(sub)
    spc1 = SubpixelConv2D(conv3.shape, scale=2)(conv3)

    # model = Model(spc1, x_input) #v1
    model = Model(x_input, spc1)  #v2

    model.compile(loss=loss.rmse, optimizer=Adam(lr=lr), metrics=['accuracy'])

    return model
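loss.rmse and SubpixelConv2D in this snippet are helpers from the SISRD project rather than stock Keras. As a hedged sketch, an RMSE loss compatible with the compile call above can be written with the Keras backend (the project's own implementation may differ):

# Hedged sketch of an RMSE loss; not necessarily identical to the project's loss.rmse.
from tensorflow.keras import backend as K

def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))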
Code example #3
File: layers.py  Project: VAEs-Tutorial/BMI
 def f(x):
     if BN and activation != 'selu':
         if config == 'keras':
             h = BatchNormalization(momentum=momentum)(x, training=training)
         elif config == 'tf' or config == 'tensorflow':
             h = BatchNorm(is_training=training)(x)
         else:
             raise ValueError('config should be either `keras`, `tf` or `tensorflow`')
     else:
         h = x
     if activation is None:
         return h
     if activation in ['prelu', 'leakyrelu', 'elu', 'selu']:
         if activation == 'prelu':
             return PReLU(name=name)(h)
         if activation == 'leakyrelu':
             return LeakyReLU(name=name)(h)
         if activation == 'elu':
             return ELU(name=name)(h)
         if activation == 'selu':
             return Selu()(h)
     else:
         h = Activation(activation, name=name)(h)
         return h
def CNN_Model(seq_len, num_classes, num_features, embedding_matrix=None):

    in_text = Input(shape=(seq_len,))
    op_units, op_activation = _get_last_layer_units_and_activation(num_classes)

    trainable = True
    if embedding_matrix is None:
        x = Embedding(num_features, 64, trainable=trainable)(in_text)
    else:
        x = Embedding(num_features, 300, trainable=trainable, weights=[embedding_matrix])(in_text)

    x = Conv1D(128, kernel_size=5, padding='valid', kernel_initializer='glorot_uniform')(x)
    x = GlobalMaxPooling1D()(x)

    x = Dense(128)(x)
    x = PReLU()(x)
    x = Dropout(0.35)(x)
    x = BatchNormalization()(x)

    y = Dense(op_units, activation=op_activation)(x)

    md = keras.models.Model(inputs = [in_text], outputs=y)

    return md
def RNN_Model(seq_len, num_classes, num_features, embedding_matrix=None):

    in_text = Input(shape=(seq_len,))
    op_units, op_activation = _get_last_layer_units_and_activation(num_classes)

    trainable = True
    if embedding_matrix is None:
        x = Embedding(num_features, 64, trainable=trainable)(in_text)
    else:
        x = Embedding(num_features, 300, trainable=trainable, weights=[embedding_matrix])(in_text)

    x = CuDNNGRU(128, return_sequences=True)(x)
    x = GlobalMaxPooling1D()(x)

    x = Dense(128)(x)
    x = PReLU()(x)
    x = Dropout(0.35)(x)
    x = BatchNormalization()(x)

    y = Dense(op_units, activation=op_activation)(x)

    md = keras.models.Model(inputs = [in_text], outputs=y)

    return md
Code example #6
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])

        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3),
               padding="same",
               kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3),
               padding="same",
               kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3),
               padding="same",
               kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)

    return model
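For context, a hedged usage sketch, assuming the layer and Model names used above come from tensorflow.keras:

# Sketch only: build the SRResNet-style generator and inspect it.
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, PReLU, Add
from tensorflow.keras.models import Model

model = get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16)
model.summary()   # fully convolutional, so any spatial input size is accepted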
Code example #7
def Subpixel(x, number, scale=2, sn=False):
    """
    Keras building block for upsampling with sub-pixel convolution.
    :param x: Your Keras Layer input.
    :param number: This number is used to name the Subpixel Layers.
    :param scale: upsampling scale compared to input_shape. Default=2
    :param sn: If you want to use SpectralNormalization
    """
    if sn:
        x = SpectralNormalization(
            Conv2D(256,
                   kernel_size=3,
                   strides=1,
                   padding='same',
                   name='upSampleConv2d_' + str(number)))(x)
    else:
        x = Conv2D(256,
                   kernel_size=3,
                   strides=1,
                   padding='same',
                   name='upSampleConv2d_' + str(number))(x)
    x = SubpixelConv2D('upSampleSubPixel_' + str(number), scale)(x)
    x = PReLU(shared_axes=[1, 2], name='upSamplePReLU_' + str(number))(x)
    return x
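SubpixelConv2D and SpectralNormalization here are project-specific helpers, not stock Keras layers. A common way to express the sub-pixel (pixel-shuffle) layer, shown as a hedged sketch that matches the call signature used above, is a Lambda around tf.nn.depth_to_space:

# Hedged sketch; the project's own SubpixelConv2D may be implemented differently.
import tensorflow as tf
from tensorflow.keras.layers import Lambda

def SubpixelConv2D(name, scale=2):
    # Rearranges (H, W, C * scale**2) feature maps into (H * scale, W * scale, C).
    return Lambda(lambda x: tf.nn.depth_to_space(x, scale), name=name)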
def cnn_model():
    model = Sequential()
    model.add(Conv2D(128, (10, 3), input_shape = (128, 10, 1), padding = 'same') )
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2,2))) # 64 * 5
    model.add(Dropout(0.3)) 

    model.add(Conv2D(256, (10, 3), padding = 'same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2,2))) # 32 * 2
    model.add(Dropout(0.3))
    
    model.add(Conv2D(512, (10, 3), padding = 'same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2,2))) # 16 * 1
    model.add(Dropout(0.35))

   
    
    model.add(Flatten())
    
    model.add(Dense(units = 512, activation = 'relu'))
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    
    model.add(Dense(units = 256, activation = 'relu'))
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
  
    model.add(Dense(units = 128, activation = 'relu'))
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))

    model.add(Dense(units = 41, activation = 'softmax'))
    model.summary()
    return model
Code example #9
def cnn():
    inputs = Input(shape=(256, 256, 3), name='inputs')
    # conv_1 = Conv2D(16, (7, 7), padding='same', name='conv_1', activation='relu')(inputs)
    # pooling_1 = MaxPooling2D((4, 4), strides=(2, 2), name='pool_1')(conv_1)
    # # drop_1 = Dropout(0.5)(pooling_1)
    # conv_2 = Conv2D(32, (7, 7), padding='same', name='conv_2', activation='relu')(pooling_1)
    # pooling_2 = MaxPooling2D((4, 4), strides=(2, 2), name='pool_2')(conv_2)
    # # drop_2 = Dropout(0.5)(pooling_2)
    # conv_3 = Conv2D(64, (7, 7), padding='same', name='conv_3', activation='relu')(pooling_2)
    # pooling_3 = MaxPooling2D((4, 4), strides=(2, 2), name='pool_3')(conv_3)

    conv_1 = Conv2D(32, (5, 5), padding='same', name='conv_1')(inputs)
    bn_1 = BatchNormalization()(conv_1)
    pr_1 = PReLU()(bn_1)
    conv_2 = Conv2D(32, (5, 5), padding='same', name='conv_2')(pr_1)
    bn_2 = BatchNormalization()(conv_2)
    pr_2 = PReLU()(bn_2)
    pooling_2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool_2')(pr_2)
    conv_3 = Conv2D(64, (5, 5), padding='same', name='conv_3')(pooling_2)
    bn_3 = BatchNormalization()(conv_3)
    pr_3 = PReLU()(bn_3)
    pooling_3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool_3')(pr_3)

    conv_4 = Conv2D(128, (3, 3), padding='same', name='conv_4')(pooling_3)
    bn_4 = BatchNormalization()(conv_4)
    pr_4 = PReLU()(bn_4)
    pooling_4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool_4')(pr_4)
    fl = Flatten(name='flatten')(pooling_4)
    fc_1 = Dense(256, name='fc_1')(fl)
    bn_5 = BatchNormalization()(fc_1)
    pr_5 = PReLU()(bn_5)
    # dro_1 = Dropout(0.5)(fc_1)
    fc_2 = Dense(64, name='fc_2')(pr_5)
    bn_6 = BatchNormalization()(fc_2)
    pr_6 = PReLU()(bn_6)
    # dro_2 = Dropout(0.5)(fc_2)
    out = Dense(3, activation='sigmoid', name='out')(pr_6)

    model = Model(inputs=inputs, outputs=out)
    return model
Code example #10
def bird_model(num_classes):
    #build the model
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     input_shape=(xpixels, ypixels, 1)))
    model.add(PReLU(alpha_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Conv2D(64, kernel_size=(3, 3),
                     input_shape=(xpixels, ypixels, 1)))
    model.add(PReLU(alpha_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))

    model.add(Conv2D(64, kernel_size=(3, 3),
                     input_shape=(xpixels, ypixels, 1)))
    model.add(PReLU(alpha_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))

    model.add(
        Conv2D(128, kernel_size=(3, 3), input_shape=(xpixels, ypixels, 1)))
    model.add(PReLU(alpha_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))

    model.add(
        Conv2D(128, kernel_size=(3, 3), input_shape=(xpixels, ypixels, 1)))
    model.add(PReLU(alpha_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))

    #MLP
    model.add(Flatten())
    model.add(Dense(128))
    model.add(PReLU(alpha_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    return model
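This snippet relies on module-level xpixels and ypixels values and a regularizers import that are not shown. A hedged sketch of the assumed setup (the pixel dimensions below are illustrative placeholders, not the project's values):

# Assumed setup, for illustration only.
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, PReLU, MaxPooling2D, Dropout, Flatten, Dense, Activation

xpixels, ypixels = 128, 128   # placeholder input size

model = bird_model(num_classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])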
Code example #11
    def build_generator(self, residual_blocks=16):
        """
        Build the generator network according to description in the paper.

        :param optimizer: Keras optimizer to use for network
        :param int residual_blocks: How many residual blocks to use
        :return: the compiled model
        """

        # def dense_block(input):
        #     x1 = Conv2D(64, kernel_size=3, strides=1, padding='same')(input)
        #     x1 = LeakyReLU(0.2)(x1)

        #     x2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(Concatenate()([input, x1]))
        #     x2 = LeakyReLU(0.2)(x2)

        #     x3 = Conv2D(64, kernel_size=3, strides=1, padding='same')(Concatenate()([input, x1, x2]))
        #     x3 = LeakyReLU(0.2)(x3)

        #     x4 = Conv2D(64, kernel_size=3, strides=1, padding='same')(Concatenate()([input, x1, x2, x3]))
        #     x4 = LeakyReLU(0.2)(x4)

        #     x5 = Conv2D(64, kernel_size=3, strides=1, padding='same')(Concatenate()([input, x1, x2, x3, x4]))
        #     x5 = Lambda(lambda x: x * 0.2)(x5)
        #     x = Add()([x5, input])
        #     return x

        # def RRDB(input):
        #     x = dense_block(input)
        #     x = dense_block(x)
        #     x = dense_block(x)
        #     x = Lambda(lambda x: x * 0.2)(x)
        #     out = Add()([x, input])
        #     return out

        def residual_block(input):
            x = Conv2D(64, kernel_size=3, strides=1, padding='same')(input)
            x = BatchNormalization(momentum=0.8)(x)
            x = PReLU(shared_axes=[1, 2])(x)
            x = Conv2D(64, kernel_size=3, strides=1, padding='same')(x)
            x = BatchNormalization(momentum=0.8)(x)
            x = Add()([x, input])
            return x

        def upsample(x, number):
            x = Conv2D(256,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       name='upSampleConv2d_' + str(number))(x)
            x = self.SubpixelConv2D('upSampleSubPixel_' + str(number), 2)(x)
            x = PReLU(shared_axes=[1, 2],
                      name='upSamplePReLU_' + str(number))(x)
            return x

        # Input low resolution image
        lr_input = Input(shape=(None, None, 3))

        # Pre-residual
        x_start = Conv2D(64, kernel_size=9, strides=1,
                         padding='same')(lr_input)
        x_start = PReLU(shared_axes=[1, 2])(x_start)

        # Residual blocks
        r = residual_block(x_start)
        for _ in range(residual_blocks - 1):
            r = residual_block(r)

        # Post-residual block
        x = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
        x = BatchNormalization(momentum=0.8)(x)
        x = Add()([x, x_start])

        # Upsampling depending on factor
        x = upsample(x, 1)
        if self.upscaling_factor > 2:
            x = upsample(x, 2)
        if self.upscaling_factor > 4:
            x = upsample(x, 3)

        # Generate high resolution output
        # tanh activation, see:
        # https://towardsdatascience.com/gan-ways-to-improve-gan-performance-acf37f9f59b
        hr_output = Conv2D(self.channels,
                           kernel_size=9,
                           strides=1,
                           padding='same',
                           activation='tanh')(x)

        # Create model and compile
        model = Model(inputs=lr_input, outputs=hr_output)
        return model
Code example #12
def create_model(x_train, y_train, x_val, y_val, x_test, y_test):

    if sys.argv[1] == 'german':
        input_n = 24
    elif sys.argv[1] == 'australian':
        input_n = 15

    batch_size = 32
    epochs = 500
    inits = [
        'Zeros', 'Ones', 'RandomNormal', 'RandomUniform', 'TruncatedNormal',
        'Orthogonal', 'lecun_uniform', 'lecun_normal', 'he_uniform',
        'he_normal', 'glorot_uniform', 'glorot_normal'
    ]
    acts = [
        'tanh', 'softsign', 'sigmoid', 'hard_sigmoid', 'relu', 'softplus',
        'LeakyReLU', 'PReLU', 'elu', 'selu'
    ]
    init = inits[int({{quniform(0, 11, 1)}})]
    act = acts[9]

    neurons = int({{quniform(9, 180, 9)}})
    layers = {{choice([1, 2, 4, 8])}}
    norm = {{choice(['no', 'l1', 'l2'])}}
    dropout = {{choice([0, 1])}}
    earlystop = {{choice([0, 1])}}
    k1 = None
    k2 = None
    p = None

    if norm == 'no':
        reg = None
    elif norm == 'l1':
        k1 = {{loguniform(-9.2, -2.3)}}
        reg = regularizers.l1(k1)
    elif norm == 'l2':
        k2 = {{loguniform(-9.2, -2.3)}}
        reg = regularizers.l2(k2)

    X_input = Input((input_n, ))
    X = X_input

    for _ in range(layers):
        X = Dense(
            neurons,
            kernel_initializer=init,
            kernel_regularizer=reg,
        )(X)

        if act == 'LeakyReLU':
            X = LeakyReLU()(X)
        elif act == 'PReLU':
            X = PReLU()(X)
        else:
            X = Activation(act)(X)

        if dropout == 1:
            p = {{uniform(0, 1)}}
            X = Dropout(p)(X)

    X = Dense(1, kernel_initializer=init, kernel_regularizer=reg)(X)
    X_outputs = Activation('sigmoid')(X)

    model = Model(inputs=X_input, outputs=X_outputs)
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=['accuracy'],
    )

    patience = int({{quniform(1, 500, 1)}})
    es = EarlyStopping(
        monitor='val_loss',
        patience=patience,
        verbose=0,
        mode='auto',
    )
    if earlystop == 1:
        model.fit(
            x_train,
            y_train,
            batch_size=batch_size,
            verbose=0,
            epochs=epochs,
            validation_data=(x_val, y_val),
            callbacks=[es],
        )
    else:
        model.fit(
            x_train,
            y_train,
            batch_size=batch_size,
            verbose=0,
            epochs=epochs,
            validation_data=(x_val, y_val),
        )

    loss_t, score_t = model.evaluate(x_train, y_train, verbose=0)
    loss_v, score_v = model.evaluate(x_val, y_val, verbose=0)
    loss_te, score_te = model.evaluate(x_test, y_test, verbose=0)

    print(init + '\t' + act + '\t' + str(neurons) + '\t' + str(layers) + '\t' +
          str(norm) + '\t' + str(dropout) + '\t' + str(earlystop) +
          '%-24s%-24s%-24s%s' % (str(k1), str(k2), str(p), str(patience)) +
          '  ' + str(score_v) + '  ' + str(loss_v) + '  ' + str(score_te) +
          '  ' + str(loss_te))
    return {'loss': loss_v, 'status': STATUS_OK, 'model': model}
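The double-brace expressions such as {{quniform(0, 11, 1)}} and {{choice([...])}} are hyperas search-space templates, not valid Python; hyperas rewrites the function before executing it. A hedged sketch of the usual driver, where the data function is an assumed placeholder that must return the six arrays create_model expects:

# Hedged sketch of a typical hyperas run; names other than create_model are assumptions.
from hyperas import optim
from hyperopt import tpe, Trials

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=50,
                                      trials=Trials())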
Code example #13
def get_iseg_experimental3(input_shape, filters_list, kernel_size_list,
                           dense_size):
    merged_inputs = Input(shape=input_shape, name='merged_inputs')
    # Input splitting
    input_shape = K.int_shape(merged_inputs)
    t1 = Lambda(lambda l: K.expand_dims(l[:, 0, :, :, :], axis=1),
                output_shape=(1, ) + input_shape[2:])(merged_inputs)
    t2 = Lambda(lambda l: K.expand_dims(l[:, 1, :, :, :], axis=1),
                output_shape=(1, ) + input_shape[2:])(merged_inputs)

    # Convolutional part
    t2 = get_convolutional_block(t2, filters_list, kernel_size_list)
    t1 = get_convolutional_block(t1, filters_list, kernel_size_list)

    # Tissue binary stuff
    t2_f = Flatten()(t2)
    t1_f = Flatten()(t1)
    t2_f = Dense(dense_size, activation='relu')(t2_f)
    t2_f = Dropout(0.5)(t2_f)
    t1_f = Dense(dense_size, activation='relu')(t1_f)
    t1_f = Dropout(0.5)(t1_f)
    merged = concatenate([t2_f, t1_f])
    csf, gm, wm, csf_out, gm_out, wm_out = get_tissue_binary_stuff(merged)

    full = Conv3D(dense_size,
                  kernel_size=(1, 1, 1),
                  data_format='channels_first')(concatenate([t1, t2], axis=1))
    full = PReLU()(full)
    full = Conv3D(dense_size // 2,
                  kernel_size=(1, 1, 1),
                  data_format='channels_first')(full)
    full = PReLU()(full)
    full = Conv3D(dense_size // 4,
                  kernel_size=(1, 1, 1),
                  data_format='channels_first')(full)
    full = PReLU()(full)
    full = Conv3D(4, kernel_size=(1, 1, 1), data_format='channels_first')(full)

    full_shape = K.int_shape(full)

    # x LSTM
    x_combos = product(range(full_shape[-2]), range(full_shape[-1]))
    lambda_x = Lambda(lambda l: l[:, :, :, i, j],
                      output_shape=(4, full_shape[-3]))
    lambda_x_rev = Lambda(lambda l: l[:, :, -1::-1, i, j],
                          output_shape=(4, full_shape[-3]))
    x_input = [lambda_x(PReLU()(full)) for (i, j) in x_combos
               ] + [lambda_x_rev(PReLU()(full)) for (i, j) in x_combos]
    x_lstm = [LSTM(4, implementation=1)(x) for x in x_input]

    # y LSTM
    y_combos = product(range(full_shape[-3]), range(full_shape[-1]))
    lambda_y = Lambda(lambda l: l[:, :, i, :, j],
                      output_shape=(4, full_shape[-2]))
    lambda_y_rev = Lambda(lambda l: l[:, :, i, -1::-1, j],
                          output_shape=(4, full_shape[-2]))
    y_input = [lambda_y(PReLU()(full)) for (i, j) in y_combos
               ] + [lambda_y_rev(PReLU()(full)) for (i, j) in y_combos]
    y_lstm = [LSTM(4, implementation=1)(y) for y in y_input]

    # z LSTM
    z_combos = product(range(full_shape[-3]), range(full_shape[-2]))
    lambda_z = Lambda(lambda l: l[:, :, i, j, :],
                      output_shape=(4, full_shape[-1]))
    lambda_z_rev = Lambda(lambda l: l[:, :, i, j, -1::-1],
                          output_shape=(4, full_shape[-1]))
    z_input = [lambda_z(PReLU()(full)) for (i, j) in z_combos
               ] + [lambda_z_rev(PReLU()(full)) for (i, j) in z_combos]
    z_lstm = [LSTM(4, implementation=1)(PReLU()(z)) for z in z_input]

    # Final LSTM
    rf = Average()(x_lstm + y_lstm + z_lstm)

    # FC labeling
    full = Reshape((4, -1))(full)
    full = Permute((2, 1))(full)
    full_out = Activation('softmax', name='fc_out')(full)
    # rf = LSTM(4, implementation=1)(Reshape((4, -1))(full))
    # rf = Dense(4)(Reshape((4, -1))(full))

    # Final labeling
    merged = concatenate(
        [t2_f, t1_f,
         PReLU()(csf),
         PReLU()(gm),
         PReLU()(wm),
         PReLU()(rf)])
    # merged = concatenate([t2_f, t1_f, PReLU()(csf), PReLU()(gm), PReLU()(wm), PReLU()(Flatten()(full))])
    merged = Dropout(0.5)(merged)
    brain = Dense(4, name='brain', activation='softmax')(merged)

    # Weights and outputs
    weights = [0.2, 0.5, 0.5, 1.0, 0.8]
    outputs = [csf_out, gm_out, wm_out, brain, full_out]

    return compile_network(merged_inputs, outputs, weights)
Code example #14
    def buildModel(self):
        params = self.params

        if self.params['charEmbeddings'] not in [
                None, "None", "none", False, "False", "false"
        ]:
            self.padCharacters()

        embeddings = self.embeddings
        casing2Idx = self.dataset['mappings']['casing']

        caseMatrix = np.identity(len(casing2Idx), dtype='float32')

        tokens = Sequential()
        tokens.add(
            Embedding(input_dim=embeddings.shape[0],
                      output_dim=embeddings.shape[1],
                      weights=[embeddings],
                      trainable=False,
                      name='token_emd'))

        casing = Sequential()
        # casing.add(Embedding(input_dim=len(casing2Idx), output_dim=self.addFeatureDimensions, trainable=True))
        casing.add(
            Embedding(input_dim=caseMatrix.shape[0],
                      output_dim=caseMatrix.shape[1],
                      weights=[caseMatrix],
                      trainable=False,
                      name='casing_emd'))

        mergeLayers = [tokens, casing]

        if self.additionalFeatures != None:
            for addFeature in self.additionalFeatures:
                maxAddFeatureValue = max([
                    max(sentence[addFeature])
                    for sentence in self.dataset['trainMatrix'] +
                    self.dataset['devMatrix'] + self.dataset['testMatrix']
                ])
                addFeatureEmd = Sequential()
                addFeatureEmd.add(
                    Embedding(input_dim=maxAddFeatureValue + 1,
                              output_dim=self.params['addFeatureDimensions'],
                              trainable=True,
                              name=addFeature + '_emd'))
                mergeLayers.append(addFeatureEmd)

        # :: Character Embeddings ::
        if params['charEmbeddings'] not in [
                None, "None", "none", False, "False", "false"
        ]:
            charset = self.dataset['mappings']['characters']
            charEmbeddingsSize = params['charEmbeddingsSize']
            maxCharLen = self.maxCharLen
            charEmbeddings = []
            for _ in charset:
                limit = math.sqrt(3.0 / charEmbeddingsSize)
                vector = np.random.uniform(-limit, limit, charEmbeddingsSize)
                charEmbeddings.append(vector)

            charEmbeddings[0] = np.zeros(charEmbeddingsSize)  # Zero padding
            charEmbeddings = np.asarray(charEmbeddings)

            chars = Sequential()
            chars.add(
                TimeDistributed(Embedding(input_dim=charEmbeddings.shape[0],
                                          output_dim=charEmbeddings.shape[1],
                                          weights=[charEmbeddings],
                                          trainable=True,
                                          mask_zero=True),
                                input_shape=(None, maxCharLen),
                                name='char_emd'))

            if params['charEmbeddings'].lower(
            ) == 'lstm':  # Use LSTM for char embeddings from Lample et al., 2016
                charLSTMSize = params['charLSTMSize']
                chars.add(
                    TimeDistributed(Bidirectional(
                        LSTM(charLSTMSize, return_sequences=False)),
                                    name="char_lstm"))
            else:  # Use CNNs for character embeddings from Ma and Hovy, 2016
                charFilterSize = params['charFilterSize']
                charFilterLength = params['charFilterLength']
                chars.add(
                    TimeDistributed(Convolution1D(charFilterSize,
                                                  charFilterLength,
                                                  border_mode='same'),
                                    name="char_cnn"))
                chars.add(
                    TimeDistributed(GlobalMaxPooling1D(), name="char_pooling"))

            mergeLayers.append(chars)
            if self.additionalFeatures == None:
                self.additionalFeatures = []

            self.additionalFeatures.append('characters')

        model = Sequential()
        model.add(Merge(mergeLayers, mode='concat'))
        for i in range(params['layers']):
            act = params["activation"]
            #act = LeakyReLU()
            #act = PReLU(init='zero',weights=None)
            if act not in ["leakyrelu..."]:  #act = LeakyReLU()
                # model.add(Bidirectional(
                #         #     LSTM(size, activation=params['activation'], return_sequences=True, dropout_W=params['dropout'][0],
                #         #          dropout_U=params['dropout'][1]), name="varLSTM_" + str(cnt)))

                model.add(
                    Bidirectional(
                        LSTM(units=params['LSTM-Size'][0],
                             activation="tanh",
                             recurrent_activation=act,
                             recurrent_initializer=params['init'],
                             return_sequences=True,
                             dropout=params['dropout'][0],
                             recurrent_dropout=params['dropout'][1])))
            elif act == "leakyrelu" and False:
                model.add(
                    SimpleRNN(units=params['LSTM-Size'][0],
                              activation="linear",
                              recurrent_initializer=params['init'],
                              return_sequences=True,
                              dropout=params['dropout'][0],
                              recurrent_dropout=params['dropout'][1]))
                model.add(LeakyReLU(alpha=float(params["activation_flag"])))
            elif act == "prelu" and False:
                model.add(
                    SimpleRNN(units=params['LSTM-Size'][0],
                              activation="linear",
                              recurrent_initializer=params['init'],
                              return_sequences=True,
                              dropout=params['dropout'][0],
                              recurrent_dropout=params['dropout'][1]))
                model.add(PReLU(init='zero', weights=None))

        # Add LSTMs
        cnt = 1

        # Softmax Decoder
        if params['classifier'].lower() == 'softmax':
            model.add(
                TimeDistributed(Dense(len(
                    self.dataset['mappings'][self.labelKey]),
                                      activation='softmax'),
                                name='softmax_output'))
            lossFct = 'sparse_categorical_crossentropy'
        elif params['classifier'].lower() == 'crf':
            model.add(
                TimeDistributed(Dense(
                    len(self.dataset['mappings'][self.labelKey])),
                                name='hidden_layer'))
            crf = ChainCRF()
            model.add(crf)
            lossFct = crf.sparse_loss
        elif params['classifier'].lower() == 'tanh-crf':
            model.add(
                TimeDistributed(Dense(len(
                    self.dataset['mappings'][self.labelKey]),
                                      activation='tanh'),
                                name='hidden_layer'))
            crf = ChainCRF()
            model.add(crf)
            lossFct = crf.sparse_loss
        else:
            print("Please specify a valid classifier")
            assert (False)  # Wrong classifier

        optimizerParams = {}
        if 'clipnorm' in self.params and self.params[
                'clipnorm'] != None and self.params['clipnorm'] > 0:
            optimizerParams['clipnorm'] = self.params['clipnorm']

        if 'clipvalue' in self.params and self.params[
                'clipvalue'] != None and self.params['clipvalue'] > 0:
            optimizerParams['clipvalue'] = self.params['clipvalue']

        #if learning_rate in self.params:
        #    optimizerParams["learning_rate"] = self.params["learning_rate"]
        learning_rate = self.params["learning_rate"]

        if params['optimizer'].lower() == 'adam':
            opt = Adam(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'nadam':
            opt = Nadam(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'rmsprop':
            opt = RMSprop(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'adadelta':
            opt = Adadelta(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'adagrad':
            opt = Adagrad(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'sgd':
            opt = SGD(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'adamax':
            opt = Adamax(lr=learning_rate, **optimizerParams)

        model.compile(loss=lossFct, optimizer=opt)

        self.model = model
        if self.verboseBuild:
            model.summary()
            logging.debug(model.get_config())
            logging.debug("Optimizer: %s, %s" %
                          (str(type(opt)), str(opt.get_config())))
Code example #15
def fsrcnn(inputs, scale=4, n_channels=3, reg_strength=0.001):
    x = inputs

    ## Conv. Layer 1: feature extraction
    x = Conv2D(56, kernel_size=5, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_1_conv')(x)
    x = BatchNormalization(name='sr_1_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_1_p')(x)

    ## Conv. Layer 2: Shrinking
    x = Conv2D(12, kernel_size=1, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_2_conv')(x)
    x = BatchNormalization(name='sr_2_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_2_p')(x)

    ## Conv. Layers 3–6: Mapping
    x = Conv2D(12, kernel_size=3, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_3_conv')(x)
    x = BatchNormalization(name='sr_3_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_3_p')(x)

    x = Conv2D(12, kernel_size=3, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_4_conv')(x)
    x = BatchNormalization(name='sr_4_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_4_p')(x)

    x = Conv2D(12, kernel_size=3, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_5_conv')(x)
    x = BatchNormalization(name='sr_5_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_5_p')(x)

    x = Conv2D(12, kernel_size=3, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_6_conv')(x)
    x = BatchNormalization(name='sr_6_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_6_p')(x)

    ## Conv. Layer 7: Expanding
    x = Conv2D(56, kernel_size=1, padding='same',
               kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(reg_strength),
               name='sr_7_conv')(x)
    x = BatchNormalization(name='sr_7_bn')(x)
    x = PReLU(alpha_initializer=he_normal(), name='sr_7_p')(x)

    ## Layer 8: upsampling (sub-pixel convolution in place of FSRCNN's deconvolution)
    x = Conv2D(3 * scale ** 2, 9, padding='same', name='sr_8_conv_up{0}'.format(scale),
               kernel_initializer=he_normal())(x)

    if n_channels == 1:
        x = SubpixelConv2D(scale)(x)
        x1 = Conv2D(1, 1, padding='same', name='sr', kernel_initializer=he_normal())(x)
    else:
        x1 = SubpixelConv2D(scale, name='sr')(x)

    return x1
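fsrcnn returns an output tensor rather than a Model, and it depends on a project-level SubpixelConv2D helper plus the usual Keras layer, he_normal and regularizers imports. A hedged wiring sketch, assuming tensorflow.keras:

# Sketch only: wrap the functional fsrcnn graph into a trainable Model.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

lr_in = Input(shape=(None, None, 3))          # low-resolution RGB input
sr_out = fsrcnn(lr_in, scale=4, n_channels=3)
model = Model(lr_in, sr_out, name='fsrcnn_x4')
model.compile(optimizer='adam', loss='mse')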
Code example #16
    def parallel_dilated_conv(self,
                              input,
                              filters=16,
                              name_base="D1",
                              last_filters=64):
        # dalition convolution
        kernel_size = (3, 3)
        # dilation_rate=1

        d1_tdc1 = self.TD_conv2d(last_layer=input,
                                 filters=filters,
                                 name=name_base + "_d1t1")
        d1_tdc1 = TimeDistributed(PReLU(), name=name_base + "_d1t1P1")(d1_tdc1)

        d1_tdc1 = BatchNormalization(axis=-1,
                                     name=name_base + "_d1t1BN1")(d1_tdc1)
        d1_tdc2 = self.TD_conv2d(last_layer=d1_tdc1,
                                 filters=filters,
                                 stride=(2, 2),
                                 name=name_base + "_d1t2")
        d1_tdc2 = TimeDistributed(PReLU(), name=name_base + "_d1t1P2")(d1_tdc2)

        # dilation_rate=2
        d2_tdc1 = self.TD_conv2d(last_layer=input,
                                 filters=filters,
                                 dilation_rate=(2, 2),
                                 name=name_base + "_d2t1")
        d2_tdc1 = TimeDistributed(PReLU(), name=name_base + "_d2t1P1")(d2_tdc1)

        d2_tdc1 = BatchNormalization(axis=-1,
                                     name=name_base + "_d2t1BN1")(d2_tdc1)
        d2_tdc2 = self.TD_conv2d(last_layer=d2_tdc1,
                                 filters=filters,
                                 stride=(2, 2),
                                 name=name_base + "_d2t2")
        d2_tdc2 = TimeDistributed(PReLU(), name=name_base + "_d2t1P2")(d2_tdc2)

        # dilation_rate=3
        d3_tdc1 = self.TD_conv2d(last_layer=input,
                                 filters=filters,
                                 dilation_rate=(2, 2),
                                 name=name_base + "_d3t1")
        d3_tdc1 = TimeDistributed(PReLU(), name=name_base + "_d3t1P1")(d3_tdc1)

        d3_tdc1 = BatchNormalization(axis=-1,
                                     name=name_base + "_d3t1BN1")(d3_tdc1)
        d3_tdc2 = self.TD_conv2d(last_layer=d3_tdc1,
                                 filters=filters,
                                 stride=(2, 2),
                                 name=name_base + "_d3t2")
        d3_tdc2 = TimeDistributed(PReLU(), name=name_base + "_d3t1P2")(d3_tdc2)

        # concat and Conv
        concat_1 = concatenate(inputs=[d1_tdc2, d2_tdc2, d3_tdc2], axis=-1)
        r3 = BatchNormalization(axis=-1, name=name_base + "concatBN")(concat_1)
        r3 = TimeDistributed(Conv2D(last_filters,
                                    kernel_size=(1, 1),
                                    padding='same'),
                             name=name_base + "fusion")(r3)
        r3 = TimeDistributed(PReLU(), name=name_base + "fusion_P1")(r3)
        return r3
Code example #17
    def PPG_extractor_model(self):
        input_video = Input(shape=self.input_shape)

        tdc1 = self.TD_conv2d(last_layer=input_video, filters=32, name="tdc1")
        tdc1 = TimeDistributed(PReLU())(tdc1)
        tdc1 = BatchNormalization(axis=-1)(tdc1)

        tdc2 = self.TD_conv2d(last_layer=tdc1,
                              filters=32,
                              stride=(2, 2),
                              name="tdc2")
        tdc2 = TimeDistributed(PReLU())(tdc2)
        tdc2 = BatchNormalization(axis=-1)(tdc2)

        # dilation conv
        d_out1 = self.parallel_dilated_conv(tdc2,
                                            filters=16,
                                            name_base="D1",
                                            last_filters=48)

        d_out2 = self.parallel_dilated_conv(d_out1,
                                            filters=32,
                                            name_base="D2",
                                            last_filters=96)

        tdc3 = self.TD_conv2d(last_layer=d_out2, filters=128, name="tdc3")
        tdc3 = TimeDistributed(PReLU())(tdc3)
        tdc3 = BatchNormalization(axis=-1)(tdc3)

        tdc4 = self.TD_conv2d(last_layer=tdc3,
                              filters=192,
                              stride=(2, 2),
                              name="tdc4")
        tdc4 = TimeDistributed(PReLU())(tdc4)
        tdc4 = BatchNormalization(axis=-1)(tdc4)

        tdc5 = self.TD_conv2d(last_layer=tdc4, filters=192, name="tdc5")
        tdc5 = TimeDistributed(PReLU())(tdc5)
        tdc5 = BatchNormalization(axis=-1)(tdc5)

        tdc6 = self.TD_conv2d(last_layer=tdc5,
                              filters=256,
                              stride=(2, 2),
                              name="tdc6")
        tdc6 = TimeDistributed(PReLU())(tdc6)
        # d_tdc10 = BatchNormalization(axis=-1)(d_tdc10)

        pool_size = (4, 4)
        atten_pool = TimeDistributed(AveragePooling2D(pool_size))(tdc6)
        atten_squeez = Reshape((self.input_shape[0], 256))(atten_pool)

        bn_lstm1 = BatchNormalization(axis=-1)(atten_squeez)
        lstm_1 = Bidirectional(LSTM(96, return_sequences=True,
                                    dropout=0.25))(bn_lstm1)
        lstm_2 = Bidirectional(LSTM(32, return_sequences=True,
                                    dropout=0.25))(lstm_1)
        lstm_3 = LSTM(8, return_sequences=True)(lstm_2)

        bn_conv1d = BatchNormalization(axis=-1)(lstm_3)
        conv1d_1 = Conv1D(filters=1,
                          kernel_size=(3, ),
                          padding="same",
                          name="conv1d_1",
                          activation="relu")(bn_conv1d)
        PPG_out = Reshape((60, ), name="PPG_out")(conv1d_1)
        model = Model(inputs=input_video, outputs=PPG_out)
        return model
Code example #18
    def create_net(self):
        e_input = Input(shape=(self.input_shape, self.input_shape, 1))
        x = Conv2D(self.filters, (3, 3), padding='same')(e_input)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        shape_before_flatten = K.int_shape(x)[1:]
        x = Flatten()(x)

        self.encoder_mu = Dense(self.latent_dim)(x)
        self.encoder_log_variance = Dense(self.latent_dim)(x)

        e_output = Lambda(
            self.sampling)([self.encoder_mu, self.encoder_log_variance])

        # Keep the encoder part
        self.e = Model(e_input, e_output)

        # And now the decoder part
        d_input = Input(shape=[self.latent_dim])
        x = Dense(np.prod(shape_before_flatten))(d_input)
        x = Reshape(target_shape=shape_before_flatten)(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)
        d_output = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

        self.d = Model(d_input, d_output)

        # Finalize the VAE
        vae_input = Input(shape=(self.input_shape, self.input_shape, 1))
        vae_enc_out = self.e(vae_input)
        vae_dec_out = self.d(vae_enc_out)

        self.vae = Model(vae_input, vae_dec_out)
        return
Code example #19
def bn_prelu(x):
    x = BatchNormalization()(x)
    x = PReLU()(x)
    return x
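bn_prelu is a small composable helper. A hedged usage sketch, assuming tensorflow.keras imports:

# Sketch: apply bn_prelu after a convolution in a functional model.
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, PReLU
from tensorflow.keras.models import Model

inputs = Input(shape=(64, 64, 3))
x = Conv2D(32, (3, 3), padding='same')(inputs)
x = bn_prelu(x)                 # BatchNormalization followed by PReLU
model = Model(inputs, x)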
    def create_net(self, input_shape):
        net_input = Input(shape=input_shape)
        x = Conv2D(self.filters, (3, 3), padding='same')(net_input)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = MaxPooling2D((2, 2), padding='same')(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        self.encoded = MaxPooling2D((2, 2), padding='same')(x)

        # Keep the encoder part
        self.encoder = Model(net_input, self.encoded)

        # And now the decoder part
        x = Conv2D(self.filters, (3, 3), padding='same')(self.encoded)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)

        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = Conv2D(self.filters, (3, 3), padding='same')(x)
        x = PReLU(alpha_initializer=Constant(value=0.25))(x)
        x = UpSampling2D((2, 2))(x)
        self.decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

        self.model = Model(net_input, self.decoded)
        return
Code example #21
    def Fusion_model(self):
        input_video = Input(shape=self.input_shape)

        tdc1 = self.TD_conv2d(last_layer=input_video, filters=32, name="tdc1")
        tdc1 = TimeDistributed(PReLU())(tdc1)
        tdc1 = BatchNormalization(axis=-1)(tdc1)

        tdc2 = self.TD_conv2d(last_layer=tdc1,
                              filters=32,
                              stride=(2, 2),
                              name="tdc2")
        tdc2 = TimeDistributed(PReLU())(tdc2)
        tdc2 = BatchNormalization(axis=-1)(tdc2)

        # dilation conv
        d_out1 = self.parallel_dilated_conv(tdc2,
                                            filters=16,
                                            name_base="D1",
                                            last_filters=48)

        d_out2 = self.parallel_dilated_conv(d_out1,
                                            filters=32,
                                            name_base="D2",
                                            last_filters=96)

        tdc3 = self.TD_conv2d(last_layer=d_out2, filters=128, name="tdc3")
        tdc3 = TimeDistributed(PReLU())(tdc3)
        tdc3 = BatchNormalization(axis=-1)(tdc3)

        tdc4 = self.TD_conv2d(last_layer=tdc3,
                              filters=192,
                              stride=(2, 2),
                              name="tdc4")
        tdc4 = TimeDistributed(PReLU())(tdc4)
        tdc4 = BatchNormalization(axis=-1)(tdc4)

        tdc5 = self.TD_conv2d(last_layer=tdc4, filters=192, name="tdc5")
        tdc5 = TimeDistributed(PReLU())(tdc5)
        tdc5 = BatchNormalization(axis=-1)(tdc5)

        tdc6 = self.TD_conv2d(last_layer=tdc5,
                              filters=256,
                              stride=(2, 2),
                              name="tdc6")
        tdc6 = TimeDistributed(PReLU())(tdc6)
        # d_tdc10 = BatchNormalization(axis=-1)(d_tdc10)

        pool_size = (4, 4)
        atten_pool = TimeDistributed(AveragePooling2D(pool_size))(tdc6)
        atten_squeez = Reshape((self.input_shape[0], 256))(atten_pool)

        bn_lstm1 = BatchNormalization(axis=-1)(atten_squeez)
        lstm_1 = Bidirectional(LSTM(96, return_sequences=True,
                                    dropout=0.25))(bn_lstm1)
        lstm_2 = Bidirectional(LSTM(32, return_sequences=True,
                                    dropout=0.25))(lstm_1)
        lstm_3 = LSTM(8, return_sequences=True)(lstm_2)

        bn_conv1d = BatchNormalization(axis=-1)(lstm_3)
        conv1d_1 = Conv1D(filters=1,
                          kernel_size=(3, ),
                          padding="same",
                          name="conv1d_1",
                          activation="relu")(bn_conv1d)
        PPG_out = Reshape((60, ), name="PPG_out")(conv1d_1)

        input_BN = BatchNormalization(axis=-1, name="HR_BN_input")(conv1d_1)
        lstm_1 = Bidirectional(LSTM(32, return_sequences=True),
                               name="HR_bilstm1")(input_BN)
        lstm_2 = Bidirectional(LSTM(24, return_sequences=True),
                               name="HR_bilstm2")(lstm_1)
        lstm_3 = Bidirectional(LSTM(8, return_sequences=True),
                               name="HR_bilstm3")(lstm_2)
        lstm_4 = LSTM(1, return_sequences=True, name="HR_lstm_4")(lstm_3)

        HR_lstm_squeez = Reshape((60, ), name="HR_reshape")(lstm_4)

        dense_1 = Dense(32,
                        activation="tanh",
                        kernel_regularizer=regularizers.l2(0.001),
                        name="HR_dense_1")(HR_lstm_squeez)
        dense_1 = Dropout(0.25, name="HR_dropout_1")(dense_1)
        HR_out = Dense(1, activation="relu", name="HR_out")(dense_1)

        model = Model(inputs=input_video, outputs=[HR_out, PPG_out])

        return model
Code example #22
def generator(options):
    """ A function that defines the generator based on the specified options.
    """

    skips = []
    num_layers = len(options['generator_encoder_num_kernels'])
    audio_shape = (options['window_length'], options['feat_dim'])
    generator_encoder_num_kernels = options['generator_encoder_num_kernels']
    generator_decoder_num_kernels = options['generator_decoder_num_kernels']
    filter_length = options['filter_length']
    strides = options['strides']
    padding = options['padding']
    use_bias = options['use_bias']
    std_dev = options['initializer_std_dev']
    show_summary = options['show_summary']
    z_in_use = options['z_in_use']

    ## Define the encoder
    encoder_in = Input(shape=audio_shape)
    encoder_out = encoder_in

    for layer_i, num_kernels in enumerate(generator_encoder_num_kernels):
        # Add convolution layer
        encoder_out = Conv1D(
            num_kernels,
            filter_length,
            strides=strides,
            padding=padding,
            use_bias=use_bias,
            kernel_initializer=tf.truncated_normal_initializer(
                stddev=std_dev))(encoder_out)

        # Add skip connections
        if layer_i < num_layers - 1:
            skips.append(encoder_out)

        # Apply PReLU
        encoder_out = PReLU(alpha_initializer='zeros',
                            weights=None)(encoder_out)

    ## Define the intermediate noise layer z
    z_dim = options['z_dim']
    # z = Input(shape=z_dim, name='noise_input')
    z = Input(shape=z_dim)

    ## Define the decoder
    if z_in_use:
        decoder_out = keras.layers.concatenate([encoder_out, z])
    else:
        decoder_out = encoder_out

    # Shape variables updated through the loop
    n_rows = z_dim[0]
    n_cols = decoder_out.get_shape().as_list()[-1]

    for layer_i, num_kernels in enumerate(generator_decoder_num_kernels):
        shape_in = decoder_out.get_shape().as_list()

        # Need to transform the data to be in 3D, as conv2dtranspose need 3D input
        new_shape = (shape_in[1], 1, shape_in[2])
        decoder_out = Reshape(new_shape)(decoder_out)
        decoder_out = Conv2DTranspose(
            num_kernels, [filter_length, 1],
            strides=[strides, 1],
            padding=padding,
            use_bias=use_bias,
            kernel_initializer=tf.truncated_normal_initializer(
                stddev=std_dev))(decoder_out)

        # Reshape back to 2D
        n_rows = strides * n_rows
        n_cols = num_kernels
        decoder_out.set_shape([None, n_rows, 1, n_cols])
        new_shape = (n_rows, n_cols)

        decoder_out = Reshape(new_shape)(decoder_out)

        if layer_i < num_layers - 1:
            # Apply PReLU
            decoder_out = PReLU(alpha_initializer='zeros',
                                weights=None)(decoder_out)
            # Add skip connections
            skips_dec = skips[-(layer_i + 1)]
            decoder_out = keras.layers.concatenate([decoder_out, skips_dec])

    ## Create the model graph
    if z_in_use:
        G = Model(inputs=[encoder_in, z], outputs=decoder_out)
    else:
        G = Model(inputs=[encoder_in], outputs=decoder_out)

    if show_summary:
        G.summary()

    return G
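The keys read by generator() come directly from the code above; a hedged example of the options dict is sketched below, with all values being illustrative placeholders rather than the project's configuration:

# Hedged example options; keys are taken from the snippet, values are assumptions.
options = {
    'window_length': 16384,
    'feat_dim': 1,
    'generator_encoder_num_kernels': [16, 32, 32, 64, 64, 128, 128, 256, 256, 512, 1024],
    'generator_decoder_num_kernels': [512, 256, 256, 128, 128, 64, 64, 32, 32, 16, 1],
    'filter_length': 31,
    'strides': 2,
    'padding': 'same',
    'use_bias': True,
    'initializer_std_dev': 0.02,
    'show_summary': False,
    'z_in_use': True,
    'z_dim': (8, 1024),   # first entry should match the encoder output length
}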
Code example #23
def build_model(input_shape, appliances):
    seq_length = input_shape[0]

    x = Input(shape=input_shape)

    # time_conv
    conv_1 = Conv1D(filters=40, kernel_size=10, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = PReLU()(conv_1)
    drop_1 = Dropout(0.15)(conv_1)

    conv_2 = Conv1D(filters=40, kernel_size=7, strides=3, padding=MODEL_CONV_PADDING)(drop_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = PReLU()(conv_2)
    drop_2 = Dropout(0.15)(conv_2)

    # freq_conv
    conv_3 = Conv1D(filters=80, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_2)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = PReLU()(conv_3)
    drop_3 = Dropout(0.15)(conv_3)

    conv_4 = Conv1D(filters=80, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = PReLU()(conv_4)
    drop_4 = Dropout(0.15)(conv_4)

    conv_5 = Conv1D(filters=80, kernel_size=3, strides=2, padding=MODEL_CONV_PADDING)(drop_4)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = PReLU()(conv_5)
    drop_5 = Dropout(0.15)(conv_5)

    flat_5 = Flatten()(drop_5)

#===============================================================================================
    # time_conv
    conv_10 = Conv1D(filters=30, kernel_size=31, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = PReLU()(conv_10)
    drop_10 = Dropout(0.15)(conv_10)

    conv_20 = Conv1D(filters=30, kernel_size=21, strides=10, padding=MODEL_CONV_PADDING)(drop_10)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = PReLU()(conv_20)
    drop_20 = Dropout(0.15)(conv_20)

    # freq_conv
    conv_30 = Conv1D(filters=60, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_20)
    conv_30 = BatchNormalization()(conv_30)
    conv_30 = PReLU()(conv_30)
    drop_30 = Dropout(0.15)(conv_30)

    conv_40 = Conv1D(filters=60, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_30)
    conv_40 = BatchNormalization()(conv_40)
    conv_40 = PReLU()(conv_40)
    drop_40 = Dropout(0.15)(conv_40)

    conv_50 = Conv1D(filters=60, kernel_size=3, strides=2, padding=MODEL_CONV_PADDING)(drop_40)
    conv_50 = BatchNormalization()(conv_50)
    conv_50 = PReLU()(conv_50)
    drop_50 = Dropout(0.15)(conv_50)

    flat_50 = Flatten()(drop_50)

#===============================================================================================
    # time_conv
    conv_11 = Conv1D(filters=30, kernel_size=61, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = PReLU()(conv_11)
    drop_11 = Dropout(0.15)(conv_11)

    conv_21 = Conv1D(filters=30, kernel_size=41, strides=20, padding=MODEL_CONV_PADDING)(drop_11)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = PReLU()(conv_21)
    drop_21 = Dropout(0.15)(conv_21)

    # freq_conv
    conv_31 = Conv1D(filters=60, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_21)
    conv_31 = BatchNormalization()(conv_31)
    conv_31 = PReLU()(conv_31)
    drop_31 = Dropout(0.15)(conv_31)

    conv_41 = Conv1D(filters=60, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_31)
    conv_41 = BatchNormalization()(conv_41)
    conv_41 = PReLU()(conv_41)
    drop_41 = Dropout(0.15)(conv_41)

    conv_51 = Conv1D(filters=60, kernel_size=3, strides=2, padding=MODEL_CONV_PADDING)(drop_41)
    conv_51 = BatchNormalization()(conv_51)
    conv_51 = PReLU()(conv_51)
    drop_51 = Dropout(0.15)(conv_51)

    flat_51 = Flatten()(drop_51)

#===============================================================================================
    # time_conv
    conv_12 = Conv1D(filters=30, kernel_size=19, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = PReLU()(conv_12)
    drop_12 = Dropout(0.15)(conv_12)

    conv_22 = Conv1D(filters=30, kernel_size=13, strides=6, padding=MODEL_CONV_PADDING)(drop_12)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = PReLU()(conv_22)
    drop_22 = Dropout(0.15)(conv_22)

    # freq_conv
    conv_32 = Conv1D(filters=60, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_22)
    conv_32 = BatchNormalization()(conv_32)
    conv_32 = PReLU()(conv_32)
    drop_32 = Dropout(0.15)(conv_32)

    conv_42 = Conv1D(filters=60, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_32)
    conv_42 = BatchNormalization()(conv_42)
    conv_42 = PReLU()(conv_42)
    drop_42 = Dropout(0.15)(conv_42)

    conv_52 = Conv1D(filters=60, kernel_size=3, strides=2, padding=MODEL_CONV_PADDING)(drop_42)
    conv_52 = BatchNormalization()(conv_52)
    conv_52 = PReLU()(conv_52)
    drop_52 = Dropout(0.15)(conv_52)

    flat_52 = Flatten()(drop_52)

#===============================================================================================

    conv_0 = Conv1D(filters=80, kernel_size=1, padding='same')(x)
    conv_0 = BatchNormalization()(conv_0)
    conv_0 = Activation('linear')(conv_0)
    drop_0 = Dropout(0.15)(conv_0)
    conv_00 = Conv1D(filters=40, kernel_size=3, padding='same')(drop_0)
    conv_00 = BatchNormalization()(conv_00)
    conv_00 = Activation('linear')(conv_00)
    drop_00 = Dropout(0.15)(conv_00)
    conv_000 = Conv1D(filters=4, kernel_size=1, padding='same')(drop_00)
    conv_000 = BatchNormalization()(conv_000)
    conv_000 = Activation('linear')(conv_000)
    drop_000 = Dropout(0.15)(conv_000)
    flat_53 = Flatten()(drop_000)

    # merge
    concate_5 = concatenate([flat_5, flat_50, flat_51, flat_52, flat_53])

    dense_6 = Dense(1960)(concate_5)
    dense_6 = BatchNormalization()(dense_6)
    dense_6 = PReLU()(dense_6)
    drop_6 = Dropout(0.15)(dense_6)

    dense_7 = Dense(1080)(drop_6)
    dense_7 = BatchNormalization()(dense_7)
    dense_7 = PReLU()(dense_7)
    drop_7 = Dropout(0.15)(dense_7)

    reshape_8 = Reshape(target_shape=(seq_length, -1))(drop_7)
    biLSTM_1 = Bidirectional(LSTM(9, return_sequences=True))(reshape_8)
    biLSTM_2 = Bidirectional(LSTM(9, return_sequences=True))(biLSTM_1)

    outputs_disaggregation = []

    for appliance_name in appliances:
        biLSTM_3 = Bidirectional(LSTM(6, return_sequences=True))(biLSTM_2)
        biLSTM_3 = PReLU()(biLSTM_3)
        outputs_disaggregation.append(
            TimeDistributed(Dense(1, activation='relu'),
                            name=appliance_name.replace(" ", "_"))(biLSTM_3))

    model = Model(inputs=x, outputs=outputs_disaggregation)
    optimizer = RMSprop(lr=0.001, clipnorm=40)
    model.compile(optimizer=optimizer, loss='mse', metrics=['mae', 'mse'])

    return model
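A hedged usage sketch for the builder above; the input length and appliance names are illustrative assumptions, and MODEL_CONV_PADDING is assumed to be defined (e.g. 'same'). Because drop_7 comes from a Dense(1080) layer and is reshaped to (seq_length, -1), the chosen sequence length must divide 1080:

# Illustrative call only: 60 divides 1080, so the Reshape yields (60, 18).
appliances = ['fridge', 'kettle', 'washing machine']   # assumed appliance names
model = build_model(input_shape=(60, 1), appliances=appliances)
model.summary()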
Code example #24
0
# Save training images and labels in a numpy array
numpy.save('numpy_training_datasets/microexpstcnn_images.npy', training_set)
numpy.save('numpy_training_datasets/microexpstcnn_labels.npy', traininglabels)

# Load training images and labels that are stored in numpy array
"""
training_set = numpy.load('numpy_training_datasets/microexpstcnn_images.npy')
traininglabels = numpy.load('numpy_training_datasets/microexpstcnn_labels.npy')
"""

# MicroExpSTCNN Model
model = Sequential()
model.add(
    Convolution3D(32, (3, 3, 15),
                  input_shape=(1, image_rows, image_columns, image_depth)))
model.add(PReLU(alpha_initializer="zeros"))
model.add(MaxPooling3D(pool_size=(3, 3, 3)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, init='normal'))
model.add(PReLU(alpha_initializer="zeros"))
model.add(Dropout(0.5))
model.add(Dense(3, init='normal'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='SGD',
              metrics=['accuracy'])

model.summary()

filepath = "weights_microexpstcnn/weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
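The snippet ends after defining filepath; a minimal sketch of how training with a checkpoint callback could continue from here (the validation split, batch size, and epoch count are assumptions, not values from the original project):

from keras.callbacks import ModelCheckpoint

# Save weights whenever validation accuracy improves (assumed training setup).
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                             save_best_only=True, mode='max')
model.fit(training_set, traininglabels,
          validation_split=0.2, batch_size=8, epochs=100,
          shuffle=True, callbacks=[checkpoint])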
Code example #25
0
comment = Input(shape=(maxlen, ))
emb_comment = Embedding(max_features,
                        embed_size,
                        weights=[embedding_matrix],
                        trainable=train_embed)(comment)
emb_comment = SpatialDropout1D(spatial_dropout)(emb_comment)

## Step 1
block1 = Conv1D(filter_nr,
                kernel_size=filter_size,
                padding='same',
                activation='linear',
                kernel_regularizer=conv_kern_reg,
                bias_regularizer=conv_bias_reg)(emb_comment)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block1 = Conv1D(filter_nr,
                kernel_size=filter_size,
                padding='same',
                activation='linear',
                kernel_regularizer=conv_kern_reg,
                bias_regularizer=conv_bias_reg)(block1)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)

# First convolve with filter size 3 into a maxlen*64 map, then with filter size 1 into another maxlen*64 map

# We pass the embedded comment through a Conv1D with kernel size 1 so that it has the same shape as the block output.
# If filter_nr == embed_size (300 in this case) this step is unnecessary, and emb_comment can be added directly to block1_output.
resize_emb = Conv1D(filter_nr,
                    kernel_size=1,
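The snippet is cut off here. The comments above describe projecting the embedding with a kernel-size-1 convolution so it matches the block output and can be summed with it (the DPCNN-style shortcut); a hedged sketch of that continuation, where block1_output and the PReLU placement are assumptions:

resize_emb = Conv1D(filter_nr,
                    kernel_size=1,
                    padding='same',
                    activation='linear',
                    kernel_regularizer=conv_kern_reg,
                    bias_regularizer=conv_bias_reg)(emb_comment)
resize_emb = PReLU()(resize_emb)

# Sum the projected embedding with the block output (requires keras.layers.add).
block1_output = add([block1, resize_emb])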
Code example #26
0
File: generate_test_models.py Project: rl-lab/fdeep
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combinations.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
        (2, 3),
        (2, 3, 4, 5),
        (2, 3, 4, 5, 6),
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Flatten()(inputs[3]))
    outputs.append(Flatten()(inputs[4]))
    outputs.append(Flatten()(inputs[5]))
    outputs.append(Flatten()(inputs[9]))
    outputs.append(Flatten()(inputs[10]))

    for inp in inputs[6:8]:
        for padding in ['valid', 'same']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels,
                                   s,
                                   padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor,
                                         strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        DepthwiseConv2D((h, 1),
                                        strides=(sy, sy),
                                        padding=padding)(inp))
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4) if sy == 1 else [1]:
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    outputs.append(
                        DepthwiseConv2D((1, w),
                                        strides=(sx, sx),
                                        padding=padding)(inp))
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(Dropout(0.5)(inputs[0]))

    # same as axis=-1
    outputs.append(Concatenate()([inputs[1], inputs[2]]))
    outputs.append(Concatenate(axis=3)([inputs[1], inputs[2]]))
    # axis=0 does not make sense, since dimension 0 is the batch dimension
    outputs.append(Concatenate(axis=1)([inputs[1], inputs[2]]))
    outputs.append(Concatenate(axis=2)([inputs[1], inputs[2]]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(DepthwiseConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(DepthwiseConv2D(2, (3, 3), use_bias=False)(inputs[0]))

    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(keras.layers.Add()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Subtract()([inputs[4], inputs[8]]))
    outputs.append(keras.layers.Multiply()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Average()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(keras.layers.Maximum()([inputs[4], inputs[8], inputs[8]]))
    outputs.append(Concatenate()([inputs[4], inputs[8], inputs[8]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        PReLU()(inputs[2]),
        PReLU()(inputs[3]),
        PReLU()(inputs[4]),
        shared_activation(inputs[3]),
        Activation('linear')(inputs[4]),
        Activation('linear')(inputs[1]),
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
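get_test_model_full (and get_test_model_small below) call generate_input_data and generate_output_data, which are defined elsewhere in generate_test_models.py. A rough stand-in showing what such helpers could look like, offered as an assumption for illustration rather than the project's actual code:

import numpy as np

def generate_input_data(data_size, input_shapes):
    """Random input arrays, one per model input (illustrative stand-in)."""
    return [np.random.normal(size=(data_size,) + tuple(shape))
            for shape in input_shapes]

def generate_output_data(data_size, initial_data_out):
    """Random targets shaped like the model's initial predictions."""
    return [np.random.normal(size=arr.shape) for arr in initial_data_out]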
Code example #27
0
# model.add(Convolution2D(64, 3, 3, activation='relu'))
# model.add(Flatten())
# model.add(Dense(100))
# model.add(Dense(50))
# model.add(Dense(10))
# model.add(Dense(1))
# model.compile(loss='mse', optimizer='adam')

# # model.fit(X_train, y_train, validation_split=0.2, shuffle=True)

models = Sequential()
models.add(Cropping2D(cropping=((50, 20), (1, 1)), input_shape=(160, 320, 1)))
models.add(Lambda(lambda x: x / 255.0 - 0.5))
models.add(
    Convolution2D(24, 5, 5, subsample=(2, 2), init=initializations.he_uniform))
models.add(PReLU())
models.add(BatchNormalization())
models.add(
    Convolution2D(36, 5, 5, subsample=(2, 2), init=initializations.he_uniform))
models.add(PReLU())
models.add(BatchNormalization())
models.add(
    Convolution2D(48, 5, 5, subsample=(2, 2), init=initializations.he_uniform))
models.add(PReLU())
models.add(BatchNormalization())
models.add(Convolution2D(64, 3, 3, init=initializations.he_uniform))
models.add(PReLU())
models.add(BatchNormalization())
# models.add(MaxPooling2D()) #
models.add(Flatten())
models.add(Dense(100, init=initializations.he_uniform))
コード例 #28
0
File: generate_test_models.py Project: rl-lab/fdeep
def get_test_model_small():
    """Returns a minimalist test model."""
    input_shapes = [
        (17, 4),
        (16, 18, 3),
        (8, ),
        (8, ),
        (2, 3, 5),
        (2, 3, 5),
        (32, 32, 3),
        (2, 3, 4, 5),
        (2, 3, 4, 5, 6),
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Dense(3)(inputs[2]))
    outputs.append(Dense(3)(inputs[0]))
    outputs.append(Dense(3)(inputs[1]))
    outputs.append(Dense(3)(inputs[7]))

    outputs.append(Flatten()(inputs[0]))
    outputs.append(Flatten()(inputs[1]))
    outputs.append(Flatten()(inputs[7]))
    outputs.append(Flatten()(inputs[8]))

    outputs.append(Activation('sigmoid')(inputs[7]))
    outputs.append(Activation('sigmoid')(inputs[8]))

    # same as axis=-1
    outputs.append(Concatenate()([inputs[4], inputs[5]]))
    outputs.append(Concatenate(axis=3)([inputs[4], inputs[5]]))
    # axis=0 does not make sense, since dimension 0 is the batch dimension
    outputs.append(Concatenate(axis=1)([inputs[4], inputs[5]]))
    outputs.append(Concatenate(axis=2)([inputs[4], inputs[5]]))

    outputs.append(PReLU()(inputs[0]))
    outputs.append(PReLU()(inputs[1]))
    outputs.append(PReLU()(inputs[2]))
    outputs.append(PReLU(shared_axes=[1, 2])(inputs[1]))
    outputs.append(PReLU(shared_axes=[1, 3])(inputs[1]))
    outputs.append(PReLU(shared_axes=[2, 3])(inputs[1]))
    outputs.append(PReLU(shared_axes=[1, 2, 3])(inputs[1]))
    outputs.append(PReLU(shared_axes=[1])(inputs[1]))
    outputs.append(PReLU(shared_axes=[2])(inputs[1]))
    outputs.append(PReLU(shared_axes=[3])(inputs[1]))

    outputs.append(PReLU()(Conv2D(8, (3, 3), padding='same',
                                  activation='elu')(inputs[6])))

    outputs.append(GlobalMaxPooling2D()(inputs[1]))
    outputs.append(MaxPooling2D()(inputs[1]))
    outputs.append(AveragePooling1D()(inputs[0]))

    outputs.append(Conv1D(2, 3)(inputs[0]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[1]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[1]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[1]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[1]))
    outputs.append(DepthwiseConv2D(2, (3, 3), use_bias=True)(inputs[1]))
    outputs.append(DepthwiseConv2D(2, (3, 3), use_bias=False)(inputs[1]))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_small')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
Code example #29
0
    out_frame_file=
    "data-spectrogram/dev_dt_05_clean_global_normalized/feats.scp.mod",
    batch_size=1024,
    buffer_size=10,
    context=5,
    out_frame_count=1,
    shuffle=False,
)

inputs = Input(shape=(11, 771))

fc1 = Flatten()(inputs)
fc1 = Dropout(0.3)(fc1)
fc1 = Dense(2048)(fc1)
fc1 = BatchNormalization(momentum=0.999)(fc1)
fc1 = PReLU()(fc1)
fc1 = Dropout(0.3)(fc1)

fc2 = Dense(2048)(fc1)
fc2 = BatchNormalization(momentum=0.999)(fc2)
fc2 = PReLU()(fc2)
fc2 = Dropout(0.3)(fc2)

out = Dense(257)(fc2)
model = Model(inputs=inputs, outputs=out)

adam = Adam(lr=0.0001, decay=1e-8)
model.compile(optimizer=adam, loss='mse')

for epoch in range(100):
    train_loss = model.fit_generator(train_loader.batchify(), 5299)
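Looping over fit_generator runs one epoch per call and keeps only the last History object; an equivalent, more idiomatic single call would look like this (a sketch, assuming train_loader.batchify() yields batches indefinitely):

history = model.fit_generator(train_loader.batchify(),
                              steps_per_epoch=5299,
                              epochs=100)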
Code example #30
0
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

K.set_image_dim_ordering('tf')

# generator
nch = 256
g_input = Input(shape=(100, ))
g_input_cond = Input(shape=(10, ))
g = Dense(nch * 7 * 7, init='glorot_normal')(merge([g_input, g_input_cond],
                                                   mode='concat',
                                                   concat_axis=1))
g = BatchNormalization(mode=2)(g)
g = PReLU()(g)
g = Reshape([7, 7, nch])(g)
g = Convolution2D(nch, 3, 3, border_mode='same', init='glorot_uniform')(g)
g = BatchNormalization(axis=3, mode=2)(g)
g = PReLU()(g)
g = UpSampling2D(size=(2, 2))(g)
g = Convolution2D(nch // 2, 3, 3, border_mode='same', init='glorot_uniform')(g)
g = BatchNormalization(axis=3, mode=2)(g)
g = PReLU()(g)
g = UpSampling2D(size=(2, 2))(g)
g = Convolution2D(nch // 4, 3, 3, border_mode='same', init='glorot_uniform')(g)
g = BatchNormalization(axis=3, mode=2)(g)
g = PReLU()(g)
g = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(g)
g_output = Activation('sigmoid')(g)
generator = Model([g_input, g_input_cond], g_output)
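This generator is written against the Keras 1 API (merge, mode=, border_mode=, init=). For reference, a hedged sketch of the first few layers translated to the Keras 2 API, assuming tf.keras; the remaining layers follow the same substitutions:

from tensorflow.keras.layers import (Input, Dense, Concatenate, Reshape,
                                     BatchNormalization, PReLU, Conv2D,
                                     UpSampling2D)

nch = 256
g_input = Input(shape=(100,))
g_input_cond = Input(shape=(10,))

g = Concatenate(axis=1)([g_input, g_input_cond])      # replaces merge(mode='concat')
g = Dense(nch * 7 * 7, kernel_initializer='glorot_normal')(g)
g = BatchNormalization()(g)                           # the `mode` argument was removed in Keras 2
g = PReLU()(g)
g = Reshape((7, 7, nch))(g)
g = Conv2D(nch, (3, 3), padding='same', kernel_initializer='glorot_uniform')(g)
g = BatchNormalization(axis=3)(g)
g = PReLU()(g)
g = UpSampling2D(size=(2, 2))(g)
# ...the remaining Convolution2D/BatchNormalization/PReLU blocks translate the same way.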