Code example #1
def ResNet(input_shape, output_shape, optimizer='adam', leak=0.0):
    """Summary
    
    Args:
        input_shape (TYPE): Description
        output_shape (TYPE): Description
        optimizer (str, optional): Description
        leak (float, optional): Description
    
    Returns:
        TYPE: Description
    """
    inputs = Input(shape=input_shape)

    x = Conv1D(64, 3, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    #x = MaxPooling1D(pool_size=2, padding='same')(x)

    x = res_block_1d(x, [64, 64, 256], leak=leak, shortcut=True)
    x = res_block_1d(x, [64, 64, 256], leak=leak, shortcut=False)
    x = res_block_1d(x, [64, 64, 256], leak=leak, shortcut=False)

    x = res_block_1d(x, [128, 128, 512], leak=leak, shortcut=True)
    x = res_block_1d(x, [128, 128, 512], leak=leak, shortcut=False)
    x = res_block_1d(x, [128, 128, 512], leak=leak, shortcut=False)
    x = res_block_1d(x, [128, 128, 512], leak=leak, shortcut=False)

    x = res_block_1d(x, [256, 256, 1024], leak=leak, shortcut=True)
    x = res_block_1d(x, [256, 256, 1024], leak=leak, shortcut=False)
    x = res_block_1d(x, [256, 256, 1024], leak=leak, shortcut=False)
    x = res_block_1d(x, [256, 256, 1024], leak=leak, shortcut=False)
    #     x = res_block_1d(x, [256, 256, 1024], leak=leak, shortcut=False)
    #     x = res_block_1d(x, [256, 256, 1024], leak=leak, shortcut=False)

    #     x = res_block_1d(x, [512, 512, 2048], leak=leak, shortcut=True)
    #     x = res_block_1d(x, [512, 512, 2048], leak=leak, shortcut=False)
    #     x = res_block_1d(x, [512, 512, 2048], leak=leak, shortcut=False)

    x = AveragePooling1D(pool_size=4)(x)

    x = Flatten()(x)
    outputs = Dense(output_shape, activation='softmax')(x)

    model = Model(inputs, outputs)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
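A minimal usage sketch (illustrative shapes; res_block_1d and the Keras imports are assumed to be defined elsewhere in the module):

# Hypothetical call: a 1024-step univariate series classified into 10 classes
model = ResNet(input_shape=(1024, 1), output_shape=10, leak=0.1)
model.summary()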
Code example #2
def conv_filt_v2(input_a, no_filters, filt_width):
    ''' Base module for text data.
    Inputs:  input_a -- input tensor of token indices
             no_filters -- number of filter maps used in the conv layer
             filt_width -- convolution window width
    Outputs: output tensor of the base module
    '''
    # vocab_size is the vocabulary size (e.g. 20000); it should be greater than or equal to your tokenizer's vocabulary size
    embed = Embedding(vocab_size, embed_size)(input_a)
    conv = Conv1D(no_filters, filt_width, strides=1)(embed)
    conv = Activation('relu')(conv)
    conv = AveragePooling1D(pool_size=2)(conv)
    conv = Dropout(0.5)(conv)
    conv = GlobalAveragePooling1D()(conv)
    return conv
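A sketch of wiring conv_filt_v2 into a full model; maxlen is illustrative, and vocab_size / embed_size are assumed to be defined elsewhere in the original module:

maxlen = 100                                 # illustrative sequence length
inp = Input(shape=(maxlen,), dtype='int32')  # token indices
feat = conv_filt_v2(inp, no_filters=64, filt_width=3)
out = Dense(2, activation='softmax')(feat)   # e.g. a two-class head
model = Model(inp, out)
model.compile(loss='categorical_crossentropy', optimizer='adam')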
Code example #3
def build_model():
    data_shape = (300, 5)  # (steps, channels); the batch dimension is implicit

    def square(x):
        return K.square(x)

    def log(x):
        return K.log(K.clip(x, min_value=1e-7, max_value=10000))
    
    model = Sequential()
    model.add(Conv1D(40, 15,
        data_format='channels_last', input_shape=data_shape))
    model.add(Conv1D(40, 5, use_bias=False, data_format='channels_first'))
    model.add(BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1))
    model.add(Activation(square))
    model.add(AveragePooling1D(pool_size=40, strides=15))
    model.add(Activation(log))
    # remaining LSTM arguments are Keras defaults and have been omitted
    model.add(LSTM(64, activation='sigmoid',
                   recurrent_activation='hard_sigmoid',
                   return_sequences=True, go_backwards=True))
    model.add(Dropout(0.2))
    model.add(LSTM(32, activation='sigmoid',
                   recurrent_activation='hard_sigmoid',
                   return_sequences=True, go_backwards=True))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(3, activation='softmax', kernel_constraint=max_norm(0.5)))
    

    start = time.time()
    model.compile(loss="categorical_crossentropy", optimizer=optimizers.Adagrad(), metrics=['accuracy'])
    print("Compilation time:", time.time() - start)
    
    return model
Code example #4
def structureSubModel(ssInput):
    ssConv = Conv1D(filters=256,
                    kernel_size=12,
                    padding="valid",
                    activation="relu",
                    strides=1)(ssInput)
    ssPool = AveragePooling1D(pool_size=5, strides=5)(ssConv)
    ssDout1 = Dropout(rate=0.7)(ssPool)
    seqBiLstm = Bidirectional(LSTM(units=128, return_sequences=True))(ssDout1)
    seqDout2 = Dropout(rate=0.7)(seqBiLstm)
    ssFlat = Flatten()(seqDout2)
    ssDen1 = Dense(256, kernel_initializer='glorot_uniform',
                   activation='relu')(ssFlat)
    ssDout2 = Dropout(rate=0.7)(ssDen1)

    return ssDout2
Code example #5
    def attentionWrap(self, hid_dim, input_length, input_dim):
        inputs = Input(shape=(input_length, input_dim))
        lstm = GRU(hid_dim,
                   return_sequences=True,
                   dropout=0.2,
                   recurrent_dropout=0.2,
                   input_shape=(input_length, input_dim))(inputs)
        att = TimeDistributed(Dense(1, activation='tanh'))(lstm)
        att = Flatten()(att)
        att = Activation(activation='softmax')(att)
        att = RepeatVector(hid_dim)(att)
        att = Permute((2, 1))(att)
        mer = multiply([lstm, att])
        hid = AveragePooling1D(pool_size=input_length)(mer)
        hid = Flatten()(hid)
        return inputs, hid
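A hypothetical sketch of turning the returned (inputs, hid) pair into a classifier; the dimensions and n_classes are illustrative assumptions:

# Inside another method of the same class (all values illustrative)
inputs, hid = self.attentionWrap(hid_dim=128, input_length=50, input_dim=300)
outputs = Dense(n_classes, activation='softmax')(hid)  # n_classes assumed defined
model = Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam')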
Code example #6
    def build_model(self):
        main_input = Input(shape=(None, ), dtype='int32', name='main_input')
        x = Embedding(output_dim=self.embedding_dimension,
                      input_dim=n_vocab)(main_input)
        x = Convolution1D(64, 5, padding='same', activation='relu')(x)

        if self.dropout_parameter > 0.0:
            x = Dropout(self.dropout_parameter)(x)

        if self.rnn_type == 'GRU':
            rnn = GRU(self.rnn_units, return_sequences=True)
        elif self.rnn_type == 'LSTM':
            rnn = LSTM(self.rnn_units, return_sequences=True)
        else:
            rnn = SimpleRNN(self.rnn_units)

        if self.bidirectional:
            x = Bidirectional(rnn)(x)
        else:
            x = rnn(x)

        if self.maxPooling:
            x = MaxPooling1D(strides=1, padding='same')(x)
            print("Using MaxPooling")
        elif self.averagePooling:
            x = AveragePooling1D(strides=1, padding='same')(x)
            print("Using AveragePooling")
        slot_output = TimeDistributed(Dense(n_slots, activation='softmax'),
                                      name='slot_output')(x)
        intent_output = TimeDistributed(Dense(n_classes, activation='softmax'),
                                        name='intent_output')(x)
        model = kerasModel(inputs=[main_input],
                           outputs=[intent_output, slot_output])

        # rmsprop is recommended for RNNs https://stats.stackexchange.com/questions/315743/rmsprop-and-adam-vs-sgd
        model.compile(optimizer='rmsprop',
                      loss={
                          'intent_output': 'categorical_crossentropy',
                          'slot_output': 'categorical_crossentropy'
                      })
        plot_model(model, 'models/' + self.name + '.png')

        self.model = model

        return
Code example #7
def conv1d_nn(input_dim=4, window=3):
    conv1d = Sequential()
    # conv1d.add(Dense(8,
    # 	input_shape=(3, 1)))
    # conv1d.add(ZeroPadding1D(padding=1))
    # With the zero padding above commented out and no 'same' padding,
    # the conv output would shrink to a single step, forcing pool_size=1.
    conv1d.add(
        Conv1D(filters=input_dim,
               input_shape=(window, 1),
               kernel_size=window,
               strides=1,
               use_bias=True,
               padding='same'))
    conv1d.add(AveragePooling1D(pool_size=window, strides=1))
    conv1d.add(Flatten())
    conv1d.add(Dense(1))
    conv1d.compile(loss='mean_squared_error', optimizer='adam')
    return conv1d
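An illustrative training call with random arrays standing in for real windowed data:

import numpy as np

X = np.random.rand(100, 3, 1)  # 100 windows of shape (window=3, 1 feature)
y = np.random.rand(100, 1)     # one regression target per window
model = conv1d_nn(input_dim=4, window=3)
model.fit(X, y, epochs=2, batch_size=16)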
Code example #8
def structureModel(InputShape):
    conv1 = Conv1D(filters=256,
                   kernel_size=12,
                   padding='valid',
                   activation='relu',
                   strides=1)(InputShape)

    mapool = AveragePooling1D(pool_size=5, strides=5)(conv1)
    dout1 = Dropout(rate=0.2)(mapool)

    structBiLstm = Bidirectional(LSTM(units=128, return_sequences=True))(dout1)
    seqDout2 = Dropout(rate=0.4)(structBiLstm)
    flat = Flatten()(seqDout2)
    den1 = Dense(256, kernel_initializer='glorot_uniform')(flat)
    activa2 = PReLU(alpha_initializer='zero', weights=None)(den1)
    dout2 = Dropout(rate=0.5)(activa2)

    return dout2
Code example #9
def structureModel(structInput):
    structCov = Conv1D(filters=16,
                       kernel_size=12,
                       padding='valid',
                       activation='relu',
                       strides=1)(structInput)

    structPool = AveragePooling1D(pool_size=20, strides=10)(structCov)
    structPoolDout = Dropout(rate=0.2)(structPool)

    structBiLstm = Bidirectional(LSTM(units=8, return_sequences=True))(structPoolDout)
    structBiLstmDout = Dropout(rate=0.5)(structBiLstm)
    structFlat = Flatten()(structBiLstmDout)

    structDen1 = Dense(2, kernel_initializer='glorot_uniform')(structFlat)
    structActivaDen1 = PReLU(alpha_initializer='zero', weights=None)(structDen1)
    structDout1 = Dropout(rate=0.9)(structActivaDen1)

    return structDout1
Code example #10
def resnet_block(input_tensor, final_layer_output=220, append='n'):
    x = Conv1D(64, 7, strides=2, padding='same',
               name='conv1' + append)(input_tensor)
    x = BatchNormalization(name='bn_conv1' + append)(x)
    x = Activation('relu')(x)
    x = MaxPooling1D(3, strides=2)(x)
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a' + append, strides=1)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b' + append)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c' + append)
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a' + append)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + append)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c' + append)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d' + append)
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='g' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='h' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='i' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='j' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='k' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='l' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='m' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='n' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='o' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='p' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='q' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='r' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='s' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='t' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='u' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='v' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='w' + append)
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a' + append)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b' + append)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c' + append)
    x = AveragePooling1D(final_layer_output, name='avg_pool' + append)(x)
    x = Flatten()(x)
    return x
Code example #11
File: _1d.py Project: rosdyana/keras-resnet
def ResNet18_1d(input_tensor, final_layer_output=220, append='n'):
    x = Conv1D(64, 7, strides=2, padding='same',
               name='conv1' + append)(input_tensor)
    x = BatchNormalization(name='bn_conv1' + append)(x)
    x = Activation('relu')(x)
    x = MaxPooling1D(3, strides=2)(x)
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a' + append, strides=1)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b' + append)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c' + append)
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a' + append)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + append)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c' + append)
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + append)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c' + append)
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a' + append)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b' + append)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c' + append)
    x = AveragePooling1D(final_layer_output, name='avg_pool' + append)(x)
    x = Flatten()(x)
    return x
Code example #12
def conv_filt(input_dim, no_filters, filt_sizes):
    ''' Returns a list of parallel convolutional branches; they are concatenated later in concat_conv_filt(). '''
    filt_models = []
    print(no_filters, filt_sizes)
    repeat_threshold = int(len(no_filters)/2)
    for filt_ind, filt in enumerate(no_filters):
        model = Sequential()
        # The first Embedding argument only needs to be at least the vocabulary size; 20000 is used here
        model.add(Embedding(20000, 300, input_length=global_vars.MaxLen))
        model.add(Conv1D(no_filters[filt_ind], filt_sizes[filt_ind], strides=1))
        model.add(Activation('relu'))
        if filt_ind < repeat_threshold:
            pool_sizes = 2
        else:
            pool_sizes = 7
        print(pool_sizes)
        model.add(AveragePooling1D(pool_size=pool_sizes))
        model.add(Dropout(0.5))
        model.add(GlobalAveragePooling1D())
        filt_models.append(model)
    return filt_models
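The concat_conv_filt() mentioned in the docstring is not shown; below is a minimal sketch of what it might look like, assuming the parallel Sequential branches share one integer input of length global_vars.MaxLen (Keras models are callable on tensors):

def concat_conv_filt(filt_models, n_classes):
    # Hypothetical merge step: run one shared input through every branch
    shared_input = Input(shape=(global_vars.MaxLen,), dtype='int32')
    branch_outputs = [branch(shared_input) for branch in filt_models]
    merged = concatenate(branch_outputs)
    out = Dense(n_classes, activation='softmax')(merged)
    return Model(shared_input, out)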
Code example #13
File: spotify.py Project: Swiman/genre_recognition
def get(input_shape, num_classes, batchnorm=True, activation='softmax'):
    input_layer = Input(shape=input_shape)
    layer = input_layer
    for i in range(3):
        layer = Conv1D(filters=256,
                       kernel_size=4,
                       strides=2,
                       padding='same',
                       kernel_initializer='glorot_normal')(layer)
        layer = Activation('relu')(layer)
        layer = MaxPooling1D(2)(layer)
    avg_pool = AveragePooling1D(pool_size=4)(layer)
    max_pool = MaxPooling1D(pool_size=4)(layer)
    layer = concatenate([avg_pool, max_pool])
    layer = Flatten()(layer)
    layer = Dense(units=2048, activation='relu')(layer)
    layer = Dropout(rate=0.5)(layer)
    layer = Dense(units=num_classes)(layer)
    layer = Dropout(rate=0.5)(layer)
    layer = Activation(activation)(layer)
    return Model(inputs=input_layer, outputs=layer)
Code example #14
    def visual_model(self):
        # when input is visual feature
        model = Sequential()
        model.add(
            BatchNormalization(input_shape=(self.seq_length, 4805),
                               name='visual_BN_1'))
        model.add(AveragePooling1D(pool_size=2, name='visual_average'))

        # lstm layer
        model.add(LSTM(64, name='visual_lstm'))
        model.add(Activation('relu', name='visual_activation1'))
        model.add(BatchNormalization(name='visual_BN_2'))
        model.add(Dropout(0.5, name='visual_dropout_1'))

        # the hidden layer
        model = self.add_hidden_layer(model, 'visual')

        # the decision layer
        model.add(self.decision_layer('visual'))

        return model
Code example #15
    def __call__(self,
                 window_size,
                 n_channels=4,
                 regression=False,
                 dense=False):
        inputs = Input(shape=(window_size, n_channels))
        x = Conv1D(64, n_channels, padding='same', activation='relu')(inputs)
        for i in range(5):
            output = Conv1D(64, 3, padding='same', activation='relu')(x)
            output = Conv1D(64, 3, padding='same')(output)
            output = Add()([x, output])
            output = Activation('relu')(output)
            x = output
        output = AveragePooling1D(2)(output)
        output = Flatten()(output)
        output_size = window_size if dense else 1
        output = Dense(output_size)(output)
        if not regression:
            output = Activation('sigmoid')(output)
        model = Model(inputs=[inputs], outputs=[output])
        return model
Code example #16
    def bimodal_audio_visual(self):
        # audio model
        audio_feature = Sequential()
        audio_feature.add(
            BatchNormalization(input_shape=(self.audio_feature_f_dim, ),
                               name='av_audio_BN_1'))
        audio_feature = self.add_hidden_layer(audio_feature, 'av_audio')

        #visual model
        face_fusion = Sequential()
        face_fusion.add(
            BatchNormalization(input_shape=(self.seq_length,
                                            self.face_fusion_f_dim),
                               name='av_visual_BN_1'))
        face_fusion.add(AveragePooling1D(pool_size=2,
                                         name='av_visual_average'))
        face_fusion.add(LSTM(64, name='av_visual_lstm'))
        face_fusion.add(Activation('relu', name='av_visual_activation1'))
        face_fusion.add(BatchNormalization(name='av_visual_BN_2'))
        face_fusion.add(Dropout(0.5, name='av_visual_dropout_1'))
        face_fusion = self.add_hidden_layer(face_fusion, 'av_visual')

        audio_feature_input = audio_feature.input
        audio_feature_output = audio_feature.layers[-1].output
        face_fusion_input = face_fusion.input
        face_fusion_output = face_fusion.layers[-1].output

        concat_layer = concatenate([audio_feature_output, face_fusion_output])

        x = Dense(1024)(concat_layer)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.5)(x)

        out = self.decision_layer('bimodal-av')(x)

        fusion = Model([audio_feature_input, face_fusion_input], out)
        return fusion
Code example #17
    def _pool(self, p=3, strides=1, padding='same', type="max"):
        """
		Function for applying a pooling layer.

		To call:
			_pool(p, strides, padding, type)

		Parameters:
			p		pooling parameter
			strides
			padding
			type		'max' or 'avg'
		"""

        # ==================================================
        #	Check to see if a model has already
        #	been initialized. If not, create the
        #	model and add the input
        # ==================================================
        try:
            self.model_
        except AttributeError:
            self.model_ = []
            self.model_.append(Input(shape=self.trainX_.shape[1:]))

        # ==================================================
        #	Apply the pooling, depending on which
        #	type was chosen (max is default)
        # ==================================================
        if type.lower() == 'avg':
            self.model_.append(
                AveragePooling1D(pool_size=p, strides=strides,
                                 padding=padding)(self.model_[-1]))

        else:
            self.model_.append(
                MaxPooling1D(pool_size=p, strides=strides,
                             padding=padding)(self.model_[-1]))
Code example #18
    def bimodal_visual_word(self):
        #visual model
        face_fusion = Sequential()
        face_fusion.add(
            BatchNormalization(input_shape=(self.seq_length,
                                            self.face_fusion_f_dim),
                               name='vw_visual_BN_1'))
        face_fusion.add(AveragePooling1D(pool_size=2,
                                         name='vw_visual_average'))
        face_fusion.add(LSTM(64, name='vw_visual_lstm'))
        face_fusion.add(Activation('relu', name='vw_visual_activation1'))
        face_fusion.add(BatchNormalization(name='vw_visual_BN_2'))
        face_fusion.add(Dropout(0.5, name='vw_visual_dropout_1'))
        face_fusion = self.add_hidden_layer(face_fusion, 'vw_visual')
        # word model
        word_fusion = Sequential()
        word_fusion.add(
            BatchNormalization(input_shape=(self.word_fusion_f_dim, ),
                               name='vw_word_BN_1'))
        # add the hidden layer
        word_fusion = self.add_hidden_layer(word_fusion, 'vw_word')

        face_fusion_input = face_fusion.input
        face_fusion_output = face_fusion.layers[-1].output
        word_input = word_fusion.input
        word_output = word_fusion.layers[-1].output

        concat_layer = concatenate([face_fusion_output, word_output])

        x = Dense(1024)(concat_layer)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.5)(x)

        out = self.decision_layer('bimodal-vw')(x)

        fusion = Model([face_fusion_input, word_input], out)
        return fusion
Code example #19
    def body_fusion(self):
        # when input is visual feature
        rand = self.get_random()
        model = Sequential()
        model.add(
            BatchNormalization(input_shape=(self.seq_length,
                                            self.body_fusion_f_dim),
                               name='body_fusion_BN_1' + rand))
        model.add(
            AveragePooling1D(pool_size=2, name='body_fusion_average' + rand))

        # lstm layer
        model.add(LSTM(64, name='body_fusion_lstm' + rand))
        model.add(Activation('relu', name='body_fusion_activation1' + rand))
        model.add(BatchNormalization(name='body_fusion_BN_2' + rand))
        model.add(Dropout(0.5, name='body_fusion_dropout_1' + rand))

        # the hidden layer
        model = self.add_hidden_layer(model, 'body_fusion_hidden' + rand)

        # the decision layer
        model.add(self.decision_layer('body_fusion' + rand))
        return model
Code example #20
def train_model():
    # Define the Conv1D network architecture
    input_layer = Input(shape=(time_steps, 1), dtype='float32')
    zeropadding_layer = ZeroPadding1D(padding=1)(input_layer)

    conv1D_layer1 = Conv1D(64, 3, strides=1, use_bias=True)(zeropadding_layer)
    avgpooling_layer = AveragePooling1D(pool_size=3, strides=1)(conv1D_layer1)

    flatten_layer = Flatten()(avgpooling_layer)
    dropout_layer = Dropout(.45)(flatten_layer)

    output_layer = Dense(1, activation='tanh')(dropout_layer)

    ts_model = Model(inputs=input_layer, outputs=output_layer)
    # Compile the model
    ts_model.compile(loss='mean_absolute_error', optimizer='adam')

    # ts_model.summary()  # print the model's layer structure

    # Save the model with the lowest validation loss to an HDF5 file
    save_best = ModelCheckpoint(
        '%s_conv1D_weights.{epoch:02d}-{val_loss:.4f}.h5' % stock_code,
        monitor='val_loss',
        verbose=2,
        save_best_only=True,
        save_weights_only=False,
        mode='min',
        period=1)
    # Start training (fitting) the model
    ts_model.fit(x=X_train,
                 y=y_train,
                 batch_size=16,
                 epochs=45,
                 verbose=2,
                 callbacks=[save_best],
                 validation_data=(X_val, y_val),
                 shuffle=True)
Code example #21
    def bimodal_model_audio_visual(self):
        # audio model
        audio_model = Sequential()
        audio_model.add(
            BatchNormalization(input_shape=(1582, ), name='av_audio_BN_1'))
        audio_model = self.add_hidden_layer(audio_model, 'av_audio')

        #visual model
        visual_model = Sequential()
        visual_model.add(
            BatchNormalization(input_shape=(self.seq_length, 4805),
                               name='av_visual_BN_1'))
        visual_model.add(
            AveragePooling1D(pool_size=2, name='av_visual_average'))
        visual_model.add(LSTM(64, name='av_visual_lstm'))
        visual_model.add(Activation('relu', name='av_visual_activation1'))
        visual_model.add(BatchNormalization(name='av_visual_BN_2'))
        visual_model.add(Dropout(0.5, name='av_visual_dropout_1'))
        visual_model = self.add_hidden_layer(visual_model, 'av_visual')

        audio_input = audio_model.input
        audio_output = audio_model.layers[-1].output
        visual_input = visual_model.input
        visual_output = visual_model.layers[-1].output

        concat_layer = concatenate([audio_output, visual_output])

        x = Dense(1024)(concat_layer)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.5)(x)

        out = self.decision_layer('bimodal-av')(x)

        fusion_model = Model([audio_input, visual_input], out)
        return fusion_model
Code example #22
def conv_filt(input_dim, no_filters, filt_sizes):

    filt_models = []
    strides_list = [1] * len(no_filters)
    print(no_filters, filt_sizes, strides_list)
    repeat_threshold = int(len(no_filters) / 2)
    for filt_ind, filt in enumerate(no_filters):
        model = Sequential()
        model.add(Embedding(20000, 300, input_length=MaxLen))
        model.add(
            Conv1D(no_filters[filt_ind],
                   filt_sizes[filt_ind],
                   strides=strides_list[filt_ind]))
        model.add(Activation('relu'))
        if filt_ind < repeat_threshold:
            pool_sizes = 2
        else:
            pool_sizes = 7
        print(pool_sizes)
        model.add(AveragePooling1D(pool_size=pool_sizes))
        model.add(Dropout(0.5))
        model.add(GlobalAveragePooling1D())
        filt_models.append(model)
    return filt_models
Code example #23
    def __init__(self, input_shape, classes=7):
        x_input = Input(input_shape)
        x = Reshape((600, 2))(x_input)
        x = Conv1D(64, 7, strides=2, padding='same', name='conv1')(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(3, strides=2)(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=1)
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

        # x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        # x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
        # x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
        # x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
        # x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
        # x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

        # x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        # x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        # x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        x = AveragePooling1D(classes, name='avg_pool')(x)

        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc'+str(classes))(x)
        self.model = Model(inputs=x_input, outputs=x, name='ResNet1D')
        self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

        self.history = None
Code example #24
#Add zero padding
zeropadding_layer = ZeroPadding1D(padding=1)(input_layer)

# The first argument of Conv1D is the number of filters, which determines the number of
# features in the output. The second argument is the length of the 1D convolution window.
# The third argument is strides, the number of places to shift the convolution window.
# Lastly, setting use_bias to True adds a bias value during the computation of each output
# feature. Here, the 1D convolution can be thought of as generating local AR models over a
# rolling window of three time units.

# In[23]:

#Add 1D convolution layer
conv1D_layer = Conv1D(64, 3, strides=1, use_bias=True)(zeropadding_layer)

# AveragePooling1D is added next to downsample the input by taking the average over a pool
# size of three with a stride of one timestep. The average pooling in this case can be
# thought of as taking moving averages over a rolling window of three time units. We have
# used average pooling instead of max pooling to generate the moving averages.

# In[24]:

#Add AveragePooling1D layer
avgpooling_layer = AveragePooling1D(pool_size=3, strides=1)(conv1D_layer)

# The preceding pooling layer returns a 3D output. Hence, before passing it to the output
# layer, a Flatten layer is added. The Flatten layer reshapes the input to
# (number of samples, number of timesteps * number of features per timestep), which is then
# fed to the output layer.

# In[25]:

#Add Flatten layer
flatten_layer = Flatten()(avgpooling_layer)

# In[26]:

dropout_layer = Dropout(0.2)(flatten_layer)

# In[27]:

#Finally the output layer gives prediction for the next day's air pressure.
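A quick standalone check of the moving-average interpretation described above (illustrative values):

import numpy as np
from keras.models import Sequential
from keras.layers import AveragePooling1D

seq = np.array([[[1.], [2.], [3.], [4.], [5.]]])  # shape (batch=1, steps=5, features=1)
m = Sequential([AveragePooling1D(pool_size=3, strides=1, input_shape=(5, 1))])
print(m.predict(seq).ravel())  # [2. 3. 4.] -- the 3-point rolling means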
Code example #25
File: EI_REG.py Project: ynuwm/SemEval-2018
#x_shuffled = x_train[shuffle_indices]
#y_shuffled = score_train[shuffle_indices]

embed_1 = Embedding(input_dim=len(word_indices) + 1, output_dim=EMBEDDING_DIM, weights=[word_embedding_matrix], 
                    input_length=MAX_SEQUENCE_LENGTH, trainable=True)

conv_1 = Conv1D(128, 3, activation='relu', name='conv1')
conv_2 = Conv1D(128, 3, activation='relu', name='conv2')
conv_3 = Conv1D(256, 3, activation='relu', name='conv3')
conv_4 = Conv1D(256, 3, activation='relu', name='conv4')
conv_5 = Conv1D(256, 3, activation='relu', name='conv5')
conv_6 = Conv1D(1024, 3, activation='relu', name='conv6')
conv_7 = Conv1D(1024, 3, activation='relu', name='conv7')
conv_8 = Conv1D(1024, 3, activation='relu', name='conv8')

pool_1 = AveragePooling1D(pool_size=3, name='pool1')
pool_2 = AveragePooling1D(pool_size=3, name='pool2')
pool_3 = MaxPooling1D(pool_size=3, name='pool3')
pool_4 = MaxPooling1D(pool_size=3, name='pool4')

lstm_1 = LSTM(256, name='lstm1', return_sequences=True)
lstm_2 = LSTM(128, name='lstm2', return_sequences=True)
lstm_3 = LSTM(64, name='lstm3')

gru_1 = GRU(256, name='gru1', return_sequences=True)
gru_2 = GRU(256, name='gru2', return_sequences=True)
gru_3 = GRU(256, name='gru3')

bi_lstm_1 = Bidirectional(lstm_1, name='bilstm1')
bi_lstm_2 = Bidirectional(lstm_2, name='bilstm2')
bi_lstm_3 = Bidirectional(lstm_3, name='bilstm3')
Code example #26
File: cnn_models.py Project: Spiffical/StarNet
    def model(self):
        """
        Implementation of the popular ResNet
        https://www.kaggle.com/meownoid/tiny-resnet-with-keras-99-314

        Returns:
        model -- a Model() instance in Keras
        """
        from keras.layers.merge import add

        def block(n_output, upscale=False):
            # n_output: number of feature maps in the block
            # upscale: whether to use a 1x1 convolution mapping for the shortcut

            # keras functional api: return the function of type
            # Tensor -> Tensor
            def f(x):

                # H_l(x):
                # first pre-activation
                h = BatchNormalization()(x)
                h = Activation('relu')(h)
                # first convolution
                h = Conv1D(kernel_size=3,
                           filters=n_output,
                           strides=1,
                           padding='same')(h)

                # second pre-activation
                h = BatchNormalization()(h)
                h = Activation('relu')(h)
                # second convolution
                h = Conv1D(kernel_size=3,
                           filters=n_output,
                           strides=1,
                           padding='same')(h)

                # f(x):
                if upscale:
                    # 1x1 convolution
                    f = Conv1D(kernel_size=1,
                               filters=n_output,
                               strides=1,
                               padding='same')(x)
                else:
                    # identity
                    f = x

                # F_l(x) = f(x) + H_l(x):
                return add([f, h])

            return f

        x_input = Input(self.get_input_shape())

        # first convolution with post-activation to transform the input data to some reasonable form
        x = Conv1D(kernel_size=3, filters=4, strides=1,
                   padding='same')(x_input)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        # F_1
        x = block(4)(x)
        # F_2
        x = block(4)(x)

        # F_3
        # H_3 increases the number of feature maps (4 -> 8), and tensors of
        # inconsistent sizes cannot be added together, so we use upscale=True
        x = block(8, upscale=True)(x)
        # F_4
        x = block(8)(x)
        # F_5
        x = block(8)(x)

        # F_6
        x = block(16, upscale=True)(x)
        # F_7
        x = block(16)(x)

        # last activation of the entire network's output
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        # average pooling along the time dimension
        x = AveragePooling1D()(x)

        # dropout for more robust learning
        #x = Dropout(0.2)(x)

        # output layer
        x = Flatten()(x)
        x = Dense(len(self.targetname),
                  activation=self.last_layer_activation,
                  name='fc' + str(len(self.targetname)),
                  kernel_initializer=glorot_uniform(seed=0))(x)

        # Create model
        model = Model(inputs=x_input, outputs=x, name='ResNet')

        return model
Code example #27
File: cnn_models.py Project: Spiffical/StarNet
    def model(self):
        x_input = Input(self.get_input_shape())

        # Zero-Padding
        x = ZeroPadding1D(3)(x_input)

        # Stage 1
        x = Conv1D(4,
                   7,
                   strides=1,
                   name='conv1',
                   kernel_initializer=glorot_normal(seed=0))(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        # x = MaxPooling1D(3, strides=2)(x)

        # Stage 2
        x = conv_block(x,
                       kernel_size=3,
                       filters=[4, 4, 16],
                       stage=2,
                       block='a',
                       s=1)
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='b')
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='c')

        # Stage 3
        x = conv_block(x,
                       kernel_size=3,
                       filters=[8, 8, 32],
                       stage=3,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='b')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='c')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='d')

        # Stage 4
        x = conv_block(x,
                       kernel_size=3,
                       filters=[16, 16, 64],
                       stage=4,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='b')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='c')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='d')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='e')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='f')

        # Stage 5
        x = conv_block(x,
                       kernel_size=3,
                       filters=[32, 32, 128],
                       stage=5,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [32, 32, 128], stage=5, block='b')
        x = identity_block(x, 3, [32, 32, 128], stage=5, block='c')

        # AVGPOOL
        x = AveragePooling1D(2, name="avg_pool")(x)

        # Output layer
        x = Flatten()(x)
        mu, sigma = GaussianLayer(len(self.targetname), name='main_output')(x)

        # Additional 'input' for the labels
        label_layer = Input((len(self.targetname), ))

        # Create model
        model = Model(inputs=[x_input, label_layer],
                      outputs=[mu, sigma],
                      name='ResNetDeepEnsemble')

        # Define the loss function (needs to be defined here because it uses an intermediate layer)
        # NOTE: do not include loss function when compiling model because of this
        div_result = Lambda(lambda y: y[0] / y[1])(
            [K.square(label_layer - mu), sigma])
        loss = K.mean(0.5 * tf.log(sigma) + 0.5 * div_result) + 5

        # Add loss to model
        model.add_loss(loss)

        return model
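Because the loss term is attached with add_loss(), a plausible (hypothetical) training call compiles the returned model without a loss argument and passes the labels through the extra input; names and shapes here are assumptions:

model.compile(optimizer='adam')      # loss already attached via add_loss()
model.fit([X_train, y_train], None,  # labels enter through the label_layer input
          epochs=10, batch_size=32)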
Code example #28
File: cnn_models.py Project: Spiffical/StarNet
    def model(self):
        x_input = Input(self.get_input_shape())

        # Zero-Padding
        x = ZeroPadding1D(3)(x_input)

        # Stage 1
        x = Conv1D(4,
                   7,
                   strides=1,
                   name='conv1',
                   kernel_initializer=glorot_normal(seed=0))(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        # x = MaxPooling1D(3, strides=2)(x)

        # Stage 2
        x = conv_block(x,
                       kernel_size=3,
                       filters=[4, 4, 16],
                       stage=2,
                       block='a',
                       s=1)
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='b')
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='c')

        # Stage 3
        x = conv_block(x,
                       kernel_size=3,
                       filters=[8, 8, 32],
                       stage=3,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='b')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='c')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='d')

        # Stage 4
        x = conv_block(x,
                       kernel_size=3,
                       filters=[16, 16, 64],
                       stage=4,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='b')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='c')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='d')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='e')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='f')

        # Stage 5
        x = conv_block(x,
                       kernel_size=3,
                       filters=[32, 32, 128],
                       stage=5,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [32, 32, 128], stage=5, block='b')
        x = identity_block(x, 3, [32, 32, 128], stage=5, block='c')

        # AVGPOOL
        x = AveragePooling1D(2, name="avg_pool")(x)

        # Output layer
        x = Flatten()(x)
        mu, sigma = GaussianLayer(len(self.targetname), name='main_output')(x)

        # Create model
        model = Model(inputs=x_input, outputs=mu, name='ResNetDeepEnsemble')

        return model, sigma
Code example #29
File: cnn_models.py Project: Spiffical/StarNet
    def model(self):
        """
        Implementation of the popular ResNet, with the following architecture:
        CONV1D -> BATCHNORM -> RELU -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
        -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
        modified version retrieved from two sources:
            https://github.com/priya-dwivedi/Deep-Learning/blob/master/resnet_keras/
            https://github.com/viig99/mkscancer/blob/master/medcan_evaluate_next.py

        Returns:
        model -- a Model() instance in Keras
        """

        x_input = Input(self.get_input_shape())

        # Zero-Padding
        x = ZeroPadding1D(3)(x_input)

        # Stage 1
        x = Conv1D(4,
                   7,
                   strides=1,
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        # x = MaxPooling1D(3, strides=2)(x)

        # Stage 2
        x = conv_block(x,
                       kernel_size=3,
                       filters=[4, 4, 16],
                       stage=2,
                       block='a',
                       s=1)
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='b')
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='c')

        # Stage 3
        x = conv_block(x,
                       kernel_size=3,
                       filters=[8, 8, 32],
                       stage=3,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='b')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='c')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='d')

        # Stage 4
        x = conv_block(x,
                       kernel_size=3,
                       filters=[16, 16, 64],
                       stage=4,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='b')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='c')

        # AVGPOOL
        x = AveragePooling1D(2, name="avg_pool")(x)

        # Output layer
        x = Flatten()(x)
        x = Dense(len(self.targetname),
                  activation=self.last_layer_activation,
                  name='fc' + str(len(self.targetname)),
                  kernel_initializer=glorot_uniform(seed=0))(x)

        # Create model
        model = Model(inputs=x_input, outputs=x, name='ResNet')

        return model
Code example #30
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
main_input = Input(shape=(MAX_SENTENCE, MAX_SEQUENCE_LENGTH),
                   dtype='float32',
                   name="main_input")
sequence_input = TimeDistributed(embedding_layer,
                                 name="sequence_input")(main_input)
gru = GRU(HIDDEN_SIZE,
          return_sequences=True,
          kernel_initializer='glorot_uniform')
bi_gru = TimeDistributed(Bidirectional(gru, merge_mode='concat', weights=None),
                         name="bi_gru")(sequence_input)
pooled_hidden = TimeDistributed(AveragePooling1D(pool_size=TIMESTAMP_1,
                                                 strides=None,
                                                 padding='valid'),
                                name="pooled_hidden")(bi_gru)
x = Reshape((TIMESTAMP_2, 2 * HIDDEN_SIZE), name="x")(pooled_hidden)
h = Bidirectional(gru, merge_mode='concat', weights=None, name="h")(x)
dd = AveragePooling1D(pool_size=TIMESTAMP_2,
                      strides=None,
                      padding='valid',
                      name="dd")(h)
d = Reshape((2 * HIDDEN_SIZE, ), name="d")(dd)

wc = K.variable(value=0.25, name="wc")
ws = K.variable(value=0.25, name="ws")
wr = K.variable(value=0.25, name="wr")

for j in range(MAX_SENTENCE):