def residual_block(x, s, i, activation, causal, nb_filters, kernel_size):
    original_x = x
    if causal:
        # Left-pad, convolve with 'same' padding, then crop the right edge,
        # so each output only depends on current and past timesteps.
        x = ZeroPadding1D(((2 ** i) // 2, 0))(x)
        conv = AtrousConvolution1D(nb_filters, kernel_size,
                                   atrous_rate=2 ** i, border_mode='same',
                                   name='dilated_conv_%d_tanh_s%d' % (2 ** i, s))(x)
        conv = Cropping1D((0, (2 ** i) // 2))(conv)
    else:
        conv = AtrousConvolution1D(nb_filters, kernel_size,
                                   atrous_rate=2 ** i, border_mode='same',
                                   name='dilated_conv_%d_tanh_s%d' % (2 ** i, s))(x)

    if activation == 'norm_relu':
        x = Activation('relu')(conv)
        x = Lambda(channel_normalization)(x)
    elif activation == 'wavenet':
        x = wave_net_activation(conv)
    else:
        x = Activation(activation)(conv)

    x = SpatialDropout1D(0.05)(x)

    # 1x1 conv so the channel count matches the residual.
    x = Convolution1D(nb_filters, 1, border_mode='same')(x)
    # Keras 1 functional merge; in Keras 2 this is add([original_x, x]).
    res_x = merge([original_x, x], mode='sum')
    return res_x, x

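# Hedged usage sketch (not from the original source): a block like the one
# above is typically stacked with the dilation doubling per level, WaveNet/TCN
# style. `input_layer`, `nb_stacks` and `dilation_depth` are illustrative names.
x = input_layer
skip_connections = []
for s in range(nb_stacks):
    for i in range(dilation_depth):
        x, skip = residual_block(x, s, i, activation='wavenet', causal=True,
                                 nb_filters=64, kernel_size=2)
        skip_connections.append(skip)
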
def build_residual_block(input_layer):
    # WaveNet-style gated activation: two parallel dilated convolutions over
    # the same input, merged elementwise as tanh * sigmoid.
    # num_filters, kernel_size and dilation_rate come from the enclosing scope.
    sigm_conv1d = AtrousConvolution1D(num_filters, kernel_size,
                                      dilation_rate=dilation_rate,
                                      padding="same",
                                      activation="sigmoid")(input_layer)
    tanh_conv1d = AtrousConvolution1D(num_filters, kernel_size,
                                      dilation_rate=dilation_rate,
                                      padding="same",
                                      activation="tanh")(input_layer)
    multiplied = Multiply()([sigm_conv1d, tanh_conv1d])
    skip_connection = Conv1D(1, 1)(multiplied)
    residual = Add()([input_layer, skip_connection])
    return residual, skip_connection

def f(input_):
    residual = input_
    tanh_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                   atrous_rate=atrous_rate,
                                   border_mode='same',
                                   activation='tanh')(input_)
    sigmoid_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                      atrous_rate=atrous_rate,
                                      border_mode='same',
                                      activation='sigmoid')(input_)
    merged = keras.layers.Multiply()([tanh_out, sigmoid_out])
    skip_out = Convolution1D(1, 1, activation='relu', border_mode='same')(merged)
    out = keras.layers.Add()([skip_out, residual])
    return out, skip_out

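# The gated blocks above mix Keras 1 conv kwargs (atrous_rate/border_mode)
# with Keras 2 merge layers. A minimal self-contained sketch of the same idea
# on the Keras 2 API, where AtrousConvolution1D was folded into Conv1D via
# dilation_rate; the hyperparameter defaults here are illustrative only.
from keras.layers import Conv1D, Multiply, Add

def gated_residual_block(input_, n_filters=32, filter_size=2, dilation_rate=2):
    tanh_out = Conv1D(n_filters, filter_size, dilation_rate=dilation_rate,
                      padding='same', activation='tanh')(input_)
    sigmoid_out = Conv1D(n_filters, filter_size, dilation_rate=dilation_rate,
                         padding='same', activation='sigmoid')(input_)
    merged = Multiply()([tanh_out, sigmoid_out])  # WaveNet gating
    # 1x1 conv back to one channel so the residual addition matches the input
    skip_out = Conv1D(1, 1, padding='same', activation='relu')(merged)
    out = Add()([skip_out, input_])
    return out, skip_out
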
def final_model_mod(input_dim, filters, kernel_size, conv_stride,
                    conv_border_mode, units, output_dim=29):
    """ Build a deep network for speech """
    # Main acoustic input
    input_data = Input(name='the_input', shape=(None, input_dim))
    # Add convolutional layer
    # (AtrousConvolution1D is the old Keras 1 name; current Keras uses
    # Conv1D with a dilation_rate argument.)
    conv_1d = AtrousConvolution1D(filters, kernel_size,
                                  strides=conv_stride,
                                  padding=conv_border_mode,
                                  activation='relu',
                                  name='conv1d')(input_data)
    # Add batch normalization
    bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
    # Add a recurrent layer (layer names must be unique, hence rnn_1/rnn_2)
    simp_rnn = Bidirectional(LSTM(units, activation='relu',
                                  return_sequences=True, implementation=2,
                                  recurrent_dropout=0.0, name='rnn_1'),
                             merge_mode="concat")(bn_cnn)
    dropout_simp_rnn = Dropout(0.5)(simp_rnn)
    # Add batch normalization
    bn_rnn = BatchNormalization(name="bn_rnn")(dropout_simp_rnn)
    simp_rnn1 = Bidirectional(LSTM(units, activation='relu',
                                   return_sequences=True, implementation=2,
                                   recurrent_dropout=0.0, name='rnn_2'),
                              merge_mode="concat")(bn_rnn)
    # Add a TimeDistributed(Dense(output_dim)) layer
    time_dense = TimeDistributed(Dense(output_dim))(simp_rnn1)
    # Add softmax activation layer
    y_pred = Activation('softmax', name='softmax')(time_dense)
    # Specify the model
    model = Model(inputs=input_data, outputs=y_pred)
    # Identity only holds for stride 1 with 'same' padding; see the
    # output-length helper sketched below.
    model.output_length = lambda x: x
    model.summary()
    return model

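# Hedged helper for the output_length caveat above, using the standard conv
# output-length arithmetic, in case the stride or padding differs. The name
# and signature are illustrative, not from the original project.
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if padding == 'same':
        output_length = input_length
    elif padding == 'valid':
        output_length = input_length - dilated_filter_size + 1
    else:
        raise ValueError("padding must be 'same' or 'valid'")
    # Ceiling division by the stride
    return (output_length + stride - 1) // stride
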
def f(input_):
    residual = input_
    tanh_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                   atrous_rate=atrous_rate,
                                   border_mode='same',
                                   activation='tanh')(input_)
    sigmoid_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                      atrous_rate=atrous_rate,
                                      border_mode='same',
                                      activation='sigmoid')(input_)
    merged = merge([tanh_out, sigmoid_out], mode='mul')
    skip_out = Convolution1D(1, 1, activation='relu', border_mode='same')(merged)
    out = merge([skip_out, residual], mode='sum')
    return out, skip_out

def CausalConvolution1D(input_layer, nfilters, filter_length,
                        atrous_rate=1, activation="linear",
                        batch_norm=True, **kwargs):
    # Effective (dilated) filter length
    total_length = filter_length + (filter_length - 1) * (atrous_rate - 1)
    # Asymmetric padding: zeros added only on the left side
    padd = ZeroPadding1D((total_length - 1, 0))(input_layer)
    # Convolution
    conv = AtrousConvolution1D(nfilters, filter_length,
                               atrous_rate=atrous_rate,
                               border_mode='valid', **kwargs)(padd)
    if batch_norm:
        bn = BatchNormalization()(conv)
        activ = Activation(activation)(bn)
    else:
        activ = Activation(activation)(conv)
    return activ

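# In Keras 2 the same asymmetric left padding is built in: Conv1D with
# padding='causal' pads by (filter_length - 1) * dilation_rate on the left,
# which equals the total_length - 1 computed above. A reduced sketch of the
# helper (the batch-norm branch is omitted here):
from keras.layers import Conv1D

def causal_conv1d(input_layer, nfilters, filter_length, dilation_rate=1,
                  activation='linear'):
    return Conv1D(nfilters, filter_length, dilation_rate=dilation_rate,
                  padding='causal', activation=activation)(input_layer)
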
def f(input_):
    residual = input_
    tanh_out = AtrousConvolution1D(atrous_n_filters, atrous_filter_size,
                                   atrous_rate=atrous_rate,
                                   border_mode='same',
                                   W_regularizer=l2(L2REGULARIZER),
                                   activation='tanh')(input_)
    sigmoid_out = AtrousConvolution1D(atrous_n_filters, atrous_filter_size,
                                      atrous_rate=atrous_rate,
                                      border_mode='same',
                                      W_regularizer=l2(L2REGULARIZER),
                                      activation='sigmoid')(input_)
    merged = merge([tanh_out, sigmoid_out], mode='mul')
    # Could add batchnorm here like so. Way slow though:
    # merged = BatchNormalization()(merged)
    skip_out = Convolution1D(1, 1, border_mode='same',
                             W_regularizer=l2(L2REGULARIZER),
                             activation='relu')(merged)
    out = merge([skip_out, residual], mode='sum')
    return out, skip_out

import numpy as np
from keras.models import Model
from keras.layers import Input, AtrousConvolution1D

# Minimal smoke test: one dilated conv layer trained on random data.
inp = Input((100, 1))
out = AtrousConvolution1D(1, 2, atrous_rate=25, border_mode='same')(inp)
M = Model(inp, out)
M.compile('sgd', 'mse')
M.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))

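# The same smoke test on Keras 2, where AtrousConvolution1D no longer exists
# and Conv1D takes a dilation_rate argument (equivalent sketch):
import numpy as np
from keras.models import Model
from keras.layers import Input, Conv1D

inp = Input((100, 1))
out = Conv1D(1, 2, dilation_rate=25, padding='same')(inp)
model = Model(inp, out)
model.compile('sgd', 'mse')
model.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))
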
def build_model(input_shape, appliances):
    seq_length = input_shape[0]
    x = Input(shape=input_shape)

    # time_conv
    #conv_1 = Conv1D(filters=40, kernel_size=9, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_1 = AtrousConvolution1D(40, 5, border_mode='same', atrous_rate=2)(x)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = PReLU()(conv_1)
    drop_1 = Dropout(0.14)(conv_1)
    #conv_2 = Conv1D(filters=40, kernel_size=7, strides=2, padding=MODEL_CONV_PADDING)(drop_1)
    conv_2 = AtrousConvolution1D(40, 5, border_mode='same', atrous_rate=2)(drop_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = PReLU()(conv_2)
    drop_2 = Dropout(0.16)(conv_2)
    # freq_conv
    conv_3 = Conv1D(filters=80, kernel_size=5, strides=1, padding=MODEL_CONV_PADDING)(drop_2)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = PReLU()(conv_3)
    drop_3 = Dropout(0.18)(conv_3)
    conv_4 = Conv1D(filters=80, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = PReLU()(conv_4)
    drop_4 = Dropout(0.22)(conv_4)
    #===============================================================================================
    # time_conv
    #conv_10 = Conv1D(filters=10, kernel_size=31, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_10 = AtrousConvolution1D(10, 15, border_mode='same', atrous_rate=2)(x)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = PReLU()(conv_10)
    drop_10 = Dropout(0.12)(conv_10)
    #conv_20 = Conv1D(filters=10, kernel_size=25, strides=6, padding=MODEL_CONV_PADDING)(drop_10)
    conv_20 = AtrousConvolution1D(10, 15, border_mode='same', atrous_rate=2)(drop_10)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = PReLU()(conv_20)
    drop_20 = Dropout(0.14)(conv_20)
    # freq_conv
    conv_30 = Conv1D(filters=20, kernel_size=5, strides=1, padding=MODEL_CONV_PADDING)(drop_20)
    conv_30 = BatchNormalization()(conv_30)
    conv_30 = PReLU()(conv_30)
    drop_30 = Dropout(0.16)(conv_30)
    conv_40 = Conv1D(filters=20, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_30)
    conv_40 = BatchNormalization()(conv_40)
    conv_40 = PReLU()(conv_40)
    drop_40 = Dropout(0.18)(conv_40)
    #===============================================================================================
    # time_conv
    #conv_11 = Conv1D(filters=10, kernel_size=61, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_11 = AtrousConvolution1D(10, 31, border_mode='same', atrous_rate=2)(x)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = PReLU()(conv_11)
    drop_11 = Dropout(0.12)(conv_11)
    #conv_21 = Conv1D(filters=10, kernel_size=49, strides=12, padding=MODEL_CONV_PADDING)(drop_11)
    conv_21 = AtrousConvolution1D(10, 31, border_mode='same', atrous_rate=2)(drop_11)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = PReLU()(conv_21)
    drop_21 = Dropout(0.12)(conv_21)
    # freq_conv
    conv_31 = Conv1D(filters=20, kernel_size=5, strides=1, padding=MODEL_CONV_PADDING)(drop_21)
    conv_31 = BatchNormalization()(conv_31)
    conv_31 = PReLU()(conv_31)
    drop_31 = Dropout(0.15)(conv_31)
    conv_41 = Conv1D(filters=20, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_31)
    conv_41 = BatchNormalization()(conv_41)
    conv_41 = PReLU()(conv_41)
    drop_41 = Dropout(0.15)(conv_41)
    #===============================================================================================
    # time_conv
    #conv_12 = Conv1D(filters=20, kernel_size=21, strides=1, padding=MODEL_CONV_PADDING)(x)
    conv_12 = AtrousConvolution1D(20, 11, border_mode='same', atrous_rate=2)(x)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = PReLU()(conv_12)
    drop_12 = Dropout(0.14)(conv_12)
    #conv_22 = Conv1D(filters=20, kernel_size=17, strides=4, padding=MODEL_CONV_PADDING)(drop_12)
    conv_22 = AtrousConvolution1D(20, 11, border_mode='same', atrous_rate=2)(drop_12)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = PReLU()(conv_22)
    drop_22 = Dropout(0.14)(conv_22)
    # freq_conv
    conv_32 = Conv1D(filters=40, kernel_size=5, strides=1, padding=MODEL_CONV_PADDING)(drop_22)
    conv_32 = BatchNormalization()(conv_32)
    conv_32 = PReLU()(conv_32)
    drop_32 = Dropout(0.18)(conv_32)
    conv_42 = Conv1D(filters=40, kernel_size=3, strides=1, padding=MODEL_CONV_PADDING)(drop_32)
    conv_42 = BatchNormalization()(conv_42)
    conv_42 = PReLU()(conv_42)
    drop_42 = Dropout(0.18)(conv_42)
    #===============================================================================================
    conv_0 = Conv1D(filters=4, kernel_size=3, padding='same', activation='linear')(x)
    conv_0 = BatchNormalization()(conv_0)
    drop_0 = Dropout(0.15)(conv_0)

    # merge
    concate_5 = concatenate([drop_4, drop_40, drop_41, drop_42, drop_0])
    #reshape_8 = Reshape(target_shape=(seq_length, -1))(concate_5)
    #dense_7 = Dense(1920)(concate_5)
    #dense_7 = BatchNormalization()(dense_7)
    #dense_7 = PReLU()(dense_7)
    #drop_7 = Dropout(0.18)(dense_7)
    app_0 = Conv1D(filters=32, kernel_size=1, strides=1, padding=MODEL_CONV_PADDING)(concate_5)
    app_0 = BatchNormalization()(app_0)
    drop_7 = Dropout(0.22)(app_0)

    biLSTM_1 = Bidirectional(LSTM(32, dropout=0.1, recurrent_dropout=0.15,
                                  return_sequences=True))(drop_7)
    biLSTM_2 = Bidirectional(LSTM(32, dropout=0.1, recurrent_dropout=0.15,
                                  return_sequences=True))(biLSTM_1)

    outputs_disaggregation = []
    for appliance_name in appliances:
        biLSTM_3 = Bidirectional(LSTM(16, dropout=0.1, recurrent_dropout=0.15,
                                      return_sequences=True))(biLSTM_2)
        biLSTM_3 = PReLU()(biLSTM_3)
        outputs_disaggregation.append(
            TimeDistributed(Dense(1, activation='relu'),
                            name=appliance_name.replace(" ", "_"))(biLSTM_3))

    model = Model(inputs=x, outputs=outputs_disaggregation)
    optimizer = RMSprop(lr=0.001, clipnorm=40)
    model.compile(optimizer=optimizer, loss='mse', metrics=['mae', 'mse'])
    return model

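# Hedged call-site sketch for the disaggregation model above: the window
# length, channel count and appliance list are made-up placeholders, not
# values from the original training code.
model = build_model(input_shape=(512, 1),
                    appliances=['fridge', 'washing machine'])
model.summary()
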
# improves test accuracy, 0.849 -> 0.854
y_train = np.zeros((len(y_train_tmp), 2))
y_test = np.zeros((len(y_test_tmp), 2))
y_train[np.arange(len(y_train_tmp)), y_train_tmp] = 1
y_test[np.arange(len(y_test_tmp)), y_test_tmp] = 1

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.2))
model.add(AtrousConvolution1D(filters, kernel_size,
                              padding='valid',
                              activation='relu',
                              strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Bidirectional(LSTM(lstm_output_size, return_sequences=True)))
model.add(Dropout(0.6))
model.add(AtrousConvolution1D(filters, kernel_size,
                              padding='valid',
                              activation='relu',
                              strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Bidirectional(LSTM(lstm_output_size)))

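# The snippet stops after the second LSTM. Since the labels were one-hot
# encoded into two columns above, a plausible (hedged, not original) ending is
# a two-unit softmax head; x_train/x_test, batch_size and epochs are assumed
# to be defined alongside y_train_tmp/y_test_tmp.
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          validation_data=(x_test, y_test))
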
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import keras
from keras.models import Sequential
from keras.layers import Conv1D, Dense, Conv2D, AtrousConvolution1D

model = Sequential()
#model.add(Dense(1, input_shape=(1,), activation='linear'))
#model.add(Conv2D(32, kernel_size=(3, 3),
#                 activation='relu', input_shape=(28, 28, 1)))
#model.add(Conv1D(64, 2, dilation_rate=2, activation='tanh', input_shape=(128, 32)))
model.add(AtrousConvolution1D(64, 2, atrous_rate=2,
                              activation='tanh',
                              input_shape=(128, 32)))
model.save('dilation_conv1d.h5')

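# Round-trip check (assumes the same Keras version that saved the file, since
# AtrousConvolution1D is not present in later releases):
from keras.models import load_model

restored = load_model('dilation_conv1d.h5')
restored.summary()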