Example #1
    def build_model(self, input_shape, nb_classes):
        # reference: https://github.com/titu1994/Keras-ResNeXt/blob/master/resnext.py
        OUTPUT_CLASS = nb_classes
        input1 = Input(shape=input_shape, name='input_ecg')
        k = 1  # increment every 4th residual block
        p = True  # pool toggle every other residual block (end with 2^8)
        convfilt = 64
        encoder_confilt = 64  # encoder filters' num
        convstr = 1
        ksize = 16
        poolsize = 2
        poolstr = 2
        drop = 0.5
        cardinality = 16
        grouped_channels = int(convfilt / cardinality)

        # First convolutional block (conv, BN, relu)
        lcount = 0
        x = Conv1D(filters=convfilt,
                   kernel_size=ksize,
                   padding='same',
                   strides=convstr,
                   kernel_initializer='he_normal',
                   name='layer' + str(lcount))(input1)
        lcount += 1
        x = BatchNormalization(name='layer' + str(lcount))(x)
        lcount += 1
        x = Activation('relu')(x)

        ## Second convolutional block (conv, BN, relu, dropout, conv) with residual net
        # Left branch (convolutions)
        x1 = Conv1D(filters=convfilt,
                    kernel_size=ksize,
                    padding='same',
                    strides=convstr,
                    kernel_initializer='he_normal',
                    name='layer' + str(lcount))(x)
        lcount += 1
        x1 = BatchNormalization(name='layer' + str(lcount))(x1)
        lcount += 1
        x1 = Activation('relu')(x1)
        x1 = Dropout(drop)(x1)
        x1 = Conv1D(filters=convfilt,
                    kernel_size=ksize,
                    padding='same',
                    strides=convstr,
                    kernel_initializer='he_normal',
                    name='layer' + str(lcount))(x1)
        lcount += 1
        x1 = MaxPooling1D(pool_size=poolsize, strides=poolstr,
                          padding='same')(x1)
        # Right branch, shortcut branch pooling
        x2 = MaxPooling1D(pool_size=poolsize, strides=poolstr,
                          padding='same')(x)
        # Merge both branches
        x = keras.layers.add([x1, x2])
        del x1, x2

        fms = []
        ## Main loop
        p = not p
        for l in range(15):

            if (l % 4 == 0) and (
                    l > 0):  # increment k on every fourth residual block
                k += 1
                # match dimensions with a 1x1 convolution when the channel count changes
                xshort = Conv1D(filters=convfilt * k,
                                kernel_size=1,
                                name='layer' + str(lcount))(x)
                lcount += 1
                x = Conv1D(filters=convfilt * k,
                           kernel_size=1,
                           name='layer' + str(lcount))(x)
                lcount += 1
            else:
                xshort = x
                # Left branch (convolutions)

            grouped_channels = int(convfilt * k / cardinality)
            # notice the ordering of the operations has changed
            x1 = BatchNormalization(name='layer' + str(lcount))(x)
            lcount += 1
            x1 = Activation('relu')(x1)
            x1 = Dropout(drop)(x1)
            ### grouped convolutional block
            ksize_choice = [2, 4, 8, 16, 20, 24, 28, 32]
            group_list = []
            for c in range(cardinality):
                x_tmp = Lambda(lambda z: z[:, :, c * grouped_channels:
                                           (c + 1) * grouped_channels])(x1)

                x_tmp = Conv1D(grouped_channels,
                               ksize_choice[int(c / 2)],
                               padding='same',
                               strides=convstr,
                               kernel_initializer='he_normal',
                               name='layer' + str(lcount))(x_tmp)
                group_list.append(x_tmp)
                lcount += 1

            x1 = concatenate(group_list, axis=-1)
            # x1 = x_tmp

            # x1 = Conv1D(filters=convfilt * k,
            #             kernel_size=ksize,
            #             padding='same',
            #             strides=convstr,
            #             kernel_initializer='he_normal', name='layer' + str(lcount))(x1)
            # lcount += 1
            x1 = BatchNormalization(name='layer' + str(lcount))(x1)
            lcount += 1
            x1 = Activation('relu')(x1)
            x1 = Dropout(drop)(x1)

            ### grouped convolutional block
            group_list = []
            for c in range(cardinality):
                x_tmp = Lambda(lambda z: z[:, :, c * grouped_channels:
                                           (c + 1) * grouped_channels])(x1)

                x_tmp = Conv1D(grouped_channels,
                               ksize_choice[int(c / 2)],
                               padding='same',
                               strides=convstr,
                               kernel_initializer='he_normal',
                               name='layer' + str(lcount))(x_tmp)

                group_list.append(x_tmp)
                lcount += 1

            x1 = concatenate(group_list, axis=-1)
            # x1 = x_tmp
            # x1 = Conv1D(filters=convfilt * k,
            #             kernel_size=ksize,
            #             padding='same',
            #             strides=convstr,
            #             kernel_initializer='he_normal', name='layer' + str(lcount))(x1)
            # lcount += 1
            if p:
                x1 = MaxPooling1D(pool_size=poolsize,
                                  strides=poolstr,
                                  padding='same')(x1)

                # Right branch: shortcut connection
            if p:
                x2 = MaxPooling1D(pool_size=poolsize,
                                  strides=poolstr,
                                  padding='same')(xshort)
            else:
                x2 = xshort  # pool or identity
            # Merging branches
            x = keras.layers.add([x1, x2])
            # change parameters
            p = not p  # toggle pooling

        # x = Conv1D(filters=convfilt * k, kernel_size=ksize, padding='same', strides=convstr, kernel_initializer='he_normal')(x)
        # x_reg = Conv1D(filters=convfilt * k, kernel_size=1, padding='same', strides=convstr, kernel_initializer='he_normal')(x)

        # Final bit
        x = BatchNormalization(name='layer' + str(lcount))(x)
        lcount += 1
        x = Activation('relu')(x)

        x_ecg = Flatten()(x)

        out1 = Dense(OUTPUT_CLASS, activation='softmax',
                     name='main_output')(x_ecg)

        model = Model(inputs=input1, outputs=out1)

        model.summary()

        return model
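# A minimal standalone sketch (not from the original) of the grouped-convolution
# trick used above: slice the channels into `cardinality` groups with Lambda,
# convolve each group separately, then concatenate. Binding `c` as a default
# argument avoids the late-binding pitfall if the lambdas are ever re-traced.
from tensorflow.keras.layers import Input, Conv1D, Lambda, concatenate
from tensorflow.keras.models import Model

cardinality = 4
grouped_channels = 16  # total channels / cardinality

inp = Input(shape=(256, cardinality * grouped_channels))
groups = []
for c in range(cardinality):
    sl = Lambda(lambda z, c=c:
                z[:, :, c * grouped_channels:(c + 1) * grouped_channels])(inp)
    groups.append(Conv1D(grouped_channels, 16, padding='same')(sl))
out = concatenate(groups, axis=-1)
Model(inp, out).summary()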
Example #2
_X_train = np.expand_dims(x_train, axis=2)

#%%
_X_train.shape

# %%

# model = Sequential()
# model.add(Conv1D(filters=64,kernel_size=2,activation='relu',input_shape=(30,1)))
# model.add(MaxPooling1D(pool_size=2))
# model.add(Flatten())
# model.add(Dense(50,activation='relu'))
# model.add(Dense(1))

model = Sequential()
model.add(Conv1D(filters=32, kernel_size=2, activation='relu', input_shape=(30, 1)))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(8, activation='relu'))
model.add(Dense(5, activation='softmax'))  # softmax so the sparse CE loss receives probabilities
model.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
# model.compile(optimizer='adam', loss=tf.keras.losses.mse, metrics=['acc'])
print('compile ok')
#%%
model.summary()
# %%
model.fit(_X_train,y_train,epochs=1,verbose=1)
# %%
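# Hypothetical dummy data (not in the original) showing the shapes this
# model expects: x_train of shape (N, 30) expanded to (N, 30, 1), and
# integer labels in [0, 5) to match the 5-unit output.
import numpy as np
x_train = np.random.rand(100, 30).astype('float32')
y_train = np.random.randint(0, 5, size=(100,))
_X_train = np.expand_dims(x_train, axis=2)  # -> (100, 30, 1)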
Example #3
def build_model(hp, conv_layers, input_shape):
    """Build a keras model.

    Uses keras tuner to build model - can control # layers, # filters in each layer, kernel size,
    regularization etc

    Parameters
    ----------
    hp : keras_tuner.HyperParameters
            Hyperparameters class from which to sample hyperparameters

    conv_layers : int
            number of layers (one layer is Conv and MaxPool) in the sequential model.

    input_shape : tuple
            input shape of X so the model gets built correctly as layers are added

    Returns
    -------
    model : tensorflow.keras.Model
            compiled model that uses hyperparameters defined inline to hypertune the model

    """
    model = Sequential()
    model.add(
        Conv1D(
            filters=hp.Int("init_conv_filters",
                           min_value=32,
                           max_value=512,
                           step=32),
            kernel_size=hp.Int("init_conv_kernel",
                               min_value=1,
                               max_value=4,
                               step=1),
            activation="relu",
            input_shape=input_shape,
        ))

    for i in range(conv_layers - 1):
        model.add(
            Conv1D(
                filters=hp.Int("conv_filters" + str(i),
                               min_value=32,
                               max_value=512,
                               step=32),
                kernel_size=hp.Int("conv_kernel" + str(i),
                                   min_value=1,
                                   max_value=4,
                                   step=1),
                activation="relu",
            ))

        model.add(MaxPool1D(pool_size=2, padding="same"))

    model.add(Dropout(0.25))
    model.add(Flatten())

    dense_filters_2 = hp.Int("dense_filters_2",
                             min_value=32,
                             max_value=512,
                             step=32)
    model.add(Dense(dense_filters_2, activation="relu"))
    model.add(Dropout(0.25))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(1, activation="linear"))

    model.compile(loss="mean_squared_error",
                  optimizer="adam",
                  metrics=["mean_squared_error"])

    return model
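# A hedged driver for build_model (assumes the keras_tuner package and
# hypothetical arrays X, y); partial() binds the non-hyperparameter args
# so the tuner sees the usual one-argument hypermodel.
import keras_tuner as kt
from functools import partial

tuner = kt.RandomSearch(
    hypermodel=partial(build_model, conv_layers=3, input_shape=(100, 1)),
    objective="val_mean_squared_error",
    max_trials=10,
)
# tuner.search(X, y, epochs=20, validation_split=0.2)  # X, y are assumed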
Example #4
X = dataset.iloc[:, 34:]
Y = dataset.iloc[:, 1]
print(X.shape)

from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
Y = le.fit_transform(Y)

from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
X_train = tf.reshape(X_train, (X_train.shape[0], 1000, 1))
X_test = tf.reshape(X_test, (X_test.shape[0], 1000, 1))

# Initializing the CNN
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', strides=2, padding='valid', input_shape=(1000, 1)))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=128, kernel_size=3, activation='relu', strides=2, padding='valid'))
model.add(MaxPooling1D(pool_size=2))

model.add(Dropout(0.9))
model.add(MaxPooling1D(pool_size=2))

model.add(Flatten())

model.add(Dense(21)) 
model.add(Activation('softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs = 150, batch_size = 32, validation_data = (X_test, Y_test), shuffle = True)
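# Length bookkeeping for the stack above, using the 'valid' formula
# L_out = (L_in - k) // s + 1 (a quick check, not part of the original):
# 1000 -conv(k=3,s=2)-> 499 -pool(2)-> 249 -conv(k=3,s=2)-> 124
#      -pool(2)-> 62 -dropout-> 62 -pool(2)-> 31
# so Flatten sees 31 * 128 = 3968 features.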

Example #5
# Read the binary data for time series data (ts_data)
ts_data = np.fromfile("data/data.bin", dtype=float)
ts_data = ts_data.reshape(60, 2)
ts_data = np.expand_dims(ts_data, axis=0)
print(ts_data)

# Model constraints
input_shape = (60, 2)
BATCH_SIZE = 128
latent_dim = 4

# The Encoder part of the model
inp = Input(shape=input_shape, name='input')
encoder_conv1 = Conv1D(filters=16,
                       kernel_size=3,
                       padding='same',
                       dilation_rate=1,
                       activation='relu',
                       kernel_regularizer=regularizers.l2(3e-5))(inp)
encoder_conv2 = Conv1D(filters=16,
                       kernel_size=3,
                       padding='same',
                       dilation_rate=2,
                       activation='relu',
                       kernel_regularizer=regularizers.l2(3e-5))(encoder_conv1)
encoder_conv3 = Conv1D(filters=16,
                       kernel_size=3,
                       padding='same',
                       dilation_rate=4,
                       activation='relu',
                       kernel_regularizer=regularizers.l2(3e-5))(encoder_conv2)
encoder_flat = Flatten()(encoder_conv3)
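# Side note (not in the original): stacking kernel-3 convolutions with
# dilation rates 1, 2, 4 gives a receptive field of
# 1 + (3 - 1) * (1 + 2 + 4) = 15 timesteps per encoded position.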
Example #6
#     maxx = np.max(data[:,:,i])
#     data[:,:,i] = (data[:,:,i] - minx) / (maxx - minx)
#
# OH this was bad!

data_slice = data[0:1][:][:]
print(data.shape)
print(data_slice.shape)

window_length = data.shape[1]

#TODO: Normalize Data

#Encoder
input_window = Input(shape=(window_length, 3))
x = Conv1D(16, 3, activation="relu",
           padding="same")(input_window)  # Full Dimension
x = BatchNormalization()(x)
x = MaxPooling1D(3, padding="same")(x)
x = Conv1D(1, 3, activation="relu", padding="same")(x)
x = BatchNormalization()(x)
encoded = MaxPooling1D(2, padding="same")(
    x)  # 3 dims... I'm not super convinced this is actually 3 dimensions

encoder = Model(input_window, encoded)

# 3 dimensions in the encoded layer

x = Conv1D(1, 3, activation="relu", padding="same")(encoded)  # Latent space
x = BatchNormalization()(x)
x = UpSampling1D(2)(x)  # 6 dims
x = Conv1D(16, 3, activation='relu', padding='same')(x)  # 5 dims
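# A hedged completion of the truncated decoder above: upsample back to the
# input length and reconstruct the 3 input channels. This assumes
# window_length is divisible by 6 so the encoder/decoder lengths line up.
x = UpSampling1D(3)(x)
decoded = Conv1D(3, 3, activation='linear', padding='same')(x)

autoencoder = Model(input_window, decoded)
autoencoder.compile(optimizer='adam', loss='mse')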
Example #7
# additional parameters
params2 = {
    'conv_activation': 'relu',
    'output_activation': 'linear',  #'sigmoid',
    'signal_dataset_file': signal_dataset_file,
    'noise_dataset_file': noise_dataset_file,
    'npoints': npoints,
    'scale': scale,
    'offset': offset,
}
experiment.log_parameters(params2)

# Build model with functional API

input_img = Input(shape=(npoints, 1))
x = Conv1D(64, 5, padding='same',
           activation=params2['conv_activation'])(input_img)
x = MaxPooling1D(2, padding='same')(x)
x = Conv1D(32, 5, padding='same', activation=params2['conv_activation'])(x)
x = MaxPooling1D(2, padding='same')(x)
x = Conv1D(32, 5, padding='same', activation=params2['conv_activation'])(x)
encoded = MaxPooling1D(2, padding='same')(x)

x = Conv1D(32, 5, padding='same',
           activation=params2['conv_activation'])(encoded)
x = UpSampling1D(2)(x)
x = Conv1D(32, 5, padding='same', activation=params2['conv_activation'])(x)
x = UpSampling1D(2)(x)
x = Conv1D(64, 5, padding='same', activation=params2['conv_activation'])(x)
x = UpSampling1D(2)(x)
decoded = Conv1D(1, 5, padding='same',
                 activation=params2['output_activation'])(x)
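# Hedged next step (not in the original snippet): tie input and output into
# an autoencoder and compile it. The optimizer and loss here are assumptions
# (they are not among the values logged in params2), and Model is assumed
# imported from tensorflow.keras.models.
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.summary()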
Example #8
# Create all necessary splits to separate the data in train and validation sets
# Here, we will use only the first "sort" just for testing
from sklearn.model_selection import StratifiedKFold, KFold
kf = StratifiedKFold(n_splits=10, random_state=512, shuffle=True)
splits = [(train_index, val_index)
          for train_index, val_index in kf.split(data, target)]

# shuffle, sort = 0
x = data[splits[0][0]]
y = target[splits[0][0]]
x_val = data[splits[0][1]]
y_val = target[splits[0][1]]

model = Sequential()
model.add(Conv1D(16, kernel_size=2, activation='relu', input_shape=(100, 1)))
model.add(Conv1D(32, kernel_size=2, activation='relu'))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='linear'))
model.add(Activation('sigmoid'))

# compile the model
model.compile(
    'adam',
    loss='binary_crossentropy',
    metrics=['acc'],
)

sp_obj = sp(patience=25, verbose=True, save_the_best=True)
sp_obj.set_validation_data((x_val, y_val))
Example #9
def convolutional_layer(x):
    x = Conv1D(filters=number_of_kernels, kernel_size=2, strides=2)(x)
    x = Activation("relu")(x)
    x = MaxPooling1D(pool_size=1)(x)
    return x
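# Hypothetical usage of the helper above (number_of_kernels and the Keras
# imports are assumed to exist in the enclosing scope):
number_of_kernels = 16
inp = Input(shape=(100, 1))
out = convolutional_layer(inp)  # 'valid' conv, stride 2 -> shape (None, 50, 16)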
Example #10
def build_model_with_L2(input_tensor, convfilt=64, ksize=16, depth=15, drop=0):

    k = 1  # increment every 4th residual block
    p = True  # pool toggle every other residual block (end with 2^8)
    convfilt = convfilt
    encoder_confilt = 64  # encoder filters' num
    convstr = 1
    ksize = ksize
    poolsize = 2
    poolstr = 2
    drop = drop
    depth = depth

    # First convolutional block (conv, BN, relu)
    lcount = 0
    x = Conv1D(filters=convfilt,
               kernel_size=ksize,
               padding='same',
               strides=convstr,
               kernel_regularizer=keras.regularizers.l2(0.001),
               kernel_initializer='he_normal',
               name='layer' + str(lcount))(input_tensor)
    lcount += 1
    x = BatchNormalization(name='layer' + str(lcount))(x)
    lcount += 1
    x = Activation('relu')(x)

    ## Second convolutional block (conv, BN, relu, dropout, conv) with residual net
    # Left branch (convolutions)
    x1 = Conv1D(filters=convfilt,
                kernel_size=ksize,
                padding='same',
                strides=convstr,
                kernel_regularizer=keras.regularizers.l2(0.001),
                kernel_initializer='he_normal',
                name='layer' + str(lcount))(x)
    lcount += 1
    x1 = BatchNormalization(name='layer' + str(lcount))(x1)
    lcount += 1
    x1 = Activation('relu')(x1)
    if drop:
        x1 = Dropout(drop)(x1)
    x1 = Conv1D(filters=convfilt,
                kernel_size=ksize,
                padding='same',
                strides=convstr,
                kernel_regularizer=keras.regularizers.l2(0.001),
                kernel_initializer='he_normal',
                name='layer' + str(lcount))(x1)
    lcount += 1
    x1 = MaxPooling1D(pool_size=poolsize, strides=poolstr, padding='same')(x1)
    # Right branch, shortcut branch pooling
    x2 = MaxPooling1D(pool_size=poolsize, strides=poolstr, padding='same')(x)
    # Merge both branches
    x = keras.layers.add([x1, x2])
    del x1, x2

    # fms = []
    ## Main loop
    p = not p
    for l in range(depth):

        if (l % 4 == 0) and (l >
                             0):  # increment k on every fourth residual block
            k += 1
            # match dimensions with a 1x1 convolution when the channel count changes
            xshort = Conv1D(filters=convfilt * k,
                            kernel_size=1,
                            name='layer' + str(lcount))(x)
            lcount += 1
        else:
            xshort = x
            # Left branch (convolutions)
        # notice the ordering of the operations has changed
        x1 = BatchNormalization(name='layer' + str(lcount))(x)
        lcount += 1
        x1 = Activation('relu')(x1)
        if drop:
            x1 = Dropout(drop)(x1)
        x1 = Conv1D(filters=convfilt * k,
                    kernel_size=ksize,
                    padding='same',
                    strides=convstr,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    kernel_initializer='he_normal',
                    name='layer' + str(lcount))(x1)
        lcount += 1
        x1 = BatchNormalization(name='layer' + str(lcount))(x1)
        lcount += 1
        x1 = Activation('relu')(x1)
        if drop:
            x1 = Dropout(drop)(x1)
        x1 = Conv1D(filters=convfilt * k,
                    kernel_size=ksize,
                    padding='same',
                    strides=convstr,
                    kernel_regularizer=keras.regularizers.l2(0.001),
                    kernel_initializer='he_normal',
                    name='layer' + str(lcount))(x1)
        lcount += 1
        if p:
            x1 = MaxPooling1D(pool_size=poolsize,
                              strides=poolstr,
                              padding='same')(x1)

            # Right branch: shortcut connection
        if p:
            x2 = MaxPooling1D(pool_size=poolsize,
                              strides=poolstr,
                              padding='same')(xshort)
        else:
            x2 = xshort  # pool or identity
        # Merging branches
        x = keras.layers.add([x1, x2])
        # change parameters
        p = not p  # toggle pooling
        # if l == 5:
        #     fms.append(x)
        # if l == 6:
        #     fms.append(x)
        #     fms.append(x)
        #     fms.append(x)

    # x = Conv1D(filters=convfilt * k, kernel_size=ksize, padding='same', strides=convstr, kernel_initializer='he_normal')(x)
    # x_reg = Conv1D(filters=convfilt * k, kernel_size=1, padding='same', strides=convstr, kernel_initializer='he_normal')(x)

    # Final bit
    x = BatchNormalization(name='layer' + str(lcount))(x)
    lcount += 1
    x = Activation('relu')(x)

    x = Flatten()(x)

    # bbox_num = 1
    #
    # x2od2 = Conv1D(filters=bbox_num * 2, kernel_size=1, padding='same', strides=convstr,
    #                kernel_initializer='he_normal')(
    #     fms[0])
    # out2 = Reshape((1136, bbox_num, 2), name='aux_output1')(x2od2)
    #
    # x2od3 = Conv1D(filters=bbox_num * 2, kernel_size=1, padding='same', strides=convstr,
    #                kernel_initializer='he_normal')(
    #     fms[1])
    # out3 = Reshape((1136, bbox_num, 2), name='aux_output2')(x2od3)
    #
    # x2od4 = Conv1D(filters=bbox_num * 2, kernel_size=1, padding='same', strides=convstr,
    #                kernel_initializer='he_normal')(
    #     fms[2])
    # out4 = Reshape((1136, bbox_num, 2), name='aux_output3')(x2od4)
    #
    # x2od5 = Conv1D(filters=bbox_num * 2, kernel_size=1, padding='same', strides=convstr,
    #                kernel_initializer='he_normal')(
    #     fms[3])
    # out5 = Reshape((1136, bbox_num, 2), name='aux_output4')(x2od5)

    return x
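# Hedged usage sketch: run an input through the backbone and attach a
# softmax head. The input length (9000) and class count (4) are assumptions,
# and Input, Dense, and Model are assumed imported.
input_tensor = Input(shape=(9000, 1))
features = build_model_with_L2(input_tensor, convfilt=64, ksize=16,
                               depth=15, drop=0.5)
out = Dense(4, activation='softmax', name='main_output')(features)
model = Model(inputs=input_tensor, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])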
Example #11
def get_resnet34_BN2(input_tensor, convfilt=64, ksize=16, depth=15, drop=0):

    k = 1  # increment every 4th residual block
    p = True  # pool toggle every other residual block (end with 2^8)
    convfilt = convfilt
    encoder_confilt = 64  # encoder filters' num
    convstr = 1
    ksize = ksize
    poolsize = 2
    poolstr = 2
    depth = depth
    drop = drop

    # First convolutional block (conv, BN, relu)
    lcount = 0
    x = Conv1D(filters=convfilt,
               kernel_size=ksize,
               padding='same',
               strides=convstr,
               kernel_initializer='he_normal',
               name='layer' + str(lcount))(input_tensor)
    lcount += 1
    x = BatchNormalization(axis=1, name='layer' + str(lcount))(x)
    lcount += 1
    x = Activation('relu')(x)

    ## Second convolutional block (conv, BN, relu, dropout, conv) with residual net
    # Left branch (convolutions)
    x1 = Conv1D(filters=convfilt,
                kernel_size=ksize,
                padding='same',
                strides=convstr,
                kernel_initializer='he_normal',
                name='layer' + str(lcount))(x)
    lcount += 1
    x1 = BatchNormalization(axis=1, name='layer' + str(lcount))(x1)
    lcount += 1
    x1 = Activation('relu')(x1)
    if drop:
        x1 = Dropout(drop)(x1)
    x1 = Conv1D(filters=convfilt,
                kernel_size=ksize,
                padding='same',
                strides=convstr,
                kernel_initializer='he_normal',
                name='layer' + str(lcount))(x1)
    lcount += 1
    x1 = MaxPooling1D(pool_size=poolsize, strides=poolstr, padding='same')(x1)
    # Right branch, shortcut branch pooling
    x2 = MaxPooling1D(pool_size=poolsize, strides=poolstr, padding='same')(x)
    # Merge both branches
    x = keras.layers.add([x1, x2])
    del x1, x2

    ## Main loop
    p = not p
    for l in range(depth):

        if (l % 4 == 0) and (l >
                             0):  # increment k on every fourth residual block
            k += 1
            # match dimensions with a 1x1 convolution when the channel count changes
            xshort = Conv1D(filters=convfilt * k,
                            kernel_size=1,
                            name='layer' + str(lcount))(x)
            lcount += 1
        else:
            xshort = x
            # Left branch (convolutions)
        # notice the ordering of the operations has changed
        x1 = BatchNormalization(axis=1, name='layer' + str(lcount))(x)
        lcount += 1
        x1 = Activation('relu')(x1)
        if drop:
            x1 = Dropout(drop)(x1)
        x1 = Conv1D(filters=convfilt * k,
                    kernel_size=ksize,
                    padding='same',
                    strides=convstr,
                    kernel_initializer='he_normal',
                    name='layer' + str(lcount))(x1)
        lcount += 1
        x1 = BatchNormalization(axis=1, name='layer' + str(lcount))(x1)
        lcount += 1
        x1 = Activation('relu')(x1)
        if drop:
            x1 = Dropout(drop)(x1)
        x1 = Conv1D(filters=convfilt * k,
                    kernel_size=ksize,
                    padding='same',
                    strides=convstr,
                    kernel_initializer='he_normal',
                    name='layer' + str(lcount))(x1)
        lcount += 1
        if p:
            x1 = MaxPooling1D(pool_size=poolsize,
                              strides=poolstr,
                              padding='same')(x1)

            # Right branch: shortcut connection
        if p:
            x2 = MaxPooling1D(pool_size=poolsize,
                              strides=poolstr,
                              padding='same')(xshort)
        else:
            x2 = xshort  # pool or identity
        # Merging branches
        x = keras.layers.add([x1, x2])
        # change parameters
        p = not p  # toggle pooling

    # x = Conv1D(filters=convfilt * k, kernel_size=ksize, padding='same', strides=convstr, kernel_initializer='he_normal')(x)
    # x_reg = Conv1D(filters=convfilt * k, kernel_size=1, padding='same', strides=convstr, kernel_initializer='he_normal')(x)

    # Final bit
    x = BatchNormalization(axis=1, name='layer' + str(lcount))(x)
    lcount += 1
    x = Activation('relu')(x)

    x = Flatten()(x)

    return x
Example #12
n_train_hours = 40000
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]


# Split into supervised-learning inputs and targets
train_x, train_y = train[:, :-1], train[:, -1]
test_x, test_y = test[:, :-1], test[:, -1]

# To feed this data to the recurrent layers, reshape it to 3D format: [samples, timesteps, features]
train_X = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))
test_X = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))

model = Sequential()
# input_shape belongs on the first layer of a Sequential model.
model.add(Conv1D(filters=32, kernel_size=5,
                 strides=1, padding="causal",
                 activation="relu",
                 input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(GRU(32, return_sequences=True))
model.add(GRU(16))
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss=tf.keras.losses.Huber(),
              optimizer='adam',
              metrics=["mse"])
Example #13
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
print('x_train.shape :', x_train.shape)
print('x_test.shape :', x_test.shape)
# x_train.shape : (309, 10)
# x_test.shape : (133, 10)

# 2.4 reshape (changing the data structure)
x_train = x_train.reshape(309, 10, 1)
x_test = x_test.reshape(133, 10, 1)
print("reshape x:", x_train.shape, x_test.shape)

# Modeling
model = Sequential()
model.add(Conv1D(800, 2, input_shape=(10, 1)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(400))
model.add(Dense(288))
model.add(Dense(150))
model.add(Dense(90))
model.add(Dense(80))
model.add(Dense(1))

model.summary()

# Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mse'])

es = EarlyStopping(monitor='val_loss', patience=7, mode='auto')
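# Hedged training call wiring in the EarlyStopping callback defined above;
# y_train is assumed from earlier in the script, and the epoch count,
# batch size, and validation split are placeholders.
model.fit(x_train, y_train, epochs=100, batch_size=8,
          validation_split=0.2, callbacks=[es])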
Example #14
def main():    

    """
    ---------- Read data ----------
    """
    # Read the data into a pandas data frame
    filepath = os.path.join("..", "data", "Game_of_Thrones_Script.csv")
    df = pd.read_csv(filepath)
    # Make a df with the two columns: season and sentence from the original data set
    df = df[["Season", "Sentence"]]
    sentence = df['Sentence'].values
    season = df['Season'].values
    
    """
    ---------- Train, split and vectorize data ----------
    """
    # Train/test split using sklearn. X is the data (the sentences) and y is the labels (the season).
    X_train, X_test, y_train, y_test = train_test_split(sentence, 
                                                    season, 
                                                    test_size=0.25, # We split the data in 75% training and 25% test
                                                    random_state=42)
    # Vectorize using sklearn
    vectorizer = CountVectorizer()
    
    # First we do it for our training data...
    X_train_feats = vectorizer.fit_transform(X_train)
    #... then we do it for our test data
    X_test_feats = vectorizer.transform(X_test)
    # We can also create a list of the feature names. 
    feature_names = vectorizer.get_feature_names()
    
    """
    ---------- Deep learning model ----------
    """
    # Factorize the labels from a string to a number 
    y_train = pd.factorize(y_train)[0]
    y_test = pd.factorize(y_test)[0]
    
    # Word embeddings

    # Initialize tokenizer
    tokenizer = Tokenizer(num_words=5000)  # keep only the 5,000 most frequent words
    # Fit to training data
    tokenizer.fit_on_texts(X_train)

    # Tokenized training and test data
    X_train_toks = tokenizer.texts_to_sequences(X_train)
    X_test_toks = tokenizer.texts_to_sequences(X_test)

    # Overall vocabulary size
    vocab_size = len(tokenizer.word_index) + 1  # Adding 1 because of reserved 0 index

    # Inspect it
    print(X_train[2])
    print(X_train_toks[2])
    
    # Padding
    # Max length for a doc
    maxlen = 100

    # Pad training data to maxlen
    X_train_pad = pad_sequences(X_train_toks, 
                                padding='post', # sequences can be padded "pre" or "post"
                                maxlen=maxlen)
    # Pad testing data to maxlen
    X_test_pad = pad_sequences(X_test_toks, 
                               padding='post', 
                               maxlen=maxlen)
    # Create the L2 regularizer
    l2 = L2(0.0001)

    # Set the embedding dimension to 50
    embedding_dim = 50
    # Create embedding_matrix
    embedding_matrix = create_embedding_matrix('../data/glove.6B.50d.txt',
                                               tokenizer.word_index, 
                                               embedding_dim)
    # New model
    model = Sequential()

    # Embedding -> CONV+ReLU -> MaxPool -> FC+ReLU -> Out
    model.add(Embedding(vocab_size,                  # Vocab size from Tokenizer()
                        embedding_dim,               # Embedding input layer size
                        weights=[embedding_matrix],  # Pretrained embeddings
                        input_length=maxlen,         # Maxlen of padded doc
                        trainable=True))             # Trainable embeddings
    model.add(Conv1D(128, 5, 
                    activation='relu',
                    kernel_regularizer=l2))          # L2 regularization 
    model.add(GlobalMaxPool1D())
    model.add(Dense(10, activation='relu', kernel_regularizer=l2))
    model.add(Dense(len(set(y_train)), activation='softmax'))  # one unit per season; a single-unit softmax would always output 1.0

    # Compile the model
    model.compile(loss='sparse_categorical_crossentropy',   # integer labels from pd.factorize
                  optimizer="adam",
                  metrics=['accuracy'])

    # Print the summary
    model.summary()
    
    # Create history of the model
    history = model.fit(X_train_pad, y_train,
                    epochs=20,
                    verbose=False,
                    validation_data=(X_test_pad, y_test),
                    batch_size=10)

    # Evaluate the model 
    loss, accuracy = model.evaluate(X_train_pad, y_train, verbose=False)
    print("Training Accuracy: {:.4f}".format(accuracy))
    loss, accuracy = model.evaluate(X_test_pad, y_test, verbose=False)
    print("Testing Accuracy:  {:.4f}".format(accuracy))

    # Plot the history
    plot_history(history, epochs = 20)
    
    # Create predictions an print the classification report
    predictions = model.predict(X_test_pad, batch_size = 10)
    print(classification_report(y_test, predictions.argmax(axis=1)))
Example #15
def create_c3nn_classifier(ninput=100,
                           nfilters=32,
                           kernel_size=4,
                           ndense=128,
                           pool_size=2,
                           dropout_rate=0.2,
                           noutput=1,
                           pooling=False):
    """ An easy way of creating a CNN with 3 convolutional layers and 2 dense layers

    Parameters
    ----------
    ninput:
        input shape
    nfilters:
        number of filters
    kernel_size:
        kernel size
    ndense: int
        number of neurons in the first dense layer
    pool_size:
        pool size in MaxPooling
    dropout_rate:
        dropout rate
    noutput:
        output shape
    pooling:
        if True, add pooling layers

    Returns
    -------

    """
    model = Sequential()
    model.add(
        Conv1D(filters=nfilters,
               kernel_size=kernel_size,
               strides=1,
               padding="valid",
               activation="relu",
               input_shape=(ninput, 1)))  # ,data_format="channels_last"
    model.add(BatchNormalization())
    if pooling:
        model.add(MaxPooling1D(pool_size, padding="valid"))

    model.add(
        Conv1D(nfilters // 2, kernel_size, padding="valid", activation="relu"))
    model.add(BatchNormalization())
    if pooling:
        model.add(MaxPooling1D(pool_size, padding="valid"))

    model.add(
        Conv1D(nfilters // 4, kernel_size, padding="valid", activation="relu"))
    model.add(BatchNormalization())
    if pooling:
        model.add(MaxPooling1D(pool_size, padding="valid"))
    model.add(Dropout(dropout_rate))

    model.add(Flatten())
    model.add(Dense(
        ndense,
        activation="relu",
    ))  # input_shape=(4000,)
    model.add(BatchNormalization())

    if noutput == 1:
        model.add(Dense(noutput, activation="sigmoid"))
    else:
        model.add(Dense(noutput, activation="softmax"))
    return model
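# Hedged usage (not in the original): the builder returns an uncompiled
# model, so compile it to match the output head.
model = create_c3nn_classifier(ninput=100, noutput=1, pooling=True)
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()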
Example #16
    if temp is not None:
        embedding_matrix[i] = temp

embedding = Embedding(
    *embedding_matrix.shape,  ## 54533 100
    weights=[embedding_matrix],
    trainable=False,
    ## ignore the zero-padded values
    mask_zero=True)

### title layer
title_input = Input(shape=(None, ))
embedded_title = embedding(title_input)

embedded_title1 = Conv1D(64, 5, activation='relu', strides=1)(embedded_title)
embedded_title1 = SpatialDropout1D(0.2)(embedded_title1)
embedded_title1 = GlobalMaxPooling1D()(embedded_title1)
embedded_title1 = Flatten()(embedded_title1)

embedded_title2 = Conv1D(128, 5, activation='relu', strides=1)(embedded_title)
embedded_title2 = SpatialDropout1D(0.2)(embedded_title2)
embedded_title2 = GlobalMaxPooling1D()(embedded_title2)
embedded_title2 = Flatten()(embedded_title2)

embedded_title3 = Conv1D(256, 5, activation='relu', strides=1)(embedded_title)
embedded_title3 = SpatialDropout1D(0.2)(embedded_title3)
embedded_title3 = GlobalMaxPooling1D()(embedded_title3)
embedded_title3 = Flatten()(embedded_title3)

### question layer
Example #17
perm_test = np.random.permutation(len(data_test))
data_test = data_test[perm_test]
labels_test = labels_test[perm_test]
perm_val = np.random.permutation(len(data_val))
data_val = data_val[perm_val]
labels_val = labels_val[perm_val]
model = Sequential()
# Embedded layer
model.add(
    Embedding(len(embedding_matrix),
              EMBEDDING_DIM,
              weights=[embedding_matrix],
              input_length=MAX_SEQUENCE_LENGTH,
              trainable=False))
# Convolutional Layer
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
# LSTM Layer
model.add(LSTM(300))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['acc'])
print(model.summary())
early_stop = EarlyStopping(monitor='val_loss', patience=3)

df = pd.DataFrame(vectors[name_index[:, 1].astype(int)])
df.index = name_index[:, 0]
df.to_csv("embedding.csv")

hist = model.fit(data_train, labels_train, \
Example #18
def get_test_model_recurrent():
    """Returns a minimalistic test model for recurrent layers."""
    input_shapes = [(17, 4), (1, 10), (20, 40), (6, 7, 10, 3)]

    outputs = []

    inputs = [Input(shape=s) for s in input_shapes]

    inp = PReLU()(inputs[0])

    lstm = Bidirectional(
        LSTM(
            units=4,
            return_sequences=True,
            bias_initializer='random_uniform',  # default is zeros; use random to test the computation
            activation='tanh',
            recurrent_activation='relu'),
        merge_mode='concat')(inp)

    lstm2 = Bidirectional(LSTM(units=6,
                               return_sequences=True,
                               bias_initializer='random_uniform',
                               activation='elu',
                               recurrent_activation='hard_sigmoid'),
                          merge_mode='sum')(lstm)

    lstm3 = LSTM(units=10,
                 return_sequences=False,
                 bias_initializer='random_uniform',
                 activation='selu',
                 recurrent_activation='sigmoid')(lstm2)

    outputs.append(lstm3)

    conv1 = Conv1D(2, 1, activation='sigmoid')(inputs[1])
    lstm4 = LSTM(units=15,
                 return_sequences=False,
                 bias_initializer='random_uniform',
                 activation='tanh',
                 recurrent_activation='elu')(conv1)

    dense = (Dense(23, activation='sigmoid'))(lstm4)
    outputs.append(dense)

    time_dist_1 = TimeDistributed(Conv2D(2, (3, 3), use_bias=True))(inputs[3])
    flatten_1 = TimeDistributed(Flatten())(time_dist_1)

    outputs.append(
        Bidirectional(LSTM(units=6,
                           return_sequences=True,
                           bias_initializer='random_uniform',
                           activation='tanh',
                           recurrent_activation='sigmoid'),
                      merge_mode='ave')(flatten_1))

    outputs.append(TimeDistributed(MaxPooling2D(2, 2))(inputs[3]))
    outputs.append(TimeDistributed(AveragePooling2D(2, 2))(inputs[3]))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_recurrent')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 2
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
Example #19
    array_text.append(array_shape)

dx = np.asarray(array_text)
print(dx.shape)

dy = np.asarray(binary)
from sklearn.model_selection import train_test_split as tts
xtrain, xtest, ytrain, ytest = tts(dx,
                                   dy,
                                   test_size=.2,
                                   random_state=101,
                                   stratify=dy)

model = Sequential()
model.add(Conv1D(max_length, 3, input_shape=(max_length, size)))
model.add(Activation("sigmoid"))
model.add(AvgPool1D(pool_size=3))
#model.add(Dropout(0.2))

model.add(Conv1D(max_length, 3))
model.add(Activation("sigmoid"))
model.add(MaxPool1D(pool_size=3))
model.add(Dropout(0.2))

model.add(Conv1D(max_length, 2))
model.add(Activation("sigmoid"))
model.add(AvgPool1D(pool_size=2))
model.add(Dropout(0.2))

model.add(Conv1D(max_length, 2))
Example #20
def get_test_model_exhaustive():
    """Returns a exhaustive test model."""
    input_shapes = [(2, 3, 4, 5, 6), (2, 3, 4, 5, 6), (7, 8, 9, 10),
                    (7, 8, 9, 10), (11, 12, 13), (11, 12, 13), (14, 15),
                    (14, 15), (16, ),
                    (16, ), (2, ), (1, ), (2, ), (1, ), (1, 3), (1, 4),
                    (1, 1, 3), (1, 1, 4), (1, 1, 1, 3), (1, 1, 1, 4),
                    (1, 1, 1, 1, 3), (1, 1, 1, 1, 4), (26, 28, 3), (4, 4, 3),
                    (4, 4, 3), (4, ), (2, 3), (1, ), (1, ), (1, ), (2, 3),
                    (9, 16, 1), (1, 9, 16)]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(
        GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(MaxPooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default MaxPoolingOp only supports NHWC on device type CPU
    outputs.append(
        MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(AveragePooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default AvgPoolingOp only supports NHWC on device type CPU
    outputs.append(
        AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(
        GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    outputs.append(Permute((3, 4, 1, 5, 2))(inputs[0]))
    outputs.append(Permute((1, 5, 3, 2, 4))(inputs[0]))
    outputs.append(Permute((3, 4, 1, 2))(inputs[2]))
    outputs.append(Permute((2, 1, 3))(inputs[4]))
    outputs.append(Permute((2, 1))(inputs[6]))
    outputs.append(Permute((1, ))(inputs[8]))

    outputs.append(Permute((3, 1, 2))(inputs[31]))
    outputs.append(Permute((3, 1, 2))(inputs[32]))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[31])))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[32])))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(axis=1)(inputs[0]))
    outputs.append(BatchNormalization(axis=2)(inputs[0]))
    outputs.append(BatchNormalization(axis=3)(inputs[0]))
    outputs.append(BatchNormalization(axis=4)(inputs[0]))
    outputs.append(BatchNormalization(axis=5)(inputs[0]))
    outputs.append(BatchNormalization()(inputs[2]))
    outputs.append(BatchNormalization(axis=1)(inputs[2]))
    outputs.append(BatchNormalization(axis=2)(inputs[2]))
    outputs.append(BatchNormalization(axis=3)(inputs[2]))
    outputs.append(BatchNormalization(axis=4)(inputs[2]))
    outputs.append(BatchNormalization()(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(BatchNormalization(axis=1)(inputs[4])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[4]))
    outputs.append(BatchNormalization(axis=3)(inputs[4]))
    outputs.append(BatchNormalization()(inputs[6]))
    outputs.append(BatchNormalization(axis=1)(inputs[6]))
    outputs.append(BatchNormalization(axis=2)(inputs[6]))
    outputs.append(BatchNormalization()(inputs[8]))
    outputs.append(BatchNormalization(axis=1)(inputs[8]))
    outputs.append(BatchNormalization()(inputs[27]))
    outputs.append(BatchNormalization(axis=1)(inputs[27]))
    outputs.append(BatchNormalization()(inputs[14]))
    outputs.append(BatchNormalization(axis=1)(inputs[14]))
    outputs.append(BatchNormalization(axis=2)(inputs[14]))
    outputs.append(BatchNormalization()(inputs[16]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(BatchNormalization(axis=1)(inputs[16])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[16]))
    outputs.append(BatchNormalization(axis=3)(inputs[16]))
    outputs.append(BatchNormalization()(inputs[18]))
    outputs.append(BatchNormalization(axis=1)(inputs[18]))
    outputs.append(BatchNormalization(axis=2)(inputs[18]))
    outputs.append(BatchNormalization(axis=3)(inputs[18]))
    outputs.append(BatchNormalization(axis=4)(inputs[18]))
    outputs.append(BatchNormalization()(inputs[20]))
    outputs.append(BatchNormalization(axis=1)(inputs[20]))
    outputs.append(BatchNormalization(axis=2)(inputs[20]))
    outputs.append(BatchNormalization(axis=3)(inputs[20]))
    outputs.append(BatchNormalization(axis=4)(inputs[20]))
    outputs.append(BatchNormalization(axis=5)(inputs[20]))

    outputs.append(Dropout(0.5)(inputs[4]))

    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    outputs.append(Reshape(((2 * 3 * 4 * 5 * 6), ))(inputs[0]))
    outputs.append(Reshape((2, 3 * 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5, 6))(inputs[0]))

    outputs.append(Reshape((16, ))(inputs[8]))
    outputs.append(Reshape((2, 8))(inputs[8]))
    outputs.append(Reshape((2, 2, 4))(inputs[8]))
    outputs.append(Reshape((2, 2, 2, 2))(inputs[8]))
    outputs.append(Reshape((2, 2, 1, 2, 2))(inputs[8]))

    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))
    # outputs.append(UpSampling1D(size=2)(inputs[8])) # ValueError: Input 0 of layer up_sampling1d_1 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 16]

    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1),
                padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 2
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
Example #21
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0,
                                                    stratify=y)

X_train.shape, X_test.shape

vec_size = 300  # internally, Keras learns a vector representation for every token
model = Sequential()
# Embedding takes the input dimension (vocab_size), the output dimension
# (the 300-d vector representation), and the maximum input length.
model.add(Embedding(vocab_size, vec_size, input_length=max_length))
# Add the CNN part: a first conv layer of 32 filters of size 2 with ReLU.
model.add(Conv1D(32, 2, activation='relu'))
# Max-pool picks the larger of each pair of adjacent conv outputs.
model.add(MaxPool1D(2))
model.add(Dropout(0.2))
# Only one conv block is added because the dataset is small.
# A fully connected dense layer with 32 units.
model.add(Dense(32, activation='relu'))
model.add(GlobalMaxPooling1D())
# Output layer: 3 units with softmax, since this is a 3-class problem.
model.add(Dense(3, activation='softmax'))

model.compile(optimizer=Adam(learning_rate=0.001),
              loss='categorical_crossentropy',
Example #22
             
    Returns:
        Y - updated output array
    """
    X = X.copy()
    Y = Y.copy()
    for i in range(n_steps):
        prediction = model.predict(X)
        Y[:, i, np.newaxis] = prediction[:, -1, np.newaxis]
        X = np.concatenate((X[:, 1:3], prediction), axis=1)
    return Y


#%% Basic CNN seq2seq model
model_CNN = Sequential([
    Conv1D(filters=20, kernel_size=3, activation='relu', input_shape=[None,
                                                                      1]),
    TimeDistributed(Dense(1, activation='linear'))
])
model_CNN.compile(loss='mse', optimizer=Adam(learning_rate=0.1))
model_CNN.fit(X_train, Y_train, epochs=10, batch_size=32)

# prediction on training and validation sequences - treated as a single-step ahead prediction
y_train_cnn = model_CNN.predict(X_train)
y_train_cnn = y_train_cnn[:, :, 0]
RMSE_tr_cnn = np.sqrt(mean_squared_error(Y_train.ravel(), y_train_cnn.ravel()))
print(f'Training score across all timesteps as an SSA: {RMSE_tr_cnn}')

y_valid_cnn = model_CNN.predict(X_valid)
y_valid_cnn = y_valid_cnn[:, :, 0]
RMSE_va_cnn = np.sqrt(mean_squared_error(Y_valid.ravel(), y_valid_cnn.ravel()))
print(f'Validation score across all timesteps as an SSA: {RMSE_va_cnn}')
Example #23
def crnn(input_shape,
         conv_layers,
         lstm_units,
         kernel_size=3,
         pool_size=2,
         reg_rate=0.2,
         hidden_dense=100,
         num_classes=5) -> Model:
    """ Creates cnn model with specified number of convolutional layers

    Parameters
    ----------
    input_shape : 2 integer tuple
        Shape of the input for first convolutional layer
    conv_layers : list or array
        Vector of filters for convolutional layers. The number of layers
        depends on the length of the vector
    lstm_units : int
        Number of units in LSTM layer
    kernel_size : int
        Kernel size for convolutional layers
    pool_size : int
        Window size for pooling layers
    reg_rate: float
        Regularization rate
    hidden_dense : int
        Number of units in the hidden dense layer
    num_classes : int
        Number of classes for classification problem.
        Needed for output layer
        
    Returns
    -------
    keras.models.Model
        
    """

    model_input = Input((None, input_shape[1]), name='input')
    layer = model_input
    for num_filters in conv_layers:
        # give name to the layers
        layer = Conv1D(filters=num_filters, kernel_size=kernel_size)(layer)
        layer = BatchNormalization(momentum=0.9)(layer)
        layer = Activation('relu')(layer)
        layer = MaxPooling1D(pool_size)(layer)
        layer = Dropout(reg_rate)(layer)

    ## LSTM Layer
    layer = LSTM(lstm_units, return_sequences=False)(layer)
    layer = Dropout(reg_rate)(layer)

    ## Dense Layer
    layer = Dense(hidden_dense, activation='relu')(layer)
    layer = Dropout(reg_rate)(layer)

    ## Softmax Output
    layer = Dense(num_classes)(layer)
    layer = Activation('softmax', name='output_realtime')(layer)
    model_output = layer
    model = Model(model_input, model_output)
    # model = Sequential()

    # model.add(Conv1D(conv_layers[0], kernel_size, activation='relu', input_shape=input_shape+(1,)))
    # model.add(MaxPooling1D(pool_size))
    # model.add(Dropout(reg_rate))
    # for index, filters in enumerate(conv_layers[1:]):
    #     model.add(Conv1D(filters, kernel_size, activation='relu'))
    #     model.add(MaxPooling1D(pool_size))
    #     model.add(Dropout(reg_rate))
    # model.add(Reshape())
    # model.add(LSTM(lstm_units, return_sequences=False))
    # model.add(Dropout(reg_rate))

    # #model.add(Flatten())
    # model.add(Dense(100, activation='relu'))
    # model.add(Dense(units=num_classes, activation='softmax'))

    return model
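
# Hypothetical usage (shapes and hyperparameters are illustrative): a CRNN over
# 40-dimensional frame features with three conv blocks and a 64-unit LSTM.
model = crnn(input_shape=(None, 40), conv_layers=[32, 64, 128], lstm_units=64)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()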
Example #24
0
'''
Notes on manually setting the weights of a Conv1D layer:
  - store the weights in the model with model.set_weights(weights)
  - confirm they were stored by printing model.get_weights()
  - note that the feature map has six elements, whereas our input has eight
    elements; this is an artefact of how the filter was applied to the input
    sequence with 'valid' padding. We can also use SAME padding to preserve
    the length.
'''

# define input data
data = asarray([0, 0, 0, 1, 1, 0, 0, 0])
data = data.reshape(1, 8, 1)
# create model
model = Sequential()
model.add(Conv1D(1, 3, input_shape=(8, 1)))
# define a vertical line detector
weights = [asarray([[[0]], [[1]], [[0]]]), asarray([0.0])]
# store the weights in the model
model.set_weights(weights)
# confirm they were stored
print(model.get_weights())

# apply filter to input data
# we use predict() to perform the convolution
yhat = model.predict(data)
print(yhat)
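# with these hand-set weights the convolution simply copies the centre of each
# three-element window, so the printed feature map is [0, 0, 1, 1, 0, 0] along
# the time axis, with shape (1, 6, 1)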

print('two-d convolutional layer \n')
# define input data
data = [[0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0],
Example #25
0
def commandsmodel():
    '''Contains the deep learning model for the commands classification
    Returns:
    model -- tensorflow.keras model instance'''

    model = Sequential()

    # first layer (Conv1D)
    model.add(
        Conv1D(8,
               kernel_size=13,
               strides=1,
               padding='valid',
               input_shape=(8000, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))

    # second layer (Conv1D)
    model.add(Conv1D(16, kernel_size=11, padding='valid', strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))

    # third layer (Conv1D)
    model.add(Conv1D(32, kernel_size=9, padding='valid', strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))

    # fourth layer (Conv1D)
    model.add(Conv1D(64, kernel_size=9, padding='valid', strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.3))

    # fifth layer (GRU)
    model.add(GRU(128, return_sequences=True))
    model.add(Dropout(0.8))
    model.add(BatchNormalization())

    # sixth layer (GRU)
    model.add(GRU(128, return_sequences=True))
    model.add(Dropout(0.8))
    model.add(BatchNormalization())

    # flatten layer
    model.add(Flatten())
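    # with the (8000, 1) input above, the time axis has shrunk to 94 steps by
    # this point, so Flatten yields 94 * 128 = 12032 features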

    # Dense layer 1
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.3))

    # Dense layer 2
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))

    # output layer
    model.add(Dense(7, activation='softmax'))

    opt = Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.summary()

    return model
Example #26
0
def keras_model_fn(_, config):
    """
    Creating a CNN model for sentiment modeling
    """
    # load the GloVe embeddings file into memory
    embeddings = {}
    s3_access = boto3.resource('s3')
    glove_file = s3_access.Object(
        'aiops-assign6',
        config["embeddings_path"]).get()['Body'].read().decode("utf-8").split(
            '\n')
    #print('loading done')

    embedding_matrix = np.zeros((config["embeddings_dictionary_size"],
                                 config["embeddings_vector_size"]))

    ind = 0
    for line in glove_file:
        ind = ind + 1
        split_line = line.split()
        #print(split_line[0])
        #due to issue with last line
        if ind > 1193515:
            break
        dict_key = split_line[0]
        embeddings[dict_key] = np.array(split_line[1:], dtype='float32')
        #<unknown> has been manually removed
        embedding_matrix[ind] = embeddings[dict_key]

    print('Loaded ' + str(len(embeddings)) + ' word vectors.')

    # creating the cnn-model
    model = Sequential()
    # embedding layer with the given input and output dimensions
    model.add(Embedding(input_length=config["padding_size"],
                        input_dim=config["embeddings_dictionary_size"],
                        output_dim=25,
                        weights=[embedding_matrix],
                        trainable=True,
                        name='embedding'))

    # Conv1D layer with the given filters
    model.add(
        Conv1D(100,
               kernel_size=2,
               strides=1,
               padding='valid',
               activation='relu'))

    # GlobalMaxPool1D layer
    model.add(GlobalMaxPool1D())

    # dense layer 1
    model.add(Dense(100, activation='relu'))

    # dense layer 2
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
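
# A hypothetical config for keras_model_fn (keys inferred from the code above;
# the values are illustrative placeholders, not from the original deployment):
config = {
    "embeddings_path": "glove/glove.twitter.27B.25d.txt",
    "embeddings_dictionary_size": 1200000,
    "embeddings_vector_size": 25,
    "padding_size": 100,
}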
Example #27
0
def create_unet(k=1, num_classes=6, input_shape=(None, 300)):

    # Keras has no Conv1DTranspose, so we define one by hand; it is needed to upsample after max pooling
    def Conv1DTranspose(input_tensor, filters, kernel_size=1, strides=2, padding='same'):
        """
        input_tensor: входной тензор (batch_size, time_steps, dims)
        filters: int, output dimension, выходной тензор будет иметь размер (batch_size, time_steps, filters)
        kernel_size: размер ядра свертки 
        strides: int, шаг ядра
        padding: 'same' | действительный
        """
        x = Lambda(lambda x: K.expand_dims(x, axis=2))(input_tensor)
        x = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding)(x)
        x = Lambda(lambda x: K.squeeze(x, axis=2))(x)
        return x
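    # (newer Keras releases ship a built-in Conv1DTranspose layer; this
    # expand-dims / Conv2DTranspose / squeeze wrapper emulates it on older
    # versions)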
        
    img_input = Input(input_shape) 

    # Block 1
    x = Conv1D(64 * k , 3, padding='same')(img_input) 
    x = BatchNormalization()(x) 
    x = Activation('relu')(x)

    x = Conv1D(64 * k , 3, padding='same')(x)  
    x = BatchNormalization()(x)     
    block_1_out = Activation('relu')(x) 

    x = MaxPooling1D()(block_1_out)

    # Block 2
    x = Conv1D(128 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)  

    x = Conv1D(128 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    block_2_out = Activation('relu')(x)

    x = MaxPooling1D()(block_2_out)

    # Block 3
    x = Conv1D(256 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)               
    x = Activation('relu')(x)                     

    x = Conv1D(256 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv1D(256 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    block_3_out = Activation('relu')(x)

    # x = block_3_out
    x = MaxPooling1D()(block_3_out)

    # Block 4
    x = Conv1D(512 * k , 3, padding='same')(x)
    x = BatchNormalization()(x) 
    x = Activation('relu')(x)

    x = Conv1D(512 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv1D(512 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)      
    block_4_out = Activation('relu')(x)
    x = block_4_out 

    # UP 2
    x = Conv1DTranspose(x, 256, kernel_size=2, strides=2, padding='same')
    x = BatchNormalization()(x)
    x = Activation('relu')(x) 

    x = concatenate([x, block_3_out]) 
    x = Conv1D(256 * k , 3, padding='same')(x) 
    x = BatchNormalization()(x) 
    x = Activation('relu')(x)

    x = Conv1D(256 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    
    # UP 3
    x = Conv1DTranspose(x, 128, kernel_size=2, strides=2, padding='same')
    x = BatchNormalization()(x)
    x = Activation('relu')(x) 

    x = concatenate([x, block_2_out])
    x = Conv1D(128 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv1D(128 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # UP 4
    x = Conv1DTranspose(x, 64, kernel_size=2, strides=2, padding='same')
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = concatenate([x, block_1_out])
    x = Conv1D(64 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv1D(64 * k , 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv1D(num_classes, 3, activation='sigmoid', padding='same')(x)

    model = Model(img_input, x)
    model.compile(optimizer=Adam(0.0025), 
                  loss='categorical_crossentropy',
                  metrics=[dice_coef])
  
    return model
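
# Usage sketch (illustrative values; assumes the dice_coef metric used in the
# compile call above is defined): the time axis is pooled in half three times
# and upsampled back, so its length should be divisible by 8 for the skip
# concatenations to line up.
model = create_unet(k=1, num_classes=6, input_shape=(1024, 300))
model.summary()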
Example #28
0
def create_c3nn2_classifier(ninput=100,
                            nfilters=32,
                            kernel_size=4,
                            ndense=128,
                            pool_size=2,
                            dropout_rate=0.5,
                            noutput=1,
                            activation_hidden="relu",
                            activation_out="sigmoid"):
    """ An easy way of creating a CNN with 3 convolutional layers and 2 dense layers

    Parameters
    ----------
    ninput:
        input shape
    nfilters:
        number of filters
    kernel_size:
        kernel size
    ndense: int
        number of neurons in the dense layer
    pool_size:
        pool size in MaxPooling
    dropout_rate:
        dropout rate
    noutput:
        output shape
    activation_hidden:
        the activation function used in hidden layers
    activation_out:
        the activation function used in output layers

    Returns
    -------
    keras.models.Sequential
        the assembled (uncompiled) model
    """
    model = Sequential()
    model.add(
        Conv1D(filters=nfilters,
               kernel_size=kernel_size,
               strides=1,
               padding="valid",
               activation=activation_hidden,
               input_shape=(ninput, 1)))
    # ,data_format="channels_last"
    model.add(MaxPooling1D(pool_size, padding="valid"))
    model.add(BatchNormalization())

    model.add(
        Conv1D(nfilters,
               kernel_size,
               padding="valid",
               activation=activation_hidden))
    model.add(MaxPooling1D(pool_size, padding="valid"))
    model.add(BatchNormalization())

    model.add(
        Conv1D(nfilters,
               kernel_size,
               padding="valid",
               activation=activation_hidden))
    model.add(MaxPooling1D(pool_size, padding="valid"))
    model.add(BatchNormalization())
    # model.add(Dropout(dropout_rate))

    model.add(Flatten())
    model.add(Dense(
        ndense,
        activation=activation_hidden,
    ))  # input_shape=(4000,)
    model.add(BatchNormalization())
    model.add(Dropout(dropout_rate))

    # model.add(Dense(ndense[1], activation=activation_hidden))
    # model.add(BatchNormalization())
    # model.add(Dropout(dropout_rate))

    model.add(Dense(noutput, activation=activation_out))
    return model
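
# Illustrative usage: a binary classifier over 100-sample 1-D inputs (the
# compile settings are an assumption, not part of the original helper).
model = create_c3nn2_classifier(ninput=100, noutput=1)
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])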
Example #29
0
        def build(self, hp):

            nb_filters_0 = hp.Choice("nb_filters_0", values=[8, 16, 32, 64])
            kernel_size = hp.Choice("kernel_size", values=[3, 5, 7])
            kernel_initializer = "glorot_uniform"
            lr = hp.Float(
                "learning_rate",
                min_value=1e-4,
                max_value=1e-2,
                sampling="LOG",
                default=1e-3,
            )

            nb_dense_neurons_1 = hp.Int("nb_dense_neurons_1",
                                        min_value=10,
                                        max_value=500,
                                        step=10,
                                        default=100)
            reg = hp.Float("regularization_value",
                           min_value=1e-4,
                           max_value=1,
                           sampling="LOG",
                           default=1e-2)
            reg_dense = hp.Float("reg_dense",
                                 min_value=1e-4,
                                 max_value=1,
                                 sampling="LOG",
                                 default=1e-2)
            dropout = hp.Float("dropout",
                               min_value=0.,
                               max_value=0.9,
                               step=0.1,
                               default=0.5)

            input_layer = Input(shape=(train_data.shape[1], 1))
            output_channels = 17

            x = Conv1D(filters=nb_filters_0,
                       kernel_size=kernel_size,
                       kernel_initializer=kernel_initializer,
                       kernel_regularizer=l2(reg),
                       padding='valid')(input_layer)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
            x = Dropout(dropout)(x)

            x = Conv1D(filters=nb_filters_0 * 2,
                       kernel_size=kernel_size,
                       kernel_initializer=kernel_initializer,
                       kernel_regularizer=l2(reg),
                       padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
            x = Dropout(dropout)(x)

            x = Conv1D(filters=nb_filters_0 * 4,
                       kernel_size=kernel_size,
                       kernel_initializer=kernel_initializer,
                       kernel_regularizer=l2(reg),
                       padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
            x = Dropout(dropout)(x)

            x = Flatten()(x)
            x = Dense(nb_dense_neurons_1, kernel_regularizer=l2(reg_dense))(x)

            x = Dropout(dropout)(x)

            output_layer = Dense(output_channels, activation="softmax")(x)

            model = Model(inputs=input_layer, outputs=output_layer)
            model.compile(loss="categorical_crossentropy",
                          optimizer=Adam(learning_rate=lr),
                          metrics=["accuracy"])

            return model
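
# A sketch of how such a build(hp) method is typically driven with KerasTuner
# (the hypermodel class and data names here are assumptions, not from the
# original snippet):
import keras_tuner as kt

tuner = kt.RandomSearch(
    MyHyperModel(),  # hypothetical HyperModel subclass exposing build(self, hp)
    objective="val_accuracy",
    max_trials=20,
)
tuner.search(train_data, train_labels, epochs=10, validation_split=0.2)
best_model = tuner.get_best_models(num_models=1)[0]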
Example #30
0
    def __create_model(self):
        ''' Utility function to create and train model '''
        
        if self.__model_type == 'complete':
             
            fp_pre_chain = keras.Input(
                shape=self._nnX['fp_pre_chain'][0].shape, 
                name='fp_pre_chain')
            
            fp_amino_acid = keras.Input(
                shape=self._nnX['fp_amino_acid'][0].shape,
                name='fp_amino_acid')

            coupling_agent = keras.Input(
                shape=self._nnX['coupling_agent'][0].shape, 
                name='coupling_agent')
            
            coupling_strokes = keras.Input(
                shape=self._nnX['coupling_strokes'][0].shape,
                name='coupling_strokes')
            
            temp_coupling = keras.Input(
                shape=self._nnX['temp_coupling'][0].shape, 
                name='temp_coupling')
            
            deprotection_strokes = keras.Input(
                shape=self._nnX['deprotection_strokes'][0].shape, 
                name='deprotection_strokes')

            flow_rate = keras.Input(
                shape=self._nnX['flow_rate'][0].shape, 
                name='flow_rate')
            
            machine = keras.Input(
                shape=self._nnX['machine'][0].shape, 
                name='machine')
            
            temp_reactor_1 = keras.Input(
                shape=self._nnX['temp_reactor_1'][0].shape, 
                name='temp_reactor_1')

            x_pre_chain = Conv1D(2**self.model_params['pre_chain_conv1_filter'], 
                                 2**self.model_params['pre_chain_conv1_kernel'])(fp_pre_chain)
            x_pre_chain = Dense(2**self.model_params['pre_chain_dense1'])(x_pre_chain)
            x_pre_chain = Dropout(self.model_params['pre_chain_dropout1'])(x_pre_chain)
            x_pre_chain = Conv1D(2**self.model_params['pre_chain_conv2_filter'], 
                                 2**self.model_params['pre_chain_conv2_kernel'])(x_pre_chain)
            x_pre_chain = Dropout(self.model_params['pre_chain_dropout2'])(x_pre_chain)
            x_pre_chain = Activation(self.model_params['pre_chain_activation1'])(x_pre_chain)
            x_pre_chain = Flatten()(x_pre_chain)
            x_pre_chain = Dense(2**self.model_params['pre_chain_amino_acid_dense_final'], 
                                activation=self.model_params['pre_chain_activation2'])(x_pre_chain)

            x_amino_acid = Dense(2**self.model_params['amino_acid_dense1'])(fp_amino_acid)
            x_amino_acid = Dense(2**self.model_params['amino_acid_dense2'], 
                                 activation=self.model_params['amino_acid_activation1'])(x_amino_acid)
            x_amino_acid = Dropout(self.model_params['amino_acid_dropout1'])(x_amino_acid)
            x_amino_acid = Dense(2**self.model_params['pre_chain_amino_acid_dense_final'], 
                                 activation=self.model_params['amino_acid_activation2'])(x_amino_acid)

            x_chemistry = concatenate([x_pre_chain, x_amino_acid])
            x_chemistry = Dense(2**self.model_params['chemistry_dense1'])(x_chemistry)
            x_chemistry = Dense(2**self.model_params['chemistry_dense2'])(x_chemistry)

            x_coupling_agent = Activation('sigmoid')(coupling_agent)
            x_coupling_strokes = Activation('sigmoid')(coupling_strokes)
            x_temp_coupling = Activation('sigmoid')(temp_coupling)
            x_deprotection_strokes = Activation('sigmoid')(deprotection_strokes)
            x_deprotection_strokes = Dense(4, activation='relu')(x_deprotection_strokes)

            x_coupling = concatenate(
                [x_coupling_agent, x_coupling_strokes, x_temp_coupling, x_deprotection_strokes])
            x_coupling = Dense(self.model_params['coupling_dense1'])(x_coupling)
            x_coupling = Dense(self.model_params['coupling_dense2'])(x_coupling)

            x_flow_rate = Activation('sigmoid')(flow_rate)
            x_machine = Activation('sigmoid')(machine)
            x_machine = Dense(3, activation='relu')(x_machine)
            x_temp_reactor_1 = Activation('sigmoid')(temp_reactor_1)

            x_machine_variables = concatenate([x_flow_rate, x_machine, x_temp_reactor_1])
            x_machine_variables = Dense(self.model_params['machine_dense1'])(x_machine_variables)
            x_machine_variables = Dense(self.model_params['machine_dense2'])(x_machine_variables)

            x = concatenate([x_chemistry, x_coupling, x_machine_variables])
            x = Dense(2**self.model_params['concat_dense1'])(x)
            x = Dense(2**self.model_params['concat_dense2'], 
                      activation=self.model_params['concat_activation2'])(x)
            x = Dropout(self.model_params['concat_dropout1'])(x)
            x = Dense(2**self.model_params['concat_dense3'], 
                      activation=self.model_params['concat_activation3'])(x)

            first_area = Dense(1,  activation='linear', name='first_area')(x)
            first_height = Dense(1,  activation='linear', name='first_height')(x)
            first_width = Dense(1,  activation='linear', name='first_width')(x)

            first_diff = Dense(1,  activation='linear', name='first_diff')(x)

            model = Model(
                inputs=[fp_pre_chain, fp_amino_acid, 
                        coupling_agent, coupling_strokes, temp_coupling, deprotection_strokes, 
                        flow_rate, machine, temp_reactor_1], 
                outputs=[first_area, first_height, first_width, first_diff]
            )

        elif self.__model_type == 'minimal':
            model = Sequential()
            model.add(Conv1D(
                2**self.model_params['pre_chain_conv1_filter'], 
                2**self.model_params['pre_chain_conv1_kernel'], 
                input_shape=(self._nnX[0].shape[0], self._nnX[0].shape[1])))
            model.add(Dense(2**self.model_params['pre_chain_dense1']))
            model.add(Dropout(self.model_params['pre_chain_dropout1']))
            model.add(Conv1D(
                2**self.model_params['pre_chain_conv2_filter'], 
                2**self.model_params['pre_chain_conv2_kernel']))
            model.add(Dropout(self.model_params['pre_chain_dropout2']))
#             model.add(Activation(self.model_params['pre_chain_activation1']))
            model.add(Flatten())
            model.add(Dense(
                2**self.model_params['pre_chain_amino_acid_dense_final'],
                activation=self.model_params['pre_chain_activation2']))
            model.add(Dense(
                2**self.model_params['concat_dense1']))
            model.add(Dense(
                2**self.model_params['concat_dense2']))
            model.add(Dropout(
                self.model_params['concat_dropout1']))
            model.add(Dense(
                2**self.model_params['concat_dense3']))
            model.add(Dense(
                1, activation='linear'))
        
        model.compile(
            optimizer=RMSprop(learning_rate=self.model_params['opt_lr']),
            loss=mse)

        callbacks_list = []

        if self.model_params['save_checkpoint']:
            checkpoint = ModelCheckpoint(
                self.model_params['checkpoint_filepath'] + 
                "predictor-epoch{epoch:02d}-loss{loss:.4f}-val_loss{val_loss:.4f}.hdf5", 
                monitor='val_loss', 
                save_best_only=True, 
                mode='min')
            callbacks_list = [checkpoint]
        
        model.fit(self._nnX, self._nnY, 
                  epochs=self.model_params['epochs'], 
                  batch_size=self.model_params['batch_size'], 
                  validation_split=self.model_params['val_split'], 
                  callbacks=callbacks_list, verbose=False
                 )
        
        self.model = model