Example n. 1
0
def construct_CNN(target_length, numConv, kernel_num, kernel_size, dropout,
                  maxpool=False, maxpool_size=1, add_dense_layer=False,
                  dense_unit=None, normalization=False):
    """Build a stacked Conv1D ``Sequential`` model with a single linear output.

    Parameters
    ----------
    target_length : int
        Length of the input sequence (second entry of the declared shape).
    numConv : int
        Number of Conv1D layers to stack.
    kernel_num : sequence of int
        Filter count for each Conv1D layer; must have ``numConv`` entries.
    kernel_size : sequence of int
        Kernel size for each Conv1D layer; must have ``numConv`` entries.
    dropout : float
        Dropout rate applied after each conv / pooling layer and the flatten.
    maxpool : bool
        If True, add a MaxPooling1D (plus dropout) after every conv layer.
    maxpool_size : int
        Pool size; also the per-layer shrink factor of the declared length.
    add_dense_layer : bool
        If True, append fully connected layers sized by ``dense_unit``.
    dense_unit : sequence of int or None
        Units of each dense layer when ``add_dense_layer`` is True.
    normalization : bool
        If True, add BatchNormalization at the end of each conv step.

    Returns
    -------
    keras Sequential model (uncompiled), ending in ``Dense(1)``.
    """
    # create model
    model = Sequential()
    model.add(Dropout(0.2))  # fixed input-level dropout, independent of `dropout`

    # Validate each configuration list independently.  The original used
    # ``and``, which suppressed both warnings unless BOTH lists mismatched.
    if numConv != len(kernel_size):
        print('Incompatible number of kernel sizes with number of Conv layer!')
    if numConv != len(kernel_num):
        print('Incompatible number of filters with number of Conv layer!')

    # Construct Convolutional Layers.  Integer division keeps the declared
    # length an int (``/`` yields a float under Python 3, which Keras rejects).
    for n in range(numConv):
        model.add(Conv1D(kernel_num[n], kernel_size=kernel_size[n],
                         padding='same',
                         input_shape=(4, target_length // (maxpool_size ** n)),
                         activation='relu'))
        model.add(Dropout(dropout))

        if maxpool:
            model.add(MaxPooling1D(pool_size=maxpool_size, padding='same'))
            model.add(Dropout(dropout))

        if normalization:
            model.add(BatchNormalization())

    # Flatten the network
    model.add(Flatten())
    model.add(Dropout(dropout))

    # Construct optional dense head (fixed 0.2 dropout between layers)
    if add_dense_layer:
        for units in dense_unit:
            model.add(Dense(units, activation='relu'))
            model.add(Dropout(0.2))

    model.add(Dense(1))  # single linear output unit
    return model
def create_model1(optimizer, metric=None):
    """Build and compile CNN architecture #1 for a flat 62-feature input.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Passed straight to ``model.compile``.
    metric : list or None
        Metrics for ``compile``; defaults to ``['accuracy']``.  ``None`` is
        used as the sentinel to avoid the shared mutable-default pitfall.

    Returns
    -------
    Compiled keras Sequential binary classifier (sigmoid output).
    """
    if metric is None:
        metric = ['accuracy']

    ##CNN 1
    model_1 = Sequential()

    # Convolutional layers: reshape the flat vector to (62, 1) for Conv1D
    model_1.add(Reshape((62, 1), input_shape=(62,)))
    model_1.add(Conv1D(31, kernel_size=2, strides=2, padding="same", activation='relu'))

    # Dense head
    model_1.add(Flatten())
    model_1.add(Dense(units=15, activation='relu'))
    model_1.add(Dense(units=1, activation='sigmoid'))

    model_1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=metric)

    return model_1
Example n. 3
0
 def _init_conv(self, filters, name):
     """Create a Conv1D layer configured from this object's hyperparameters.

     Only ``filters`` and ``name`` vary per call; every other argument is
     read from the attribute of the same name on ``self`` (padding is
     always ``'same'``).
     """
     conv_config = dict(
         filters=filters,
         kernel_size=self.kernel_size,
         strides=self.strides,
         padding='same',
         dilation_rate=self.dilation_rate,
         activation=self.activation,
         use_bias=self.use_bias,
         kernel_initializer=self.kernel_initializer,
         bias_initializer=self.bias_initializer,
         kernel_regularizer=self.kernel_regularizer,
         bias_regularizer=self.bias_regularizer,
         activity_regularizer=self.activity_regularizer,
         kernel_constraint=self.kernel_constraint,
         bias_constraint=self.bias_constraint,
         name=name,
     )
     return Conv1D(**conv_config)
def create_model(use_case,
                 model_num,
                 optimizer,
                 dropout=False,
                 metric=None):
    """Build and compile one of the predefined 1-D CNN binary classifiers.

    Parameters
    ----------
    use_case : str
        One of ``"V2_noClaim"``, ``"V2_withClaim"`` (62 input features),
        ``"V1_withClaim"`` (97 features) or ``"V1_noClaim"`` (55 features).
    model_num : int
        Architecture index within the use case (1-10 for V2, 1-6 for V1).
    optimizer : str or keras optimizer
        Passed straight to ``model.compile``.
    dropout : bool or float
        ``False`` disables dropout; a float enables a Dropout layer with
        that rate after every hidden dense layer.
    metric : list or None
        Metrics for ``compile``; defaults to ``['accuracy']``.  ``None`` is
        the sentinel so the default list is not shared between calls, and
        the parameter is now honoured by every architecture (previously only
        V2 model 1 used it; all others hard-coded ``['accuracy']``).

    Returns
    -------
    Compiled keras Sequential model (sigmoid output, binary crossentropy),
    or ``None`` (after printing a message) for an unknown use case / model.
    """
    if metric is None:
        metric = ['accuracy']

    # Input width per use case.
    input_dims = {
        "V2_noClaim": 62,
        "V2_withClaim": 62,
        "V1_withClaim": 97,
        "V1_noClaim": 55,
    }
    if use_case not in input_dims:
        print("Wrong Use case! You entered :")
        print(use_case)
        return None

    # Each architecture is described by
    #   (conv layers as (filters, kernel_size, strides), dense hidden units).
    # Every conv layer uses padding="same" and ReLU; the dense head ends in
    # a single sigmoid unit.  Values transcribed from the original per-model
    # copy-paste blocks.
    v2_archs = {
        1: ([(31, 2, 2)], [15]),
        2: ([(60, 3, 1), (30, 2, 2)], [15]),
        3: ([(61, 2, 1), (60, 2, 1), (30, 2, 2)], [15]),
        4: ([(60, 3, 1), (30, 2, 2)], [15, 10, 5]),
        5: ([(31, 2, 2), (30, 2, 1)], [15]),
        6: ([(61, 2, 1), (60, 2, 1), (30, 2, 2), (10, 3, 3)], [15]),
        7: ([(61, 2, 1), (60, 2, 1), (60, 2, 1), (20, 3, 3)], [30, 15]),
        8: ([(61, 2, 1), (60, 2, 1), (59, 2, 1), (40, 20, 1), (20, 2, 2)],
            [30]),
        9: ([(61, 2, 1), (60, 2, 1), (59, 2, 1), (40, 20, 1), (20, 2, 2)],
            [30]),
        10: ([(31, 2, 2), (20, 11, 1)], [30]),
    }
    v1_with_claim_archs = {
        1: ([(20, 5, 5)], [30, 15]),
        2: ([(48, 3, 2), (20, 2, 2)], [30, 15]),
        3: ([(48, 2, 2), (40, 8, 1), (20, 2, 2)], [30, 15]),
        4: ([(25, 4, 4)], [30, 15]),
        5: ([(33, 3, 3), (20, 14, 1)], [30, 15]),
        6: ([(48, 2, 2), (40, 8, 1), (20, 2, 2), (10, 2, 2)], [30, 15]),
    }
    v1_no_claim_archs = {
        1: ([(50, 6, 1), (40, 11, 1), (20, 2, 2)], [30, 15]),
        2: ([(11, 5, 5)], [30, 15]),
        3: ([(50, 5, 1), (24, 4, 5)], [35, 15]),
        4: ([(50, 5, 1), (24, 5, 2)], [30, 15]),
        5: ([(28, 2, 2), (20, 9, 1)], [30, 15]),
        6: ([(48, 8, 1), (40, 8, 1), (20, 2, 2), (10, 2, 2)], [30, 15]),
    }
    archs = {
        "V2_noClaim": v2_archs,
        "V2_withClaim": v2_archs,
        "V1_withClaim": v1_with_claim_archs,
        "V1_noClaim": v1_no_claim_archs,
    }[use_case]

    if model_num not in archs:
        print("Wrong model num : architecture not defined")
        return None

    conv_layers, dense_units = archs[model_num]
    input_dim = input_dims[use_case]

    model = Sequential()

    # Convolutional Layers: reshape the flat feature vector for Conv1D
    model.add(Reshape((input_dim, 1), input_shape=(input_dim, )))
    for filters, kernel_size, strides in conv_layers:
        model.add(
            Conv1D(filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding="same",
                   activation='relu'))

    # Dense Layers
    model.add(Flatten())
    for units in dense_units:
        model.add(Dense(units=units, activation='relu'))
        # ``dropout`` doubles as an enable flag (False) and a rate (float).
        if dropout is not False:
            model.add(Dropout(dropout))
    model.add(Dense(units=1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=metric)

    return model
Example n. 5
0
# Class balance of the training labels (counts recorded when this was run):
#0.0    1463
#1.0    486
# Class balance of the test labels:
pd.value_counts(y_test)
#0.0    794
#1.0    256
############################################################################
###Modelling
# Switch to the model output directory (``model_path`` defined earlier in
# the script — TODO confirm) so artifacts are saved there.
os.chdir(model_path)
os.getcwd()
##CNN 1
model = Sequential()

#Convolutional Layers
# Reshape the flat 55-feature vector to (55, 1) so Conv1D can consume it.
model.add(Reshape((55, 1)))
model.add(
    Conv1D(50, kernel_size=5, strides=1, padding="same", activation='relu'))

#Dense Layers
model.add(Flatten())
# NOTE(review): input_dim=55 here is redundant — the Reshape layer already
# fixes the input shape, so Keras ignores it on a non-first layer.
model.add(Dense(units=60, input_dim=55, activation='relu'))
#model.add(Dropout(0.1))
model.add(Dense(units=30, activation='relu'))
#model.add(Dropout(0.1))
model.add(Dense(units=15, activation='relu'))
#model.add(Dropout(0.3))
# Single sigmoid unit: binary classification head.
model.add(Dense(units=1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
Esempio n. 6
0
    def __init__(self, fl, mode, hparams):
        """
        Initialises a new DNN model based on input features_dim, labels_dim, hparams.

        :param fl: Data-loader object. Attributes read here: features_c_dim,
            labels, normalise_labels, labels_scaler (project type; exact
            contract not visible from this file).
        :param mode: Architecture selector: one of 'ann', 'ann2', 'ann3',
            'conv1', 'conv2', 'lstm'.
        :param hparams: Dict containing hyperparameter information. Dict can be
            created using create_hparams() function. Keys read below: 'pre',
            'activation', 'reg_l1', 'reg_l2', 'filters' (plus whatever the
            ann() helper consumes, e.g. hidden_layers).
        """
        # self.features_dim = fl.features_c_dim
        # self.labels_dim = fl.labels_dim  # Assuming that each task has only 1 dimensional output
        self.features_dim = fl.features_c_dim + 1  # 1 for the positional argument
        self.labels_dim = 1
        # One element per label column plus one extra (mirrors the +1 above).
        self.numel = fl.labels.shape[1] + 1
        self.hparams = hparams
        self.mode = mode
        self.normalise_labels = fl.normalise_labels
        self.labels_scaler = fl.labels_scaler
        # Single shared input tensor of continuous features.
        features_in = Input(shape=(self.features_dim, ),
                            name='main_features_c_input')

        # Selection of model
        # NOTE(review): if mode matches none of the branches below, self.model
        # is never assigned and the compile() call at the end raises
        # AttributeError -- confirm callers always pass a valid mode.
        if mode == 'ann':
            # Plain feed-forward network built by the ann() helper.
            model = ann(self.features_dim, self.labels_dim, self.hparams)
            x = model(features_in)
            self.model = Model(inputs=features_in, outputs=x)
        elif mode == 'ann2':
            # Shared trunk with two heads: a scalar linear 'end_node' and a
            # (labels_dim - 1)-dimensional ann() head.
            model_1 = ann(self.features_dim, 50, self.hparams)
            x = model_1(features_in)
            model_end = ann(50, 50, self.hparams)
            end = model_end(x)
            end_node = Dense(units=1,
                             activation='linear',
                             kernel_regularizer=regularizers.l1_l2(
                                 l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                             name='output_layer')(end)

            model_2 = ann(50, self.labels_dim - 1, self.hparams)

            x = model_2(x)
            self.model = Model(inputs=features_in, outputs=[end_node, x])
        elif mode == 'ann3':
            # Three L1/L2-regularised dense layers, then a linear scalar output.
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(0))(features_in)
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(2))(x)
            # x = BatchNormalization()(x)
            # NOTE(review): despite the name 'Pre_set_19' this head outputs a
            # single unit in this class variant.
            x = Dense(units=1,
                      activation='linear',
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_set_19')(x)

            self.model = Model(inputs=features_in, outputs=x)
        elif mode == 'conv1':
            # Dense layers project the features to a fixed length of 19, which
            # is then treated as a 1-D sequence for a 3-layer Conv1D stack.
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='shared' + str(1))(features_in)
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            #x = BatchNormalization()(x)
            x = Dense(units=19,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_set_19')(x)
            #x = BatchNormalization()(x)

            x = Reshape(target_shape=(19, 1))(x)
            # Widening, length-preserving Conv1D stack: filters, 2x, 4x.
            x = Conv1D(filters=hparams['filters'],
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            #x = BatchNormalization()(x)
            x = Conv1D(filters=hparams['filters'] * 2,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            x = Conv1D(filters=hparams['filters'] * 4,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            #x = Permute((2,1))(x)
            #x = GlobalAveragePooling1D()(x)
            # Collapse the channel dimension back to one value per timestep.
            x = TimeDistributed(Dense(1, activation='linear'))(x)
            x = Reshape(target_shape=(19, ))(x)

            self.model = Model(inputs=features_in, outputs=x)

        elif mode == 'conv2':
            # Two-headed model: a dense scalar head ('end_node') plus a
            # Conv1D/max-pool pipeline producing a 20-dim sequence output
            # (80 timesteps halved twice by pool_size=2).
            x = Dense(units=10,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(1))(features_in)
            x = Dense(units=10,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(2))(x)
            end = Dense(units=10,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(1))(x)
            end = Dense(units=10,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(2))(end)
            end_node = Dense(units=1,
                             activation='linear',
                             kernel_regularizer=regularizers.l1_l2(
                                 l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                             name='output_layer')(end)

            x = Dense(units=80,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            x = Reshape(target_shape=(80, 1))(x)
            x = Conv1D(filters=8,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)

            x = MaxPooling1D(pool_size=2)(x)
            x = Conv1D(filters=16,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            x = MaxPooling1D(pool_size=2)(x)
            #x = Permute((2,1))(x)
            #x = GlobalAveragePooling1D()(x)
            x = TimeDistributed(Dense(1, activation='linear'))(x)
            x = Reshape(target_shape=(20, ))(x)

            self.model = Model(inputs=features_in, outputs=[end_node, x])

        elif mode == 'lstm':
            # Same two-headed layout as 'conv2', but the sequence head uses a
            # RepeatVector + stacked-LSTM decoder over 20 timesteps.
            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(1))(features_in)
            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(2))(x)
            end = Dense(units=20,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(1))(x)
            end = Dense(units=20,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(2))(end)
            end_node = Dense(units=1,
                             activation='linear',
                             kernel_regularizer=regularizers.l1_l2(
                                 l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                             name='output_layer')(end)

            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(2))(x)

            x = RepeatVector(n=20)(x)
            x = LSTM(units=30, activation='relu', return_sequences=True)(x)
            x = LSTM(units=30, activation='relu', return_sequences=True)(x)

            x = TimeDistributed(Dense(1))(x)
            x = Reshape(target_shape=(20, ))(x)
            '''
            x = Permute((2,1))(x)
            x = GlobalAveragePooling1D()(x)
            '''
            self.model = Model(inputs=features_in, outputs=[end_node, x])

        # Gradient norms are clipped to 1 to stabilise training.
        optimizer = Adam(clipnorm=1)

        self.model.compile(optimizer=optimizer, loss='mean_squared_error')
Esempio n. 7
0
    def __init__(self, fl, mode, hparams):
        """
        Initialises a new DNN model based on input features_dim, labels_dim, hparams.

        :param fl: Data-loader object. Attributes read here: features_c_dim,
            labels_dim, label_type, normalise_labels, labels_scaler (project
            type; exact contract not visible from this file).
        :param mode: Architecture selector: one of 'ann', 'ann2', 'ann3',
            'conv1', 'conv2', 'lstm'.
        :param hparams: Dict containing hyperparameter information. Dict can be
            created using create_hparams() function. Keys read below: 'pre',
            'activation', 'reg_l1', 'reg_l2', 'filters', 'learning_rate',
            'loss' (plus whatever the ann() helper consumes).
        """
        self.features_dim = fl.features_c_dim
        self.labels_dim = fl.labels_dim  # Assuming that each task has only 1 dimensional output
        self.hparams = hparams
        self.mode = mode
        self.normalise_labels = fl.normalise_labels
        self.labels_scaler = fl.labels_scaler
        # Single shared input tensor of continuous features.
        features_in = Input(shape=(self.features_dim, ),
                            name='main_features_c_input')

        # Selection of model
        # NOTE(review): if mode matches none of the branches below, self.model
        # is never assigned and the compile calls at the end raise
        # AttributeError -- confirm callers always pass a valid mode.
        if mode == 'ann':
            # Plain feed-forward network built by the ann() helper.
            model = ann(self.features_dim, self.labels_dim, self.hparams)
            x = model(features_in)
            self.model = Model(inputs=features_in, outputs=x)
        elif mode == 'ann2':
            # Shared trunk with two heads: a scalar linear 'end_node' and a
            # (labels_dim - 1)-dimensional ann() head.
            model_1 = ann(self.features_dim, 50, self.hparams)
            x = model_1(features_in)
            model_end = ann(50, 50, self.hparams)
            end = model_end(x)
            end_node = Dense(units=1,
                             activation='linear',
                             kernel_regularizer=regularizers.l1_l2(
                                 l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                             name='output_layer')(end)

            model_2 = ann(50, self.labels_dim - 1, self.hparams)

            x = model_2(x)
            self.model = Model(inputs=features_in, outputs=[end_node, x])
        elif mode == 'ann3':
            # Three L1/L2-regularised dense layers, then a linear output of
            # the full labels dimension.
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(0))(features_in)
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(2))(x)
            # x = BatchNormalization()(x)
            x = Dense(units=self.labels_dim,
                      activation='linear',
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Final')(x)

            self.model = Model(inputs=features_in, outputs=x)
        elif mode == 'conv1':
            # Project the features to a fixed-length sequence (20 points for
            # 'gf20' labels, otherwise 19) and run a 3-layer Conv1D stack.
            if fl.label_type == 'gf20':
                final_dim = 20
            else:
                final_dim = 19
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='shared' + str(1))(features_in)
            x = Dense(units=hparams['pre'],
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            #x = BatchNormalization()(x)
            x = Dense(units=final_dim,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_set_19')(x)
            #x = BatchNormalization()(x)

            x = Reshape(target_shape=(final_dim, 1))(x)
            # Widening, length-preserving Conv1D stack: filters, 2x, 4x.
            x = Conv1D(filters=hparams['filters'],
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            #x = BatchNormalization()(x)
            x = Conv1D(filters=hparams['filters'] * 2,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            x = Conv1D(filters=hparams['filters'] * 4,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            #x = Permute((2,1))(x)
            #x = GlobalAveragePooling1D()(x)
            # Collapse the channel dimension back to one value per timestep.
            x = TimeDistributed(Dense(1, activation='linear'))(x)
            x = Reshape(target_shape=(final_dim, ))(x)

            self.model = Model(inputs=features_in, outputs=x)

        elif mode == 'conv2':
            # Two-headed model: a dense scalar head ('end_node') plus a
            # Conv1D/max-pool pipeline producing a 20-dim sequence output
            # (80 timesteps halved twice by pool_size=2).
            x = Dense(units=10,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(1))(features_in)
            x = Dense(units=10,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(2))(x)
            end = Dense(units=10,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(1))(x)
            end = Dense(units=10,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(2))(end)
            end_node = Dense(units=1,
                             activation='linear',
                             kernel_regularizer=regularizers.l1_l2(
                                 l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                             name='output_layer')(end)

            x = Dense(units=80,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            x = Reshape(target_shape=(80, 1))(x)
            x = Conv1D(filters=8,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)

            x = MaxPooling1D(pool_size=2)(x)
            x = Conv1D(filters=16,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(x)
            x = MaxPooling1D(pool_size=2)(x)
            #x = Permute((2,1))(x)
            #x = GlobalAveragePooling1D()(x)
            x = TimeDistributed(Dense(1, activation='linear'))(x)
            x = Reshape(target_shape=(20, ))(x)

            self.model = Model(inputs=features_in, outputs=[end_node, x])

        elif mode == 'lstm':
            # Same two-headed layout as 'conv2', but the sequence head uses a
            # RepeatVector + stacked-LSTM decoder over 20 timesteps.
            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(1))(features_in)
            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Shared_e_' + str(2))(x)
            end = Dense(units=20,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(1))(x)
            end = Dense(units=20,
                        activation=hparams['activation'],
                        kernel_regularizer=regularizers.l1_l2(
                            l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                        name='Dense_e_' + str(2))(end)
            end_node = Dense(units=1,
                             activation='linear',
                             kernel_regularizer=regularizers.l1_l2(
                                 l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                             name='output_layer')(end)

            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(1))(x)
            x = Dense(units=20,
                      activation=hparams['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          l1=hparams['reg_l1'], l2=hparams['reg_l2']),
                      name='Pre_' + str(2))(x)

            x = RepeatVector(n=20)(x)
            x = LSTM(units=30, activation='relu', return_sequences=True)(x)
            x = LSTM(units=30, activation='relu', return_sequences=True)(x)

            x = TimeDistributed(Dense(1))(x)
            x = Reshape(target_shape=(20, ))(x)
            '''
            x = Permute((2,1))(x)
            x = GlobalAveragePooling1D()(x)
            '''
            self.model = Model(inputs=features_in, outputs=[end_node, x])

        # Gradient norms are clipped to 1 to stabilise training.
        optimizer = Adam(learning_rate=hparams['learning_rate'], clipnorm=1)

        # NOTE(review): weighted_mse is defined but never selected below --
        # presumably kept for experimentation; confirm before deleting.
        def weighted_mse(y_true, y_pred):
            # Later positions weighted more heavily (sqrt of 1..19); assumes a
            # 19-dim output -- TODO confirm behaviour for 'gf20' (20-dim).
            loss_weights = np.sqrt(np.arange(1, 20))
            #loss_weights = np.arange(1, 20)
            return K.mean(K.square(y_pred - y_true) * loss_weights, axis=-1)

        # Percentage-style error normalised by |last label column|, clipped
        # away from zero to avoid division blow-ups.
        def haitao_error(y_true, y_pred):
            diff = K.abs(
                (y_true - y_pred) /
                K.reshape(K.clip(K.abs(y_true[:, -1]), K.epsilon(), None),
                          (-1, 1)))
            return 100. * K.mean(diff, axis=-1)

        # NOTE(review): an unrecognised hparams['loss'] silently leaves the
        # model uncompiled (no error is raised here).
        if hparams['loss'] == 'mape':
            self.model.compile(optimizer=optimizer,
                               loss=MeanAbsolutePercentageError())
        elif hparams['loss'] == 'haitao':
            self.model.compile(optimizer=optimizer, loss=haitao_error)
        elif hparams['loss'] == 'mse':
            self.model.compile(optimizer=optimizer, loss='mean_squared_error')
Esempio n. 8
0
#1.0     486
# Class balance of the held-out test labels (counts recorded in the comments).
pd.value_counts(y_test)
#0.0    794
#1.0    256
############################################################################
###Modelling
# Work from the directory where trained models are stored.
os.chdir(model_path)
os.getcwd()

##CNN 1
# Three-stage 1-D CNN for binary classification over 55 input features.
model_1 = Sequential()

#Convolutional Layers
model_1.add(Reshape((55, 1), input_shape=(55, )))
model_1.add(
    Conv1D(50, kernel_size=6, strides=1, padding="same", activation='relu'))
model_1.add(
    Conv1D(40, kernel_size=11, strides=1, padding="same", activation='relu'))
# Stride 2 halves the sequence length before the dense head.
model_1.add(
    Conv1D(20, kernel_size=2, strides=2, padding="same", activation='relu'))

#Dense Layers
model_1.add(Flatten())
model_1.add(Dense(units=30, activation='relu'))
model_1.add(Dense(units=15, activation='relu'))
# Single sigmoid unit -> probability of the positive class.
model_1.add(Dense(units=1, activation='sigmoid'))

# Binary cross-entropy with Adam; accuracy tracked during training.
model_1.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
Esempio n. 9
0
#1.0    138
# Class balance of the held-out test labels (counts recorded in the comments).
pd.value_counts(y_test)
#0.0    106
#1.0     72
############################################################################
###Modelling
# Work from the directory where trained models are stored.
os.chdir(model_path)
os.getcwd()

##CNN 1
# Single-conv 1-D CNN for binary classification over 97 input features.
model_1 = Sequential()

#Convolutional Layers
model_1.add(Reshape((97, 1), input_shape=(97, )))
# Stride 5 downsamples the 97-step sequence roughly five-fold.
model_1.add(
    Conv1D(20, kernel_size=5, strides=5, padding="same", activation='relu'))

#Dense Layers
model_1.add(Flatten())
model_1.add(Dense(units=30, activation='relu'))
model_1.add(Dense(units=15, activation='relu'))
# Single sigmoid unit -> probability of the positive class.
model_1.add(Dense(units=1, activation='sigmoid'))

# Binary cross-entropy with Adam; accuracy tracked during training.
model_1.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

# Fit the model_1
# NOTE(review): X_train/y_train come from earlier in this script (not
# visible here) -- confirm they match the (97,) feature layout above.
model_1.fit(X_train, y_train, epochs=20, batch_size=20)

#predictions
Esempio n. 10
0
def model_self_learning(java_matrix, parola_segno):
    """Append newly captured sign samples to the dataset and retrain the CNN.

    Each row of ``java_matrix`` is one feature vector for the sign named by
    ``parola_segno``. The rows are appended (with the label as last column)
    to ``Dataset_segni.csv`` next to this file, then a 1-D CNN classifier is
    retrained from scratch on the full dataset and saved to ``model_cnn.h5``.

    :param java_matrix: Iterable of feature rows (list-like) coming from the
        Java side -- assumed to yield list rows; TODO confirm element type.
    :param parola_segno: Label (translated sign word) applied to every row of
        ``java_matrix``.
    :return: The Python-side copy of the matrix with the label appended to
        each row.
    """
    import numpy as np
    import pandas as pd
    from csv import writer
    from pandas import read_csv
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    from tensorflow.keras.layers import Flatten
    from tensorflow.keras.layers import Dropout
    from tensorflow.python.keras.layers.convolutional import Conv1D
    from tensorflow.python.keras.layers.convolutional import MaxPooling1D
    from tensorflow.keras.models import Model
    from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
    from tensorflow.keras.utils import to_categorical
    from tensorflow.keras.models import model_from_json
    from os.path import dirname, join

    # Copy the (possibly Java-backed) matrix into a plain Python list.
    # An empty input keeps the original sentinel value [[]].
    python_matrix = [[]]
    for row_idx, sample in enumerate(java_matrix):
        if row_idx == 0:
            python_matrix = [sample]
        else:
            python_matrix.append(sample)

    # Append the label to every sample and persist the rows to the dataset.
    # (The 'with' block closes the file; no explicit close() needed.)
    dataset_path = join(dirname(__file__), 'Dataset_segni.csv')
    with open(dataset_path, 'a+', newline='') as write_obj:
        csv_writer = writer(write_obj)
        for i, sample in enumerate(python_matrix):
            python_matrix[i] = sample + [parola_segno]
            csv_writer.writerow(python_matrix[i])

    df = pd.read_csv(dataset_path)

    trad = df["Traduzione"]

    # Unique translations in order of first appearance -> integer class ids.
    # (dict.fromkeys preserves insertion order; also safe on an empty column.)
    gest = list(dict.fromkeys(trad))
    for label_id, gesture in enumerate(gest):
        df.loc[df["Traduzione"] == gesture, "label"] = label_id

    # Last column is the numeric label; the two trailing columns
    # (translation + label) are excluded from the feature matrix.
    y = df.iloc[:, -1]
    x = df.iloc[:, :-2]

    y = to_categorical(y)
    x = np.array(x[:])

    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=120)

    # Conv1D expects (samples, timesteps, channels).
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
    x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

    batch_size = 64
    num_classes = len(gest)
    epochs = 10
    input_shape = (x_train.shape[1], 1)

    # Three Conv1D + LeakyReLU + max-pool stages, then a dense softmax head.
    model = Sequential()
    model.add(
        Conv1D(filters=32,
               kernel_size=3,
               activation='linear',
               input_shape=input_shape))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling1D(pool_size=(2), padding='same'))
    model.add(
        Conv1D(filters=64, kernel_size=3, activation='linear', padding='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling1D(pool_size=(2), padding='same'))
    model.add(
        Conv1D(filters=128, kernel_size=3, activation='linear',
               padding='same'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling1D(pool_size=(2), padding='same'))
    model.add(Flatten())
    model.add(Dense(128, activation='linear'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Dense(num_classes, activation='softmax'))

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # fit model -- use the configured epochs variable (previously hard-coded
    # to the same value of 10 while the variable went unused).
    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))

    model_path = join(dirname(__file__), 'model_cnn.h5')
    model.save(model_path)

    return python_matrix
def create_model(model_num, optimizer, dropout=False, metric=None):
    """Build and compile one of ten fixed 1-D CNN binary classifiers.

    Every architecture takes a flat 62-feature vector, reshapes it to
    (62, 1), applies a stack of same-padded ReLU Conv1D layers, then a
    small ReLU dense head ending in a single sigmoid unit.

    :param model_num: int in 1..10 selecting the architecture.
    :param optimizer: Keras optimizer (name or instance). NOTE(review):
        the original code only honoured this for model 1; every other
        model compiled with 'adam'. That behaviour is preserved here.
    :param dropout: False to disable, or a float rate added after each
        hidden dense layer.
    :param metric: list of metrics; defaults to ['accuracy']. Like
        ``optimizer``, only honoured for model 1 (preserved behaviour).
    :return: a compiled Sequential model, or None for an unknown
        ``model_num`` (a message is printed in that case).
    """
    # (filters, kernel_size, strides) for each Conv1D layer, per model.
    conv_specs = {
        1: [(31, 2, 2)],
        2: [(60, 3, 1), (30, 2, 2)],
        3: [(61, 2, 1), (60, 2, 1), (30, 2, 2)],
        4: [(60, 3, 1), (30, 2, 2)],
        5: [(31, 2, 2), (30, 2, 1)],
        6: [(61, 2, 1), (60, 2, 1), (30, 2, 2), (10, 3, 3)],
        7: [(61, 2, 1), (60, 2, 1), (60, 2, 1), (20, 3, 3)],
        8: [(61, 2, 1), (60, 2, 1), (59, 2, 1), (40, 20, 1), (20, 2, 2)],
        9: [(61, 2, 1), (60, 2, 1), (59, 2, 1), (40, 20, 1), (20, 2, 2)],
        10: [(31, 2, 2), (20, 11, 1)],
    }
    # Hidden dense layer widths (the final sigmoid unit is implicit).
    dense_specs = {
        1: [15],
        2: [15],
        3: [15],
        4: [15, 10, 5],
        5: [15],
        6: [15],
        7: [30, 15],
        8: [30],
        9: [30],
        10: [30],
    }

    if model_num not in conv_specs:
        print("Wrong model num : architecture not defined")
        return None

    # Avoid the mutable-default-argument pitfall: the previous signature
    # used metric=['accuracy'] directly.
    if metric is None:
        metric = ['accuracy']

    model = Sequential()

    # Convolutional Layers
    model.add(Reshape((62, 1), input_shape=(62, )))
    for filters, kernel_size, strides in conv_specs[model_num]:
        model.add(
            Conv1D(filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding="same",
                   activation='relu'))

    # Dense Layers
    model.add(Flatten())
    for units in dense_specs[model_num]:
        model.add(Dense(units=units, activation='relu'))
        if dropout is not False:
            model.add(Dropout(dropout))
    model.add(Dense(units=1, activation='sigmoid'))

    # Preserve the original quirk: only model 1 used the caller-supplied
    # optimizer and metrics; all others were hard-coded.
    if model_num == 1:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=metric)
    else:
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

    return model
# Class-balance check: train counts 0.0 -> 627, 1.0 -> 523;
# test counts 0.0 -> 360, 1.0 -> 260.
pd.value_counts(y_test)
############################################################################
### Modelling
os.chdir(model_path)
os.getcwd()

## CNN 1: one strided conv layer followed by a small dense head.
model_1 = Sequential()
for layer in (
        Reshape((62, 1), input_shape=(62,)),
        Conv1D(31, kernel_size=2, strides=2, padding="same",
               activation='relu'),
        Flatten(),
        Dense(units=15, activation='relu'),
        Dense(units=1, activation='sigmoid'),
):
    model_1.add(layer)

model_1.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

# Fit the model
model_1.fit(X_train, y_train, epochs=20, batch_size=20)

# Report Info
model_name = "CNN"
defined_params = ""
file_name = "CNN_1"
Esempio n. 13
0
def main():
    """Load sequence data, then train and report a DecisionTree baseline
    followed by a 1-D CNN on the same train/test split."""
    print("Loading samples and labels")
    samples, labels, _ = load_files("data")
    print("Loaded {} samples".format(samples.shape[0]))

    sequence_dim = 100
    print("Converting to sequences of length {}".format(sequence_dim))
    samples, labels = make_sequences(samples, labels, sequence_dim)

    print("Number of samples from sequences: {}".format(samples.shape[0]))

    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)

    # --- Decision tree baseline on flattened sequences --------------------
    flat_samples = samples.reshape(samples.shape[0], -1)  #tree!
    train_x, test_x, train_y, test_y = train_test_split(flat_samples,
                                                        labels,
                                                        test_size=0.25,
                                                        random_state=42)

    print("=" * 20)
    print("Building DecisionTree model")
    tree = DecisionTreeClassifier()
    tree.fit(train_x, train_y)
    tree_pred = tree.predict(test_x)
    true_cls = test_y.argmax(axis=1)
    tree_cls = tree_pred.argmax(axis=1)
    print(confusion_matrix(true_cls, tree_cls))
    print(classification_report(true_cls, tree_cls))
    tree_acc = accuracy_score(true_cls, tree_cls)
    print("Accuracy Tree: {:.2f}".format(tree_acc))
    print("Cohen's Kappa {:.2f}".format(
        cohen_kappa_score(true_cls, tree_cls)))

    # --- 1-D CNN on the raw (unflattened) sequences ------------------------
    print("=" * 20)
    print("Building CNN model")

    train_x, test_x, train_y, test_y = train_test_split(samples,
                                                        labels,
                                                        test_size=0.25,
                                                        random_state=42)
    input_shape = (samples.shape[1], samples.shape[2])
    cnn_layers = [
        Conv1D(32, 10, padding="same", input_shape=input_shape),
        Activation("relu"),
        BatchNormalization(),
        Dropout(0.2),
        Conv1D(64, 10, padding="same"),
        Activation("relu"),
        BatchNormalization(),
        Dropout(0.2),
        Conv1D(128, 10, padding="same"),
        Activation("relu"),
        Dropout(0.2),
        Flatten(input_shape=input_shape),
        Dense(128, activation='sigmoid'),
        Dense(64, activation='sigmoid'),
        Dense(labels.shape[1], activation='softmax'),
    ]
    model = Sequential(cnn_layers)

    model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])

    model.fit(train_x,
              train_y,
              batch_size=128,
              epochs=10,
              validation_data=(test_x, test_y))

    cnn_pred = model.predict(test_x)
    true_cls = test_y.argmax(axis=1)
    cnn_cls = cnn_pred.argmax(axis=1)

    print(confusion_matrix(true_cls, cnn_cls))
    print(classification_report(true_cls, cnn_cls,
                                target_names=lb.classes_))
    print("CNN Accuracy: {:.2f}".format(accuracy_score(true_cls, cnn_cls)))
    print("Cohen's Kappa {:.2f}".format(
        cohen_kappa_score(true_cls, cnn_cls)))
    # Keep the console window open until the user presses Enter.
    input("")
Esempio n. 14
0
def evaluate_model(trainX, trainy, testX, testy, testy_norm):
    """
    Create, fit and evaluate a 3-conv 1-D CNN classifier.

    :param trainX: (array) training windows, shape (n, timesteps, features)
    :param trainy: (array) one-hot training labels
    :param testX: (array) test windows
    :param testy: (array) one-hot test labels
    :param testy_norm: (array) integer-encoded test labels, used for the
        sklearn classification report and confusion matrix
    :return:
        accuracy (float)
        loss (float)
    """
    verbose, epochs, batch_size = 1, 60, 16  # 16
    trainX, testX = scale_data(trainX, testX)
    #    trainX, testX = Magnitude(trainX,testX)
    #    trainX, testX = AutoCorallation(trainX, testX)
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[
        2], trainy.shape[1]
    print(testX.shape)
    print(testy.shape)
    model = Sequential()

    # Small structure
    model.add(
        Conv1D(32,
               5,
               activation='relu',
               padding='same',
               input_shape=(n_timesteps, n_features)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(64, 5, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(128, 5, activation='relu', padding='same'))
    model.add(SpatialDropout1D(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.summary()
    plot_model(model, 'model_info.png', show_shapes=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    tensorboard = TensorBoard(log_dir="logs_3xconv/{}".format(time()),
                              histogram_freq=1,
                              write_images=True)
    history = model.fit(trainX,
                        trainy,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=verbose,
                        validation_split=0.15,
                        shuffle=True,
                        callbacks=[tensorboard])
    # evaluate model
    loss, accuracy = model.evaluate(testX,
                                    testy,
                                    batch_size=batch_size,
                                    verbose=0)
    export_model(model)
    # `Sequential.predict_classes` was deprecated and removed in TF 2.6;
    # taking argmax over the softmax output is the documented equivalent.
    predictions = np.argmax(model.predict(testX), axis=-1)
    print(metrics.classification_report(testy_norm, predictions))
    # Renamed from `confusion_matrix` so the local no longer shadows the
    # commonly-imported sklearn function of the same name.
    cm = metrics.confusion_matrix(y_true=testy_norm, y_pred=predictions)
    print(cm)
    normalised_confusion_matrix = np.array(
        cm, dtype=np.float32) / np.sum(cm) * 100
    print("")
    print("Confusion matrix (normalised to % of total test data):")
    print(normalised_confusion_matrix)

    width = 12
    height = 12
    # fig, ax = plt.subplots()
    plt.figure(figsize=(width, height))
    plt.imshow(normalised_confusion_matrix,
               interpolation='nearest',
               cmap=plt.cm.rainbow)
    plt.title("Confusion matrix \n(normalized to the entire test set [%])")
    plt.colorbar()
    tick_marks = np.arange(2)
    LABELS = ["Dynamic", "Static"]
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.tight_layout()
    plt.ylabel('Real value')
    plt.xlabel('Prediction value')

    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc='upper left')
    plt.figure()
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accurancy')
    plt.ylabel('Accurancy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc='upper left')
    plt.show()
    return accuracy, loss
Esempio n. 15
0
# Class-balance check: 0.0 -> 171; test split: 1.0 -> 152, 0.0 -> 88.
pd.value_counts(y_test)
############################################################################
### Modelling
os.chdir(model_path)
os.getcwd()

## CNN 1: a single strided conv layer plus a small dense head.
model_1 = Sequential()
model_1.add(Reshape((62, 1), input_shape=(62, )))
model_1.add(Conv1D(31,
                   kernel_size=2,
                   strides=2,
                   padding="same",
                   activation='relu'))

# Dense head: 15 ReLU units down to one sigmoid output.
model_1.add(Flatten())
model_1.add(Dense(15, activation='relu'))
model_1.add(Dense(1, activation='sigmoid'))

model_1.compile(loss='binary_crossentropy', optimizer='adam',
                metrics=['accuracy'])

# Fit the model
model_1.fit(X_train, y_train, epochs=20, batch_size=20)

# Report Info
model_name = "CNN"
Esempio n. 16
0
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.layers.embeddings import Embedding
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.keras.layers.convolutional import Conv1D
from tensorflow.python.keras.layers.convolutional import MaxPooling1D

top_words = 3000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

max_words = 300
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)

model = Sequential()
model.add(Embedding(top_words, 32, input_length=max_words))
model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(250, activation='relu'))
model.add(Dropout(0.001, seed=0))
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train,
          y_train,
          validation_data=(X_test, y_test),
          epochs=5,
          batch_size=128,