Example #1
train_img_feature =  np.array(train_img_feature)[:, :, :, 0].astype(np.float32)
test_img_feature =  np.array(test_img_feature)[:, :, :, 0].astype(np.float32)
train_raw_feature = np.array(train_raw_feature)[:,:]
test_raw_feature = np.array(test_raw_feature)[:,:]

train_raw_feature = np.reshape(train_raw_feature, train_raw_feature.shape+(1,))
test_raw_feature = np.reshape(test_raw_feature, test_raw_feature.shape+(1,))


image_input_shape = train_img_feature[0].shape
value_input_shape = train_value_feature[0].shape
raw_input_shape = train_raw_feature[0].shape

raw_input = Input(shape=raw_input_shape)
raw_stack = Conv1D(filters=20, kernel_size=5, name="convolution0", padding='same', activation='relu')(raw_input)
raw_stack = MaxPooling1D(pool_size=2, name="maxpooling0")(raw_stack)
raw_stack = Conv1D(filters=40, kernel_size=5, name="convolution1", padding='same', activation='relu')(raw_stack)
raw_stack = MaxPooling1D(pool_size=2, name="maxpooling1")(raw_stack)

raw_stack = Flatten()(raw_stack)

value_input = Input(shape=value_input_shape)
value_stack = Dense(20, activation='relu', name="dense0")(value_input)
value_stack = Dense(40, activation='relu', name="dense1")(value_stack)
value_stack = Dense(80, activation='relu', name="dense2")(value_stack)
value_stack = Dense(160, activation='relu', name="dense3")(value_stack)

merged = concatenate([raw_stack, value_stack])
merged = Dropout(0.5)(merged)
merged = Dense(10, activation='softmax', name="output")(merged)
data_slice = data[0:1][:][:]
print(data.shape)
print(data_slice.shape)



window_length = data.shape[1]


#TODO: Normalize Data
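# A minimal normalization sketch for the TODO above (an assumption, not the
# original author's code): assuming `data` is a NumPy array of shape
# (samples, window_length, 3), min-max scale it to [0, 1] so it matches the
# sigmoid activation of the decoder output defined below.
data_min = data.min(axis=(0, 1), keepdims=True)
data_max = data.max(axis=(0, 1), keepdims=True)
data = (data - data_min) / (data_max - data_min + 1e-8)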

#Encoder
input_window = Input(shape=(window_length,3))
x = Conv1D(16, 3, activation="relu", padding="same")(input_window) # Full Dimension
x = BatchNormalization()(x)
x = MaxPooling1D(3, padding="same")(x)
x = Conv1D(1, 3, activation="relu", padding="same")(x)
x = BatchNormalization()(x)
encoded = MaxPooling1D(2, padding="same")(x) # 3 dims... I'm not super convinced this is actually 3 dimensions

encoder = Model(input_window, encoded)

# 3 dimensions in the encoded layer

x = Conv1D(1, 3, activation="relu", padding="same")(encoded) # Latent space
x = BatchNormalization()(x)
x = UpSampling1D(2)(x) # 6 dims
x = Conv1D(16, 3, activation='relu', padding='same')(x) # 5 dims
x = BatchNormalization()(x)
x = UpSampling1D(3)(x) # 10 dims
decoded = Conv1D(3, 3, activation='sigmoid', padding='same')(x) # 10 dims
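
# Hedged completion sketch: the encoder Model is built above, but the full
# autoencoder is never assembled in this excerpt. Tying the input to the
# decoded output and compiling it would typically look like this (the
# optimizer and loss choices here are assumptions).
autoencoder = Model(input_window, decoded)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.summary()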
# embedding matrix with pretrained GloVe representations
with open('./embedding_matrix.p', 'rb') as file:
    embedding_matrix = pickle.load(file)

pretrained_embedding_layer = Embedding(input_dim=input_dim,
                                       output_dim=embeddings_dim,
                                       weights=[embedding_matrix],
                                       input_length=dict_to_export['max_len'],
                                       embeddings_initializer=None,
                                       trainable=False)

model = Sequential()
model.add(pretrained_embedding_layer)
model.add(Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Flatten())
model.add(Dense(units=5, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',\
                metrics=['acc', 'mse'])
model.summary()

# prepare a path for the best model
file_name_w_ext = os.path.basename(sys.argv[0])
file_name = file_name_w_ext.split('.')[0]
save_name_path = './saved_models/' + file_name + '.h5'

history_name_path = './histories/' + file_name + '_history.p'
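
# Hedged reconstruction of series_to_supervised, which the next lines call but
# which is not defined in this excerpt: frame the univariate series as rows of
# n_input lagged values followed by the value to predict, matching the
# data[:, :-1] / data[:, -1] split used below. This is an assumption about the
# helper, not its original implementation.
import numpy as np

def series_to_supervised(sequence, n_in):
    rows = []
    for i in range(len(sequence) - n_in):
        # each row: n_in lag values plus the target value
        rows.append(sequence[i:i + n_in + 1])
    return np.array(rows)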
data = series_to_supervised(train, n_input)
data
#%%
train_x, train_y = data[:, :-1], data[:, -1]
train_x
#%%
train_x = train_x.reshape((train_x.shape[0], n_seq, n_steps, 1))
train_x
#%%
# define model
model = Sequential()
model.add(TimeDistributed(Conv1D(filters=n_filters, kernel_size=n_kernel,
    activation='relu'), input_shape=(None,n_steps,1)))
model.add(TimeDistributed(Conv1D(filters=n_filters, kernel_size=n_kernel,
    activation='relu')))
model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(n_nodes, activation='relu'))
model.add(Dense(n_nodes, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# fit
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)
#%%
# For one round:
history = [x for x in train]
predictions = list()
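# Hedged sketch of model_predict, which the loop below calls but which is not
# defined in this excerpt: take the last n_input observations of history,
# reshape them into the (1, n_seq, n_steps, 1) layout used for training above,
# and forecast one step ahead. The unused config argument and the global
# n_input/n_seq/n_steps names are assumptions carried over from the code above.
def model_predict(model, history, config):
    import numpy as np
    x_input = np.array(history[-n_input:]).reshape((1, n_seq, n_steps, 1))
    yhat = model.predict(x_input, verbose=0)
    return yhat[0, 0]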
# step over each time-step in the test set
for i in range(len(test)):
    # fit model and make forecast for history
    yhat = model_predict(model, history, config)
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(
    Conv1D(20,
           4,
           strides=2,
           activation='relu',
           input_shape=(num_data_points, 1)))
model.add(BatchNormalization())
model.add(Conv1D(20, 4, strides=2, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(2))
model.add(Conv1D(40, 4, strides=2, activation='relu'))
model.add(BatchNormalization())
model.add(Conv1D(40, 4, strides=2, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(2))
model.add(Conv1D(80, 4, strides=2, activation='relu'))
model.add(BatchNormalization())
model.add(Conv1D(80, 4, strides=2, activation='relu'))
model.add(BatchNormalization())
model.add(GlobalAveragePooling1D())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(BatchNormalization())
Example #6
def train_cnn(features=None,labels=None):
    print('Training CNN')
    channels = len(features)
    frequencies = len(features[0][0])
    reshape = (-1, channels, frequencies)

    print("split features into training and testing datasets")
    
    # Defining training and test data
    indices = np.arange(len(labels))
    if len(indices)%2 != 0:
        indices = indices[0:-1]
    np.random.shuffle(indices)
    train_inds,test_inds = np.split(indices,2)
    traindata,categories = get_data(features=features[:,train_inds,:],LABELS=labels[train_inds])
    
    # Getting training and test data
    train_X = []
    train_y = []
    testdata, categories = get_data(features=features[:,test_inds,:],LABELS=labels[test_inds])
    test_X = []
    test_y = []
    for X, y in traindata:
        train_X.append(X)
        train_y.append(y)
    for X, y in testdata:
        test_X.append(X)
        test_y.append(y)

    print(len(train_X))
    print(len(test_X))


    print(np.array(train_X).shape)
    train_X = np.array(train_X).reshape(reshape)
    test_X = np.array(test_X).reshape(reshape)

    train_y = np.array(train_y)
    test_y = np.array(test_y)

    model = Sequential()

    model.add(Conv1D(64, (3), input_shape=train_X.shape[1:]))
    model.add(Activation('relu'))

    model.add(Conv1D(64, (2)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=(2)))

    model.add(Conv1D(64, (2)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=(2), padding='same'))

    model.add(Flatten())

    model.add(Dense(512))

    model.add(Dense(3))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

    epochs = 10
    batch_size = 32
    for epoch in range(epochs):
        model.fit(train_X, train_y, batch_size=batch_size, epochs=1, validation_data=(test_X, test_y))
        score = model.evaluate(test_X, test_y, batch_size=batch_size)
    MODEL_NAME = f"models/{round(score[1]*100,2)}-{epoch}epoch-{int(time.time())}-loss-{round(score[0],2)}.model"
    model.save(MODEL_NAME)
    print("saved:")
    print(MODEL_NAME)

    training_params = {}
    training_params['categories'] = categories
    training_params['train_inds'] = train_inds
    training_params['test_inds'] = test_inds

    return model, training_params
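
# Hedged sketch of the get_data helper used by train_cnn (it is not defined in
# this excerpt): pair each trial's (channels, frequencies) feature slice with a
# one-hot label vector, and return the list of pairs plus the category list.
# This is inferred from how traindata and categories are consumed above and is
# an assumption, not the original implementation.
import numpy as np

def get_data(features=None, LABELS=None):
    categories = sorted(set(LABELS))
    data = []
    for trial_idx, label in enumerate(LABELS):
        one_hot = np.zeros(len(categories))
        one_hot[categories.index(label)] = 1.0
        # features is expected to have shape (channels, trials, frequencies)
        data.append((features[:, trial_idx, :], one_hot))
    return data, categories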
Example #7
def prep_classifier():

    model = Sequential()

    model.add(Reshape((16, 16)))
    model.add(Bidirectional(LSTM(16, return_sequences=True)))
    model.add(Bidirectional(LSTM(16, return_sequences=True), merge_mode="ave"))

    model.add(Reshape((256, 1)))

    model.add(Conv1D(16, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Conv1D(32, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Conv1D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Conv1D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=batch_norm))
    model.add(Activation("relu"))
    model.add(MaxPooling1D())
    model.add(Dropout(dropout))

    model.add(Flatten())
    qdata = Sequential()
    qdata.add(Embedding(164, 64, input_length=3))
    qdata.add(Flatten())

    join = Sequential()
    join.add(Concatenate())

    ###TEST############################################
    join.add(Dense(1024))
    join.add(BatchNormalization(momentum=batch_norm))
    join.add(Activation("relu"))
    ###TEST############################################

    join.add(Dense(512))
    join.add(BatchNormalization(momentum=batch_norm))
    join.add(Activation("relu"))
    join.add(Dense(64))
    join.add(BatchNormalization(momentum=batch_norm))
    join.add(Activation("relu"))

    join.add(Dense(9, activation="softmax"))

    signal = Input(shape=(256, 1))
    qualdata = Input(shape=(3, ))
    feat_signal = model(signal)
    feat_qdata = qdata(qualdata)

    out = join([feat_signal, feat_qdata])

    classifier = Model([signal, qualdata], out)

    return classifier
Example #8
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
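
# Hedged reconstruction of the full F1 metric that the return statement above
# belongs to (its precision/recall computations were cut off in this excerpt);
# the helper below follows the common Keras-backend pattern and is an
# assumption, not necessarily the original author's exact code.
from tensorflow.keras import backend as K

def f1_metric(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))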


# In[ ]:

input_cnn = Input(shape=(MAX_SEQUENCE_LENGTH, ), name='input_cnn')
input_tfidf = Input(shape=(len(tfidf_train[0]), ), name='input_tfidf')
input_em = Input(shape=(len(em_values_train[0]), ), name='input_em')
input_lda = Input(shape=(len(lda_values_train[0]), ), name='input_lda')
input_other = Input(shape=(len(other_values_train[0]), ), name='input_other')

#cnn
x = Embedding(29531, 300, input_length=250, trainable=True,
              name='300')(input_cnn)
x = Conv1D(64, 5, padding='same')(x)
x = MaxPooling1D(pool_size=(20), strides=(10))(x)
x = Conv1D(64, 5, padding='same', name='64_1D')(x)
x = MaxPooling1D(pool_size=(20), strides=(10), name='20')(x)
x = Flatten(name='flatten')(x)
x = Model(inputs=input_cnn, outputs=x)  #64

#tfidf 5000
y = Dense(1024, activation='relu', name='1024')(input_tfidf)
#y = Dropout(0.1)(y)
y = Dense(256, activation='relu')(y)
#y = Dropout(0.1)(y)
y = Dense(64, activation='relu', name='64')(y)
#y = Dropout(0.1)(y)
y = Model(inputs=input_tfidf, outputs=y)  #64

#emotions 63
# # plt.imshow(x_train[0])
# plt.show()

x_test = x_test.reshape(-1, 784, 1)
x_train = x_train.reshape(-1, 784, 1)

from tensorflow.keras.utils import to_categorical

y_test = to_categorical(y_test)
y_train = to_categorical(y_train)

#2

model = Sequential()
model.add(Conv1D(64, 4, padding='same', strides=2, input_shape=(784, 1)))
model.add(MaxPooling1D(pool_size=5))
model.add(Dropout(0.4))
model.add(Conv1D(356, 4, padding='same', strides=2))
model.add(MaxPooling1D(pool_size=4))
model.add(Dropout(0.4))
model.add(Conv1D(356, 3))
model.add(MaxPooling1D(pool_size=3))
model.add(Dropout(0.4))
model.add(Conv1D(128, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(512))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()
Example #10
def build_model():

    #main input is the protein sequence, 700 amino acids long (shape (700,))
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    #3x1D Convolutional Hidden Layers with BatchNormalization and MaxPooling
    conv_layer1 = Convolution1D(64, 7, kernel_regularizer="l2",
                                padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv2D_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv2D_act)
    max_pool_2D_1 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Convolution1D(128, 7, padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv2D_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv2D_act)
    max_pool_2D_2 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Convolution1D(256,
                                7,
                                kernel_regularizer="l2",
                                padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv2D_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv2D_act)
    max_pool_2D_3 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    #concatenate convolutional layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_2D_1, max_pool_2D_2, max_pool_2D_3])

    #Dense Fully-Connected DNN layers
    dense_1 = Dense(300, activation='relu')(conv_features)
    dense_1_dropout = Dropout(dense_dropout)(dense_1)
    dense_2 = Dense(100, activation='relu')(dense_1_dropout)
    dense_2_dropout = Dropout(dense_dropout)(dense_2)
    dense_3 = Dense(50, activation='relu')(dense_2_dropout)
    dense_3_dropout = Dropout(dense_dropout)(dense_3)
    dense_4 = Dense(16, activation='relu')(dense_3_dropout)
    dense_4_dropout = Dropout(dense_dropout)(dense_4)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(dense_4_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])
    #use Adam optimizer
    adam = Adam(lr=lr)

    #Adam is fast, but tends to over-fit
    #SGD is slow but gives great results; sometimes RMSProp works best; SWA can easily improve quality; see also AdaTune

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])
    model.summary()

    #set earlyStopping and checkpoint callback
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  mode='min')
    checkpoint_path = "/3x1DConv_dnn_" + str(datetime.date(
        datetime.now())) + ".h5"
    checkpointer = ModelCheckpoint(filepath=checkpoint_path,
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_acc',
                                   mode='max')

    return model
Example #11
    def trainModel(self,
                   x_train,
                   y_train,
                   validation_data=(None, None),
                   classes=[],
                   epochs=50,
                   batch_size=10,
                   verbose=0):
        disp = PrettyTable(['Training Breast Cancer CNN model.....'])

        x_test, y_test = validation_data
        self.epochs = epochs

        #X shape must be (num_of_rows,1,num_of_features)
        x_train = x_train.reshape(len(x_train), 1, len(x_train[0]))
        x_test = x_test.reshape(len(x_test), 1, len(x_test[0]))

        if (len(classes) == 0):
            print("No classes provided....")
            print("Exiting program....")
            exit()

        self.classes = classes

        #input shape = (1, num_of_features)
        inp_shape = (x_train.shape[1], x_train.shape[2])
        """Architecture 1"""
        self.model = Sequential()
        self.model.add(
            Conv1D(filters=64,
                   kernel_size=1,
                   activation='relu',
                   input_shape=inp_shape))
        self.model.add(Conv1D(filters=32, kernel_size=1, activation='relu'))
        self.model.add(MaxPooling1D(pool_size=1))
        self.model.add(Flatten())
        self.model.add(Dense(100, activation='relu'))
        self.model.add(Dense(len(self.classes), activation='sigmoid'))
        """Architecture 2 (more complex)"""
        # self.model = Sequential()
        # self.model.add(Conv1D(filters=128, kernel_size=1, activation='relu', input_shape=inp_shape))
        # self.model.add(BatchNormalization(axis=1))
        # self.model.add(MaxPooling1D(pool_size=1))
        # self.model.add(Conv1D(64,kernel_size=1, activation='relu'))
        # self.model.add(BatchNormalization(axis=1))
        # self.model.add(MaxPooling1D(pool_size=1))
        # self.model.add(Conv1D(32, kernel_size=1, activation='relu'))
        # self.model.add(MaxPooling1D(pool_size=1))
        # self.model.add(Flatten())
        # self.model.add(Dense(16, activation='relu'))
        # self.model.add(BatchNormalization())
        # self.model.add(Dense(len(self.classes), activation='sigmoid'))

        self.model.compile(optimizer="adam",
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])

        disp.add_row(['Laying the pipeline for the model'])
        print("\n{}\n".format(disp))
        self.model.summary()
        print("\n\n")
        time.sleep(1)

        self.modelHistory = self.model.fit(x_train,
                                           y_train,
                                           epochs=epochs,
                                           batch_size=batch_size,
                                           validation_data=(x_test, y_test),
                                           verbose=verbose)

        path_to_model = model_name_2
        path = os.path.join(path_to_model, model_version)

        try:
            os.makedirs(path, exist_ok=True)
            tf.saved_model.save(self.model, path)

            print(
                "\n\n-----+-----+-----+-----+-----+-----+-----+------+------+-----+------+------"
            )
            print(
                "                         Saving trained Model version {}......"
                .format(model_version))
            print(
                "-----+-----+-----+-----+-----+-----+-----+------+------+-----+------+------"
            )
            print("Model saved in disc as \'saved_model.pb\' file in path: {}".
                  format(model_name_2 + "/" + model_version))
            print(
                "-----+-----+-----+-----+-----+-----+-----+------+------+-----+------+------\n"
            )
        except OSError as error:
            print("Path already exists")
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten

model = Sequential([
    Conv1D(filters=16,
           kernel_size=3,
           input_shape=(128, 64),
           kernel_initializer="random_uniform",
           bias_initializer="zeros",
           activation="relu"),
    MaxPooling1D(pool_size=4),
    Flatten(),
    Dense(units=64,
          activation="relu",
          kernel_initializer="he_uniform",
          bias_initializer="ones")
])

model.add(
    Dense(64,
          kernel_initializer=tf.keras.initializers.RandomNormal(mean=0.0,
                                                                stddev=0.05),
          bias_initializer=tf.keras.initializers.Constant(value=0.4),
          activation="relu"))

model.add(
    Dense(64,
          kernel_initializer=tf.keras.initializers.Orthogonal(gain=1.0)))
Example #13
c3 = Conv1D(512,1)(x)
x = FPAC_Layer(xyz = xyz128, cin = 128, cout = 512, m1=[3,9,1], m2=[65536,64,32], mr=[4096,64], mid=32, maxn=32, framepoints=framepoints3,numframe=num_framepoint,N=2048, l2=l2, dtype=dtype)(x)
x = tf.add(x,c3)
x = PointPooling(batch_sample_xyz=xyz128,sampling=xyz32, poolN=9)(x)
x = BatchNormalization()(x)

c4 = Conv1D(1024,1)(x)
x = FPAC_Layer(xyz = xyz32, cin = 512, cout = 1024, m1=[3,9,1], m2=[524288,64,32], mr=[16384,64], mid=32, maxn=32, framepoints=framepoints4,numframe=num_framepoint,N=2048, l2=l2, dtype=dtype)(x)
x = tf.add(x,c4)
x = PointPooling(batch_sample_xyz=xyz32,sampling=xyz8,poolN=8)(x)
x = BatchNormalization()(x)



x = MaxPooling1D(pool_size=8)(x)

x = Dense(512)(x)
x = Mish()(x)
x = BatchNormalization()(x)
x = Dropout(rate=0.3)(x)
x = Dense(256)(x)
x = Mish()(x)
x = BatchNormalization()(x)
x = Dropout(rate=0.3)(x)
x = Dense(128)(x)
x = Mish()(x)
x = BatchNormalization()(x)
x = Dropout(rate=0.3)(x)
x = Dense(40, activation = 'softmax')(x)
prediction = Flatten()(x)
test_data_files = [
    join(test_dataset_path, f) for f in listdir(test_dataset_path)
    if isfile(join(test_dataset_path, f))
]
print(test_data_files)
test_X_data, test_Y_data, test_ID_user = load_data(test_data_files[:100])
test_X_data = test_X_data.reshape(test_X_data.shape[0],
                                  input_shape).astype("float32")
test_Y_data = test_Y_data.astype("float32")
test_Y_data = tensorflow.keras.utils.to_categorical(test_Y_data, num_classes)
print('nummm', num_classes)
"""Model architecture"""
model_m = Sequential()
model_m.add(Reshape((TIME_PERIODS, num_sensors), input_shape=(input_shape, )))
model_m.add(
    Conv1D(80, 10, activation='relu', input_shape=(TIME_PERIODS, num_sensors)))
model_m.add(Conv1D(100, 10, activation='relu'))
model_m.add(MaxPooling1D(3))
model_m.add(Conv1D(160, 10, activation='relu'))
model_m.add(Conv1D(180, 10, activation='relu'))
model_m.add(MaxPooling1D(3))
model_m.add(Conv1D(220, 10, activation='relu'))
model_m.add(Conv1D(240, 10, activation='relu'))
model_m.add(GlobalMaxPooling1D())
model_m.add(Dropout(0.5))
model_m.add(Dense(num_classes, activation='softmax'))

callbacks_list = [
    ModelCheckpoint(filepath=model_checkpoint_path +
                    '/best_model.{epoch:03d}-{val_loss:.2f}.h5',
                    monitor='val_loss',
                    save_best_only=True),
    TensorBoard(log_dir='logs\\{}'.format(time())),
]
def CNNResBlockModelComplex(config):
    def activation(activation_name, x):
        if activation_name == 'leaky_relu':
            return LeakyReLU(alpha=config.alpha)(x)
        else:
            return Activation(activation_name)(x)

    def highway_layer(value, gate_bias=-3):
        # https://towardsdatascience.com/review-highway-networks-gating-function-to-highway-image-classification-5a33833797b5
        nonlocal i_hidden  # to keep i_hidden "global" to all functions under CNNResBlockModel()
        dim = K.int_shape(value)[-1]
        # gate_bias_initializer = tensorflow.keras.initializers.Constant(gate_bias)
        # gate = Dense(units=dim, bias_initializer=gate_bias_initializer)(value)
        # gate = Activation("sigmoid")(gate)
        # TODO (just for yellow color...) NOTE: to keep dimensions matched, convolution gate instead of regular sigmoid
        # gate (T in paper)
        gate = Conv2D(size_list[i_hidden + config.CNN_ResBlock_conv_per_block -
                                1],
                      kernel_size=filt_list[-1],
                      padding='same',
                      activation='sigmoid',
                      bias_initializer=tensorflow.keras.initializers.Constant(
                          gate_bias))(value)
        # negated (C in paper)
        negated_gate = Lambda(lambda x: 1.0 - x,
                              output_shape=(size_list[-1], ))(gate)
        # use ResBlock as the Transformation
        transformed = ResBlock(x=value)
        transformed_gated = Multiply()([gate, transformed])
        # UpSample value if needed
        if value.shape.as_list()[-1] != negated_gate.shape.as_list()[-1]:
            r = negated_gate.shape.as_list()[-1] / value.shape.as_list()[-1]
            assert not (bool(r % 1))
            value = tf.keras.layers.UpSampling3D(size=(1, 1, int(r)))(value)
        identity_gated = Multiply()([negated_gate, value])
        value = Add()([transformed_gated, identity_gated])
        return value

    def ResBlock(x):
        for i in range(config.CNN_ResBlock_conv_per_block):
            nonlocal i_hidden  # to keep i_hidden "global" to all functions under CNNResBlockModel()
            lamda_cnn = 0.0 if config.use_l2_in_cnn is False else lamda
            x = Conv2D(size_list[i_hidden],
                       kernel_size=filt_list[i_hidden],
                       padding='same',
                       bias_regularizer=keras.regularizers.l2(lamda_cnn),
                       kernel_regularizer=keras.regularizers.l2(lamda_cnn))(x)
            x = activation(activation_name, x)
            x = BatchNormalization()(x)
            i_hidden = i_hidden + 1
        return x

    if config.with_iq_matrices is False:
        raise Exception(
            'This model support only operation for IQ representation')
    global background_implicit_inference
    # parameters
    lamda = config.Regularization_term
    p_dropout = config.dropout
    activation_name = config.activation
    filt_dim2_list = config.Filter_shape_dim1 if config.Filter_shape_symmetric else config.Filter_shape_dim2
    filt_list = [(x, y)
                 for x, y in zip(config.Filter_shape_dim1, filt_dim2_list)]
    pool_list = [
        (x, y) for x, y in zip(config.Pool_shape_dim1, config.Pool_shape_dim2)
    ]
    size_list = config.hidden_size
    dense_list = config.Dense_size
    input_shape = config.model_input_dim
    p_dropout_conv1d = config.CNN_ResBlock_dropout_conv1d
    p_dropout_after_all_conv2d = config.dropout_after_all_conv2d
    i_hidden = 0

    # Input Layer
    input_layer = Input(shape=input_shape)
    assert len(size_list) == len(filt_list)
    assert len(pool_list) == len(config.CNN_ResBlock_highway) == len(
        config.CNN_ResBlock_dropout)
    assert config.CNN_ResBlock_conv_per_block * len(
        config.CNN_ResBlock_highway) == len(size_list)
    assert len(config.Conv1D_size) == len(config.Conv1D_kernel)

    x = input_layer
    real_part = tf.expand_dims(x[:, :, :, 0], axis=-1)
    imag_part = tf.expand_dims(x[:, :, :, 1], axis=-1)

    real_part_output = Conv2D(size_list[0],
                              kernel_size=filt_list[0],
                              padding='same')(real_part)
    imag_part_output = Conv2D(size_list[0],
                              kernel_size=filt_list[0],
                              padding='same')(imag_part)

    real = tf.expand_dims(real_part_output, axis=-1)
    imag = tf.expand_dims(imag_part_output, axis=-1)
    filter_output = tf.concat([real, imag], axis=-1)
    x = complex_activation()(filter_output)
    # ResBlocks
    for i in range(len(config.CNN_ResBlock_highway)):
        if config.CNN_ResBlock_highway[i]:
            # True = use Highway
            x = highway_layer(value=x)
        else:
            # False = don't use Highway
            x = ResBlock(x=x)
        # MaxPool and Dropout
        if config.CNN_ResBlock_dropout[i] != 0:
            x = Dropout(rate=config.CNN_ResBlock_dropout[i])(x)
        x = MaxPooling2D(pool_size=pool_list[i])(x)
    # Flatten
    x = Flatten()(x)

    # Conv1D
    if len(config.Conv1D_size) != 0:
        x = tf.expand_dims(x, axis=-1)
    for i in range(len(config.Conv1D_size)):
        x = Conv1D(filters=config.Conv1D_size[i],
                   kernel_size=config.Conv1D_kernel[i])(x)
        x = activation(activation_name, x)
        x = BatchNormalization()(x)
        if p_dropout_conv1d[i] != 0.0:
            x = Dropout(rate=p_dropout_conv1d[i])(x)
    # post-Conv1D
    if len(config.Conv1D_size) != 0:
        x = MaxPooling1D(pool_size=config.Conv1D_pool)(x)
        # x = BatchNormalization()(x)
        x = Flatten()(x)

    # Dense
    for i in range(len(dense_list)):
        x = Dense(dense_list[i],
                  kernel_regularizer=keras.regularizers.l2(lamda))(x)
        x = activation(activation_name, x)
        if p_dropout_after_all_conv2d != 0 and len(config.Conv1D_size) == 0:
            x = Dropout(rate=p_dropout_after_all_conv2d)(x)
        x = BatchNormalization()(x)
    x = Dropout(rate=p_dropout)(x)
    # x = BatchNormalization()(x)
    if config.learn_background:
        x = Dense(3, activation='softmax')(x)
    else:
        x = Dense(1, activation='sigmoid')(x)
    output_layer = x
    model = Model(input_layer, output_layer)
    if config.learn_background:
        if config.background_implicit_inference:
            background_implicit_inference = True
        model = BlockBackgroundModel(input_layer, output_layer)
    # else:
    #     model = Model(input_layer, output_layer)
    # model.summary()
    return model
 kf = KFold(n_splits=10,shuffle = True,random_state = i+2)
 for train_index, test_index in kf.split(x):     
     x_train, x_test = x[train_index], x[test_index]
     y_train, y_test = y_1h[train_index], y_1h[test_index]
     
     scaler = StandardScaler()
     scaler.fit(x_train)
     x_train = scaler.transform(x_train)
     x_test = scaler.transform(x_test)
             
     x_train_LSTM = np.reshape(x_train, (x_train.shape[0],1,x.shape[1]))
     x_test_LSTM = np.reshape(x_test, (x_test.shape[0],1,x.shape[1]))
     
     model = Sequential()
     model.add(Conv1D(j, 3, padding='same',activation='relu',input_shape=(1,x_train_LSTM.shape[2])))
     model.add(MaxPooling1D(2, padding='same'))
     model.add(Conv1D(j, 3, padding='same', activation='relu'))
     model.add(MaxPooling1D(2, padding='same'))
     model.add(Flatten())
     model.add(Dense(64, activation='sigmoid'))
     model.add(Dropout(0.2))
     model.add(Dense(3, activation='softmax'))
     model.compile(loss = 'categorical_crossentropy', optimizer = "adam", metrics = ['accuracy'])
     
     train = model.fit(x_train_LSTM, y_train,    # Training
                   epochs=5, batch_size=1,
                   validation_data=(x_test_LSTM, y_test),verbose=0)
     
     test = model.evaluate(x_test_LSTM,y_test)   # Testing
     
     preds = model.predict(x_test_LSTM)
def cnn_model(FILTER_SIZES,
              MAX_NB_WORDS,
              MAX_DOC_LEN,
              NAME='cnn_base',
              EMBEDDING_DIM=200,
              NUM_FILTERS=64,
              PRETRAINED_WORD_VECTOR=None,
              trainable_switch=True,
              bert_embedding=True):
    model = None

    main_input = Input(shape=(MAX_DOC_LEN, ), dtype='int32', name='main_input')

    if (PRETRAINED_WORD_VECTOR is not None):
        embed_1 = Embedding(input_dim=MAX_NB_WORDS,
                            output_dim=EMBEDDING_DIM,
                            embeddings_initializer='uniform',
                            input_length=MAX_DOC_LEN,
                            name='pretrained_embedding_trainable',
                            weights=[PRETRAINED_WORD_VECTOR],
                            trainable=trainable_switch)(main_input)

    else:  # trainable by default
        embed_1 = Embedding(input_dim=MAX_NB_WORDS,
                            output_dim=EMBEDDING_DIM,
                            embeddings_initializer='uniform',
                            input_length=MAX_DOC_LEN,
                            name='embedding_trainable',
                            trainable=True)(main_input)

        # the +1 is left to be done outside this function
        # embed_1 = Embedding(input_dim=MAX_NB_WORDS + 1, output_dim=EMBEDDING_DIM, embeddings_initializer='uniform',
        #                     input_length=MAX_DOC_LEN, name='embedding_trainable'
        #                     , trainable=True)(main_input)    # Convolution-pooling-flat block
    conv_blocks = []
    for f in FILTER_SIZES:  # For every filter
        conv = Conv1D(
            filters=NUM_FILTERS,
            kernel_size=f,
            name='conv_' + str(f) + '_gram',
            strides=1,
            activation='relu'
        )(
            embed_1
        )  # convolution  # filter kernel extracting 64 features with ReLU activation
        pool = MaxPooling1D(
            pool_size=MAX_DOC_LEN - f + 1, name='pool_' + str(f) + '_gram')(
                conv)  # maxpooling size = MAX_DOC_LEN - filter_size + 1
        flat = Flatten(name='flat_' + str(f) + '_gram')(
            pool)  # flatten filters extracting features (size*number = 3*64)
        conv_blocks.append(flat)

    if len(conv_blocks) > 1:
        z = Concatenate(name='concate')(
            conv_blocks
        )  # the input to Concatenate is a list: [flat_1, flat_2, flat_3]
    else:
        z = conv_blocks[0]

    #     pred = Dense(3, activation='softmax')(z)
    model = Model(inputs=main_input, outputs=z, name=NAME)

    return model
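
# Hedged usage sketch for cnn_model: the imports below are assumed (the
# original file's import block is not shown) and the hyper-parameter values
# are illustrative, not taken from the original code.
from tensorflow.keras.layers import (Input, Embedding, Conv1D, MaxPooling1D,
                                     Flatten, Concatenate)
from tensorflow.keras.models import Model

demo_model = cnn_model(FILTER_SIZES=[2, 3, 4],
                       MAX_NB_WORDS=10000,
                       MAX_DOC_LEN=100,
                       NAME='cnn_demo')
demo_model.summary()  # output is the concatenated flattened conv features (3 x 64)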
Example #18
def model_fit(x_train, y_train, x_test, y_test, x_valid, numclasses,
              input_shape, saved_model_path):
    '''
    Load data, compile and train the CNN model, and apply data shape transformations for the ANN inputs.
    Parameters
    Input:
        x_train, y_train - train data: qrs segments and labels
        x_test, y_test - test data: qrs segments and labels
        x_valid - validation data
        numclasses - the number of classes (labels)
        input_shape - the input shape of the chosen ANN
    Output: 
        model - sequential model
        history - training history parameters
        x_valid - reshaped validation data
    '''

    x_train, x_test, x_valid = map(lambda x: get_transformed_input(x),
                                   [x_train, x_test, x_valid])

    epochs = 100

    model = Sequential()

    # Convolutional layers
    model.add(
        Convolution1D(100,
                      4,
                      1,
                      activation='tanh',
                      input_shape=input_shape,
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(
        Convolution1D(200,
                      2,
                      1,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(
        Convolution1D(300,
                      1,
                      1,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(0.001)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(
        Convolution1D(400,
                      1,
                      1,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(0.001)))

    model.add(Flatten())
    model.add(Dropout(0.9))
    model.add(Dense(3000, activation='tanh'))
    model.add(Dense(numclasses, activation='softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    reduce_lr = tensorflow.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                             factor=0.5,
                                                             patience=50,
                                                             min_lr=0.0001)
    callbacks = [
        ModelCheckpoint(filepath=saved_model_path,
                        monitor='categorical_crossentropy'), reduce_lr
    ]

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks)

    return model, history, x_valid
Example #19
embedding_layer = Embedding(vocab_size + 1,
                            embedding_size,
                            input_length=input_size,
                            weights=[embedding_weights])

# Model Construction
# Input
inputs = Input(shape=(input_size,), name='input', dtype='int64')  # shape=(?, 1014)
# Embedding
x = embedding_layer(inputs)
# Conv
for filter_num, filter_size, pooling_size in conv_layers:
    x = Conv1D(filter_num, filter_size)(x)
    x = Activation('relu')(x)
    if pooling_size != -1:
        x = MaxPooling1D(pool_size=pooling_size)(x)  # Final shape=(None, 34, 256)
x = Flatten()(x)  # (None, 8704)
# Fully connected layers
for dense_size in fully_connected_layers:
    x = Dense(dense_size, activation='relu')(x)  # dense_size == 1024
    x = Dropout(dropout_p)(x)
# Output Layer
predictions = Dense(num_of_classes, activation='softmax')(x)
# Build model
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])  # Adam, categorical_crossentropy
model.summary()
import pdb
pdb.set_trace()
# # 1000 training samples and 100 testing samples
# indices = np.arange(train_data.shape[0])
Example #20
train_labels = train_labels1
test_labels = test_labels1

#Building and compiling the model.
print("Building Model.")

tf.keras.backend.clear_session()
model = Sequential()
#conv1d layer 1
model.add(
    Conv1D(filters=64,
           kernel_size=1,
           activation='relu',
           input_shape=(num_features - 1, 1)))
#Maxpool1d layer 1
model.add(MaxPooling1D(pool_size=2, strides=None))
#dropout layer to reduce overfitting.
model.add(Dropout(0.25))
#conv1d layer 2
model.add(Conv1D(filters=64, kernel_size=1, activation='relu'))
#Maxpool1d layer 2
model.add(MaxPooling1D(pool_size=1, strides=None))
#Flatten layer so the data's shape fits the dense layer.
model.add(tf.keras.layers.Flatten())
#first dense layer
model.add(Dense(128, activation='relu'))
#second dense layer
model.add(Dense(9, activation='softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
Example #21
import tensorflow as tf
from tensorflow.keras.layers import Conv1D, MaxPooling1D
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras import Model

embedding_method = Embedding(max(lengths) + 1, 186, input_length=37427)
embedded_sequence = embedding_method(padded_input)
print('Embedding Layer Property:')
print(embedded_sequence)

Convlayer_1 = Conv1D(62, 5, activation='relu')(embedded_sequence)
print('Convolutional Layer 1 Property:')
print(Convlayer_1)
print('')
Maxpooling_1 = MaxPooling1D(5)(Convlayer_1)
print('Maxpooling Layer 1 Property:')
print(Maxpooling_1)
print('')
Convlayer_2 = Conv1D(124, 5, activation='relu')(Maxpooling_1)
print('Convolutional Layer 2 Property:')
print(Convlayer_2)
print('')
Maxpooling_2 = MaxPooling1D(5)(Convlayer_2)
print('Maxpooling Layer 2 Property:')
print(Maxpooling_2)
print('')
Convlayer_3 = Conv1D(186, 5, activation='relu')(Maxpooling_2)
print('Convolutional Layer 3 Property:')
print(Convlayer_3)
print('')
Example #22
    def create_model(self):
        # different metric functions
        def coeff_determination(y_true, y_pred):
            SS_res = K.sum(K.square(y_true - y_pred))
            SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
            return (1 - SS_res / (SS_tot + K.epsilon()))

        def auroc(y_true, y_pred):
            # tf.py_func is TF1-only; tf.py_function is the TF2 equivalent
            return tf.py_function(roc_auc_score, (y_true, y_pred), tf.double)

        # building model
        prep = preprocess(self.fasta_file, self.readout_file)
        # if want mono-nucleotide sequences
        dict = prep.one_hot_encode()
        # if want dinucleotide sequences
        #dict = prep.dinucleotide_encode()

        readout = dict["readout"]
        fw_fasta = dict["forward"]
        rc_fasta = dict["reverse"]

        dim_num = fw_fasta.shape

        # To build this model with the functional API,
        # you would start by creating an input node:
        forward = keras.Input(shape=(dim_num[1], dim_num[2]), name='forward')
        reverse = keras.Input(shape=(dim_num[1], dim_num[2]), name='reverse')

        #first_layer = Conv1D(filters=self.filters, kernel_size=self.kernel_size, data_format='channels_last', input_shape=(dim_num[1],dim_num[2]), use_bias = False)
        ## with trainable = False
        #first_layer = Conv1D(filters=self.filters, kernel_size=self.kernel_size, kernel_initializer = my_init, data_format='channels_last', input_shape=(dim_num[1],dim_num[2]), use_bias = False, trainable=False)
        first_layer = ConvolutionLayer(filters=self.filters,
                                       kernel_size=self.kernel_size,
                                       data_format='channels_last',
                                       use_bias=True,
                                       alpha=self.alpha)

        fw = first_layer(forward)
        bw = first_layer(reverse)

        concat = concatenate([fw, bw], axis=1)

        pool_size_input = concat.shape[1]

        # ReLU-activated concat, used by the 'custom' and 'custom_sum' pooling branches below
        concat_relu = ReLU()(concat)
        #concat = Dense(1, activation= 'sigmoid')(concat)

        if self.pool_type == 'Max':
            pool_layer = MaxPooling1D(pool_size=pool_size_input)(concat)
        elif self.pool_type == 'Ave':
            pool_layer = AveragePooling1D(pool_size=pool_size_input)(concat)
        elif self.pool_type == 'custom':

            def out_shape(input_shape):
                shape = list(input_shape)
                print(input_shape)
                shape[0] = 10
                return tuple(shape)

            #model.add(Lambda(top_k, arguments={'k': 10}))
            def top_k(inputs, k):
                # tf.nn.top_k Finds values and indices of the k largest entries for the last dimension
                print(inputs.shape)
                inputs2 = tf.transpose(inputs, [0, 2, 1])
                new_vals = tf.nn.top_k(inputs2, k=k, sorted=True).values
                # transform back to (None, 10, 512)
                return tf.transpose(new_vals, [0, 2, 1])

            pool_layer = Lambda(top_k, arguments={'k': 2})(concat_relu)
            pool_layer = AveragePooling1D(pool_size=2)(pool_layer)
        elif self.pool_type == 'custom_sum':
            ## apply relu function before custom_sum functions
            def summed_up(inputs):
                #nonzero_vals = tf.keras.backend.relu(inputs)
                new_vals = tf.math.reduce_sum(inputs, axis=1, keepdims=True)
                return new_vals

            pool_layer = Lambda(summed_up)(concat_relu)
        else:
            raise NameError('Set the pooling layer name correctly')

        flat = Flatten()(pool_layer)

        after_flat = Dense(32)(flat)

        # Binary classification with 2 output neurons
        if self.regularizer == 'L_1':
            #outputs = Dense(1, kernel_initializer='normal', kernel_regularizer=regularizers.l1(0.001), activation= self.activation_type)(flat)
            ## trainable = False with learned bias

            #outputs = Dense(1, kernel_initializer='normal', kernel_regularizer=regularizers.l1(0.001), activation= self.activation_type)(after_flat)
            outputs = Dense(2,
                            kernel_initializer='normal',
                            kernel_regularizer=regularizers.l1(0.001),
                            activation='sigmoid')(after_flat)
        elif self.regularizer == 'L_2':
            #outputs = Dense(1, kernel_initializer='normal', kernel_regularizer=regularizers.l1(0.001), activation= self.activation_type)(flat)
            ## trainable = False with learned bias
            outputs = Dense(2,
                            kernel_initializer='normal',
                            kernel_regularizer=regularizers.l2(0.001),
                            activation=self.activation_type)(after_flat)
        else:
            raise NameError('Set the regularizer name correctly')

        #weight_forwardin_0=model.layers[0].get_weights()[0]
        #print(weight_forwardin_0)
        model = keras.Model(inputs=[forward, reverse], outputs=outputs)

        #print model summary
        model.summary()

        #model.compile(loss='mean_squared_error', optimizer='adam', metrics = ['accuracy'])
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        #model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy', auroc])

        return model
Example #23
def build_model():
    """
    Description:
        Building DCBLSTM model
    Args:
        None
    Returns:
        model - compiled Keras model

    """

    #main input is the protein sequence, 700 amino acids long (shape (700,))
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    ######## 3x1D-Convolutional Layers with BatchNormalization, Dropout and MaxPooling ########

    conv_layer1 = Conv1D(16, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_1 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Conv1D(32, 7, padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_2 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Conv1D(64, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_3 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    ##maybe try removing dropout after batchnorm - batchnorm acts as a form of regularisation and thus reduces the need for dropout
    ############################################################################################

    #concatenate convolutional layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_1D_1, max_pool_1D_2, max_pool_1D_3])
    # conv_features = Concatenate(axis=-1)([conv1_dropout, conv2_dropout, conv3_dropout])

    #dense layer before LSTM's
    lstm_dense = Dense(600, activation='relu',
                       name="after_cnn_dense")(conv_features)

    ######## Recurrent Bi-Directional Long-Short-Term-Memory Layers ########
    lstm_f1 = Bidirectional(
        LSTM(200,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(lstm_dense)

    lstm_f2 = Bidirectional(
        LSTM(200,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(lstm_f1)

    ############################################################################################

    #concatenate LSTM with convolutional layers
    concat_features = Concatenate(axis=-1)([lstm_f1, lstm_f2, lstm_dense])
    concat_features = Dropout(0.4)(concat_features)

    #Dense Fully-Connected DNN layers
    after_lstm_dense = Dense(600, activation='relu')(concat_features)
    after_lstm_dense_dropout = Dropout(0.3)(after_lstm_dense)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(after_lstm_dense_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #use Adam optimizer
    adam = Adam(lr=0.00015)

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision(),
                      AUC()
                  ])

    #print model summary
    model.summary()

    return model
def evaluate_model(x_train,y_train,x_test,y_test,nnType,batch_size=1,epochs=60):
    backend.clear_session()
    n_timesteps, n_features, n_outputs = x_train.shape[0], x_train.shape[1], y_train.shape[1]
    nb_neurons_lstm=100
    if args.type=="cnn+lstm":
        
        # reshape data into time steps of sub-sequences
        #[batch, timesteps, feature].
        
        epochs=25
        n_steps, n_length = 4, 32
        
        
        # define model
        model = Sequential()
        model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'), input_shape=(None,n_length,n_features)))
        model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu')))
        model.add(TimeDistributed(Dropout(0.5)))
        model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
        model.add(TimeDistributed(Flatten()))
        model.add(LSTM(units=nb_neurons_lstm))
        model.add(Dropout(0.5))
        model.add(Dense(100, activation='relu'))
        model.add(Dense(n_outputs, activation='softmax'))
        #model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        x_train_reshape=x_train.reshape((x_train.shape[0], n_steps, n_length, n_features))
        x_test_reshape=x_test.reshape((x_test.shape[0], n_steps, n_length, n_features))
    
    elif args.type=="lstm":
        
        '''64 windows of data will be exposed to the model before the weights of the model are updated.'''
        
        nb_classes=y_train.shape[1]
        print('nb_classes',nb_classes)
        
        
        model = Sequential()
        model.add(LSTM(units=nb_neurons_lstm, return_sequences=True,input_shape=(1,n_features)))
        model.add(LSTM(units=nb_neurons_lstm, return_sequences=True))
        model.add(LSTM(units=nb_neurons_lstm))
        
        
        #model.add(LSTM(units=nb_neurons_lstm))
        '''This is followed by a dropout layer intended to reduce overfitting of the model to the training data.'''
        model.add(Dropout(0.5))
        '''Activation function is softmax for multi-class classification.'''
        #model.add(Dense(100, activation='relu'))
        model.add(Dense(units=n_outputs, activation='softmax'))
        model.summary()
        '''Because it is a multi-class classification problem, categorical_crossentropy is used as the loss function.  '''
        # reshape to get shape: (samples, timesteps, features)
        
        print('x_train-------------',x_train.shape[0])
        x_train_reshape = x_train.reshape(x_train.shape[0],1,x_train.shape[1])#60 features
        x_test_reshape = x_test.reshape(x_test.shape[0],1,x_test.shape[1])#60 features
        
    elif args.type=="convlstm":
        n_steps, n_length = 4, 32
        # define model
        model = Sequential()
        model.add(ConvLSTM2D(filters=64, kernel_size=(1,3), activation='relu', input_shape=(n_steps, 1, n_length, n_features)))
        model.add(Dropout(0.5))
        model.add(Flatten())
        model.add(Dense(100, activation='relu'))
        model.add(Dense(n_outputs, activation='softmax'))
        model.summary()

        # reshape into subsequences (samples, time steps, rows, cols, channels)
        x_train_reshape = x_train.reshape((x_train.shape[0], n_steps, 1, n_length, n_features))
        x_test_reshape = x_test.reshape((x_test.shape[0], n_steps, 1, n_length, n_features))

    filepath = 'modelcheckpoint_'+str(args.db) + '.hdf5'
    saveto = 'csvLogger_'+str(args.db) + '.csv'
    #optimizer = Adam(lr=lr, clipnorm=args.clip)
    #pred_dir = os.path.join(rootdir, str(case) + '_pred.txt')
    """spécifier d'utiliser une partie du train pour la validation des hyper-paramèteres"""
    percentage_of_train_as_validation = 0.3
    if args.train:
        early_stop = EarlyStopping(monitor='val_accuracy', patience=15, mode='auto',min_delta=0.0001)
        reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=5, mode='auto', cooldown=3., verbose=1)
        checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='auto')
        csv_logger = CSVLogger(saveto)
        callbacks_list = [csv_logger, checkpoint, early_stop, reduce_lr]
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        '''Here we do shuffle the windows of input data during training (the default). In this problem, we are interested in harnessing the LSTMs ability to learn and extract features across the time steps in a window, not across windows.'''
        
        history = model.fit(x_train_reshape, y_train, epochs=20,verbose=True, batch_size=10,validation_split=percentage_of_train_as_validation,callbacks=callbacks_list)
        #model.fit(train_x, train_y, validation_data=[valid_x, valid_y], epochs=args.epochs,batch_size=args.batch_size, callbacks=callbacks_list, verbose=2)
        
        
        epoch_loss_acc('accuracy',history)#loss or accuracy
        epoch_loss_acc('loss',history)#loss or accuracy
        
        loss,accuracy = model.evaluate(x_train_reshape,y_train,verbose=False)
        print("L'accuracy sur l'ensemble du train est:",accuracy)
        print("Le loss sur l'ensemble du train est:",loss)
    elif args.test:
        model.load_weights(filepath)
        #model = keras.models.load_model('best-model.h5')
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # evaluate model
        loss,accuracy = model.evaluate(x_test_reshape, y_test, batch_size=batch_size, verbose=False)
        print("L'accuracy sur l'ensemble du test est:",accuracy)
        print("Le loss sur l'ensemble du train est:",loss)
        
        #scores = get_activation(model, test_x, test_y, pred_dir, VA=10, par=9)
        #results.append(round(scores, 2))
    return accuracy
Example #25
n_timesteps = 50
n_features = 2
n_outputs = 2
import json
import numpy as np

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Input, Conv1D, Dropout, MaxPooling1D, Flatten
from tensorflow.keras.models import Model, Sequential

model = Sequential()
model.add(
    Conv1D(filters=16,
           kernel_size=3,
           activation='relu',
           input_shape=(n_timesteps, n_features)))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
Example #26
import os

os.environ["PATH"] += os.pathsep + 'C:\\Program Files\\Graphviz\\bin'

# GLOVE VECTORS
word_vector_matrix = produce_glove_vector_matrix(clf_config.EMBEDDING_DIM,
                                                 data.vocab_size, data.token)

# MODEL 1
input_1 = Input(shape=(data.max_length, ))
embedding_layer = Embedding(data.vocab_size,
                            clf_config.EMBEDDING_DIM,
                            weights=[word_vector_matrix])(input_1)
conv1d_layer = Conv1D(filters=64, kernel_size=10,
                      activation="relu")(embedding_layer)
max_pooling_layer = MaxPooling1D()(conv1d_layer)
flatten_layer = Flatten()(max_pooling_layer)
dense_text_layer = Dense(32, activation="sigmoid")(flatten_layer)

# MODEL 2
input_2 = Input(shape=(data.user_attributes_count, ))
dense_layer_1 = Dense(128, activation='sigmoid')(input_2)
dense_layer_2 = Dense(128, activation='sigmoid')(dense_layer_1)
dense_layer_3 = Dense(128, activation='sigmoid')(dense_layer_2)
dense_user_layer = Dense(128, activation='sigmoid')(dense_layer_3)

# concatenate the two outputs
concat_layer = Concatenate()([dense_text_layer, dense_user_layer])

# construct output layer
concatenate_dense_layer_1 = Dense(128, activation="sigmoid")(concat_layer)
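
The excerpt ends before the final output layer; one plausible completion (assuming Model is imported as in the other examples, and assuming a single binary target) is:

# Hypothetical completion: single sigmoid output on top of the concatenated branches.
output = Dense(1, activation='sigmoid')(concatenate_dense_layer_1)
model = Model(inputs=[input_1, input_2], outputs=output)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
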
Example #27
            img_line, mask_line = line_generator.get_line()
            batch_input += [img_line]
            batch_output += [mask_line]
        batch_input = np.array(batch_input)
        batch_output = np.array(batch_output)
        yield batch_input, batch_output


# constructing the model 
vertical_input = Input(shape=(224, 3), name='ver_input')
line_feature_layers_vertical = [
    vertical_input,
    Conv1D(filters=32, kernel_size=3, padding='same', input_shape=(224, 3)),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(alpha=0.3),
    MaxPooling1D(pool_size=2),
    Conv1D(filters=64, kernel_size=3, padding='same', input_shape=(112, 32)),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(alpha=0.3),
    MaxPooling1D(pool_size=2),
    Conv1D(filters=128, kernel_size=3, padding='same', input_shape=(56, 64)),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(alpha=0.3),
    MaxPooling1D(pool_size=2),
    Conv1D(filters=256, kernel_size=3, padding='same', input_shape=(28, 128)),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(alpha=0.3),
    MaxPooling1D(pool_size=2),
    Conv1D(filters=512, kernel_size=3, padding='same', input_shape=(14, 256)),
    BatchNormalization(),
    tf.keras.layers.LeakyReLU(alpha=0.3),
Example #28
    def __init__(self, num_actions):
        super().__init__('mlp_policy')
        embeddings = []
        embeddings_shape = []

        # Generations
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        generations_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                          embedding_size,
                                          input_length=1)
        embeddings.append(generations_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Game Types
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        gametypes_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                        embedding_size,
                                        input_length=1)
        embeddings.append(gametypes_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Tiers
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        tiers_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                    embedding_size,
                                    input_length=1)
        embeddings.append(tiers_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Weather
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        weather_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                      embedding_size,
                                      input_length=1)
        embeddings.append(weather_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Terrain
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        terrain_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                      embedding_size,
                                      input_length=1)
        embeddings.append(terrain_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Room
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        room_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                   embedding_size,
                                   input_length=1)
        embeddings.append(room_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Effective p1 a
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        effective_p1_a_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(effective_p1_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Effective p2 a
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        effective_p2_a_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(effective_p2_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # p1 Pending Attacks A
        embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
        seen_attacks_a_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(seen_attacks_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # p2 Pending Attacks A
        embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
        seen_attacks_a_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(seen_attacks_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # for each pokemon for player and agent
        for i in range(6):

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            player_pokemon_name_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                      embedding_size,
                                                      input_length=1)
            embeddings.append(player_pokemon_name_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            player_pokemon_status_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_status_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            player_pokemon_first_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_first_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            player_pokemon_second_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_second_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            player_pokemon_abilities_embedding = Embedding(
                POKEMON_MEDIUM_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_abilities_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            player_pokemon_items_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                       embedding_size,
                                                       input_length=1)
            embeddings.append(player_pokemon_items_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            player_pokemon_genders_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_genders_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            # 4 attack slots
            for j in range(4):
                embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
                player_attack_slot_1_embedding = Embedding(
                    POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(player_attack_slot_1_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
                player_attack_slot_1_element_embedding = Embedding(
                    POKEMON_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(player_attack_slot_1_element_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = 1
                player_attack_slot_1_category_embedding = Embedding(
                    POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                    embedding_size,
                    input_length=1)
                embeddings.append(player_attack_slot_1_category_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

        # for each pokemon for player and agent
        for i in range(6):

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            agent_pokemon_name_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                     embedding_size,
                                                     input_length=1)
            embeddings.append(agent_pokemon_name_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            agent_pokemon_status_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_status_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            agent_pokemon_first_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_first_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            agent_pokemon_second_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_second_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            agent_pokemon_abilities_embedding = Embedding(
                POKEMON_MEDIUM_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_abilities_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            agent_pokemon_items_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                      embedding_size,
                                                      input_length=1)
            embeddings.append(agent_pokemon_items_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            agent_pokemon_genders_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_genders_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            # 4 attack slots
            for j in range(4):
                embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
                agent_attack_slot_1_embedding = Embedding(
                    POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(agent_attack_slot_1_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
                agent_attack_slot_1_element_embedding = Embedding(
                    POKEMON_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(agent_attack_slot_1_element_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = 1
                agent_attack_slot_1_category_embedding = Embedding(
                    POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                    embedding_size,
                    input_length=1)
                embeddings.append(agent_attack_slot_1_category_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

        merged = Concatenate(axis=-1)  #(embeddings)

        self.conv1_1 = Conv1D(256, 10, activation='relu')
        #    conv1 = Conv1D(100, 10, activation='relu', batch_input_shape=(None, ob_space.shape[1]))(field_inputs_)
        self.conv1_2 = Conv1D(256, 10, activation='relu')
        self.max_1 = MaxPooling1D(8)
        self.conv1_3 = Conv1D(128, 4, activation='relu')
        self.conv1_4 = Conv1D(128, 4, activation='relu')
        self.max_2 = MaxPooling1D(8)
        self.conv1_5 = Conv1D(256, 10, activation='relu')
        self.conv1_6 = Conv1D(256, 10, activation='relu')
        self.glob_1 = GlobalAveragePooling1D()
        self.drop = Dropout(0.3)

        # This returns a tensor
        non_category_data_input_keras = tf.keras.layers.Input(
            POKEMON_FIELD_REMAINDER, name="non_category_data_input")
        categorical_dense = tf.keras.layers.Dense(512,
                                                  activation='relu')  #(merged)
        #    categorical_dense = Reshape(target_shape=(512,))(categorical_dense)
        non_categorical_dense_1 = tf.keras.layers.Dense(
            512, activation='relu')  #(non_category_data_input_keras)
        non_categorical_dense_2 = tf.keras.layers.Dense(
            1024, activation='relu')  #(non_category_data_input_keras)
        non_categorical_dense_3 = tf.keras.layers.Dense(
            512, activation='relu')  #(non_category_data_input_keras)

        combined_fields = Concatenate(
            axis=-1)  #([non_categorical_dense, categorical_dense])

        self.combined_dense_1 = tf.keras.layers.Dense(256, activation='relu')
        self.combined_dense_2 = tf.keras.layers.Dense(512, activation='relu')
        self.combined_dense_3 = tf.keras.layers.Dense(256, activation='relu')

        self.embeddings = embeddings
        self.embeddings_shape = embeddings_shape
        self.merged = merged
        self.categorical_dense = categorical_dense
        self.non_categorical_dense_1 = non_categorical_dense_1
        self.non_categorical_dense_2 = non_categorical_dense_2
        self.non_categorical_dense_3 = non_categorical_dense_3
        self.non_category_data_input_keras = non_category_data_input_keras
        self.combined_fields = combined_fields

        # Note: no tf.get_variable(), just simple Keras API!
        self.hidden1 = kl.Dense(256, activation='relu')  #(combined_fields)
        self.hidden2 = kl.Dense(128, activation='relu')
        self.value = kl.Dense(1, name='value')
        # Logits are unnormalized log probabilities.
        self.logits = kl.Dense(num_actions, name='policy_logits')
        self.dist = ProbabilityDistribution()
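
ProbabilityDistribution is not defined in this excerpt; a common definition from TF2 actor-critic examples (an assumption here, not taken from the original) samples an action index from the policy logits:

import tensorflow as tf

class ProbabilityDistribution(tf.keras.Model):
    def call(self, logits, **kwargs):
        # Sample a categorical action from the unnormalized log probabilities.
        return tf.squeeze(tf.random.categorical(logits, 1), axis=-1)
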
Example #29
    def build_discriminator(self):

        signal = Input(shape=self.input_shape)
        
        if self.minibatch:
            
            flat = Flatten()(signal)
            mini_disc = MinibatchDiscrimination(10, 3)(flat)

            md = Conv1D(8, kernel_size=8, strides=1, input_shape=self.input_shape, padding='same')(signal)
            md = LeakyReLU(alpha=0.2)(md)
            md = Dropout(0.25)(md)
            md = MaxPooling1D(3)(md)

            md = Conv1D(16, kernel_size=8, strides=1, input_shape=self.input_shape, padding='same')(md)
            md = LeakyReLU(alpha=0.2)(md)
            md = Dropout(0.25)(md)
            md = MaxPooling1D(3, strides=2)(md)

            md = Conv1D(32, kernel_size=8, strides=2, input_shape=self.input_shape, padding='same')(md)
            md = LeakyReLU(alpha=0.2)(md)
            md = Dropout(0.25)(md)
            md = MaxPooling1D(3, strides=2)(md)

            md = Conv1D(64, kernel_size=8, strides=2, input_shape=self.input_shape, padding='same')(md)
            md = LeakyReLU(alpha=0.2)(md)
            md = Dropout(0.25)(md)
            md = MaxPooling1D(3, strides=2)(md)
            md = Flatten()(md)
            concat = Concatenate()([md, mini_disc])
            validity = Dense(1, activation='sigmoid')(concat)

            return Model(inputs=signal, outputs=validity, name = "Discriminator")
            # return Model(inputs=signal, outputs=validity)



        else:
            model = Sequential(name='Discriminator')
            # model = Sequential()
            model.add(Conv1D(8, kernel_size=8, strides=1, input_shape=self.input_shape, padding='same'))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
            model.add(MaxPooling1D(3))

            model.add(Conv1D(16, kernel_size=8, strides=1, input_shape=self.input_shape, padding='same'))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
            model.add(MaxPooling1D(3, strides=2))

            model.add(Conv1D(32, kernel_size=8, strides=2, input_shape=self.input_shape, padding='same'))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
            model.add(MaxPooling1D(3, strides=2))

            model.add(Conv1D(64, kernel_size=8, strides=2, input_shape=self.input_shape, padding='same'))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
            model.add(MaxPooling1D(3, strides=2))

            model.add(Flatten())
            model.add(Dense(1, activation='sigmoid'))

            model.summary()

            validity = model(signal)

            return Model(inputs=signal, outputs=validity)
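
A standalone sketch of the usual GAN wiring around a discriminator like the one above; the helper name, optimizer, learning rate, and latent dimension are assumptions, not from the original:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def attach_gan(generator, discriminator, latent_dim):
    # Hypothetical helper: compile the discriminator on its own, then freeze it
    # inside the combined model so only the generator is updated adversarially.
    discriminator.compile(loss='binary_crossentropy', optimizer=Adam(1e-4), metrics=['accuracy'])
    discriminator.trainable = False
    z = Input(shape=(latent_dim,))
    validity = discriminator(generator(z))
    combined = Model(z, validity)
    combined.compile(loss='binary_crossentropy', optimizer=Adam(1e-4))
    return combined
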
def build_model(input_shape):
    # Model Definition
    activ = 'relu'
    init = 'he_uniform'
    num_tags = 4

    input = tf.keras.Input(shape=input_shape)

    conv0 = Conv1D(filters=128,
                   kernel_size=3,
                   strides=3,
                   padding='valid',
                   kernel_initializer=init)(input)
    bn0 = BatchNormalization()(conv0)
    activ0 = Activation(activ)(bn0)

    conv1 = Conv1D(128, 3, padding='same', kernel_initializer=init)(activ0)
    bn1 = BatchNormalization()(conv1)
    activ1 = Activation(activ)(bn1)
    MP1 = MaxPooling1D(pool_size=3)(activ1)

    conv2 = Conv1D(128, 3, padding='same', kernel_initializer=init)(MP1)
    bn2 = BatchNormalization()(conv2)
    activ2 = Activation(activ)(bn2)
    MP2 = MaxPooling1D(pool_size=3)(activ2)

    conv3 = Conv1D(256, 3, padding='same', kernel_initializer=init)(MP2)
    bn3 = BatchNormalization()(conv3)
    activ3 = Activation(activ)(bn3)
    MP3 = MaxPooling1D(pool_size=3)(activ3)

    conv4 = Conv1D(256, 3, padding='same', kernel_initializer=init)(MP3)
    bn4 = BatchNormalization()(conv4)
    activ4 = Activation(activ)(bn4)
    MP4 = MaxPooling1D(pool_size=3)(activ4)

    conv5 = Conv1D(256, 3, padding='same', kernel_initializer=init)(MP4)
    bn5 = BatchNormalization()(conv5)
    activ5 = Activation(activ)(bn5)
    MP5 = MaxPooling1D(pool_size=3)(activ5)

    conv6 = Conv1D(256, 3, padding='same', kernel_initializer=init)(MP5)
    bn6 = BatchNormalization()(conv6)
    activ6 = Activation(activ)(bn6)
    MP6 = MaxPooling1D(pool_size=3)(activ6)

    conv7 = Conv1D(256, 3, padding='same', kernel_initializer=init)(MP6)
    bn7 = BatchNormalization()(conv7)
    activ7 = Activation(activ)(bn7)
    MP7 = MaxPooling1D(pool_size=3)(activ7)

    conv8 = Conv1D(256, 3, padding='same', kernel_initializer=init)(MP7)
    bn8 = BatchNormalization()(conv8)
    activ8 = Activation(activ)(bn8)
    MP8 = MaxPooling1D(pool_size=3)(activ8)

    conv9 = Conv1D(512, 3, padding='same', kernel_initializer=init)(MP8)
    bn9 = BatchNormalization()(conv9)
    activ9 = Activation(activ)(bn9)
    MP9 = MaxPooling1D(pool_size=3)(activ9)  # use for 59050 samples

    conv10 = Conv1D(512, 1, padding='same', kernel_initializer=init)(MP9)
    bn10 = BatchNormalization()(conv10)
    activ10 = Activation(activ)(bn10)
    dropout1 = Dropout(0.5)(activ10)

    flattened = Flatten()(dropout1)

    output = Dense(num_tags, activation='sigmoid')(flattened)

    model = tf.keras.Model(input, output)

    # Print model metadata
    print('input shape:', input_shape)
    for i, layer in enumerate(model.layers):
        print('layer {} shape: {}'.format(
            i,
            layer.get_output_at(0).get_shape().as_list()))
    model.summary()

    return model
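
A minimal usage sketch for build_model; the mono channel and the 59049-sample window (chosen so the repeated pool_size=3 layers reduce the time axis to 1) are assumptions:

import numpy as np

input_shape = (59049, 1)  # hypothetical single-channel audio window
model = build_model(input_shape)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

x_dummy = np.random.rand(4, *input_shape).astype(np.float32)
print(model.predict(x_dummy).shape)  # (4, 4): one sigmoid score per tag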