Example #1
    def build_discriminator(self):

        d_in = Input(shape=IMG_SIZE)

        d = Conv2D(filters=16,
                   kernel_size=9,
                   padding='same')(d_in)
        d = Dropout(rate=DROPOUT_RATE)(d)
        d = LeakyReLU(alpha=LRELU_FACTOR)(d)

        d = MaxPooling2D(pool_size=4)(d)

        d = Conv2D(filters=32,
                   kernel_size=5,
                   padding='same')(d)
        d = Dropout(rate=DROPOUT_RATE)(d)
        d = LeakyReLU(alpha=LRELU_FACTOR)(d)

        d = MaxPooling2D(pool_size=2)(d)

        d = Conv2D(filters=32,
                   kernel_size=5,
                   padding='same')(d)
        d = Dropout(rate=DROPOUT_RATE)(d)
        d = LeakyReLU(alpha=LRELU_FACTOR)(d)

        d = MaxPooling2D(pool_size=2)(d)

        d = Conv2D(filters=64,
                   kernel_size=5,
                   padding='same')(d)
        d = Dropout(rate=DROPOUT_RATE)(d)
        d = LeakyReLU(alpha=LRELU_FACTOR)(d)

        d = Flatten()(d)

        d = Dense(units=128)(d)
        d = Dropout(rate=DROPOUT_RATE)(d)
        d = LeakyReLU(alpha=LRELU_FACTOR)(d)

        d = Dense(units=1, activation='sigmoid')(d)

        d = Model(inputs=d_in, outputs=d)
        print("SUMMARY DISCRIMINATOR: ")
        d.summary()

        return d
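# --- Hedged usage note (not part of the original example) ---
# build_discriminator relies on module-level constants that are not shown in this
# snippet; the values below are illustrative assumptions only, and `gan` stands in
# for whatever object hosts the method in the original code.
IMG_SIZE = (128, 128, 3)   # assumed input image shape (height, width, channels)
DROPOUT_RATE = 0.25        # assumed dropout probability
LRELU_FACTOR = 0.2         # assumed LeakyReLU negative slope

# discriminator = gan.build_discriminator()
# discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])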
def main(data_path, output_path):

    X_trainS1, Y_train, X_valS1, Y_val = load_data(data_path)

    epochs = 10
    batch_size = 256
    dropout_rate = 0.15
    n_classes = 6

    # Input data for the three sub-models
    main_input1 = Input(shape=(128, 3), name='main_input1')

    def lstm_cell(main_input):
        """
        基于DeepConvLSTM算法, 创建子模型
        :param main_input: 输入数据
        :return: 子模型
        """
        sub_model = TimeDistributed(Dense(384),
                                    input_shape=(128, 3))(main_input)
        #       sub_model = Flatten()(main_input)
        print(sub_model)
        sub_model = LSTM(256, return_sequences=True)(sub_model)

        sub_model = LSTM(128, return_sequences=True)(sub_model)

        sub_model = LSTM(128)(sub_model)

        main_output = Dropout(dropout_rate)(sub_model)

        return main_output

    model = lstm_cell(main_input1)

    model = Dropout(0.4)(model)
    model = Dense(n_classes)(model)
    model = BatchNormalization()(model)
    output = Activation('softmax', name="softmax")(model)

    model = Model([main_input1], output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    print(model.summary())

    #    graph_path = os.path.join(output_path, "merged_model.png")
    #    plot_model(model, to_file=graph_path, show_shapes=True)  # plot the model graph

    metrics = Metrics()  # FPR metric callback
    history = model.fit(X_trainS1,
                        Y_train,
                        batch_size=batch_size,
                        validation_data=(X_valS1, Y_val),
                        epochs=epochs,
                        callbacks=[metrics])  # add FPR to the training output

    model_path = os.path.join(output_path, "merged_dcl.h5")
    model.save(model_path)  # save the model
    print(history.history)
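# --- Hedged sketch (not part of the original example) ---
# The Metrics callback used above is not defined in this snippet; the original comment
# only says it measures FPR. A minimal callback along those lines, assuming it is
# given the validation arrays, might look like this:
import numpy as np
from keras.callbacks import Callback

class Metrics(Callback):
    def __init__(self, val_data=None, val_labels=None):
        super(Metrics, self).__init__()
        self.val_data = val_data        # e.g. X_valS1
        self.val_labels = val_labels    # e.g. Y_val (one-hot)

    def on_epoch_end(self, epoch, logs=None):
        if self.val_data is None:
            return
        y_pred = np.argmax(self.model.predict(self.val_data), axis=1)
        y_true = np.argmax(self.val_labels, axis=1)
        fprs = []
        for c in np.unique(y_true):
            fp = np.sum((y_pred == c) & (y_true != c))   # false positives for class c
            tn = np.sum((y_pred != c) & (y_true != c))   # true negatives for class c
            fprs.append(fp / float(fp + tn) if (fp + tn) else 0.0)
        print("epoch %d - validation FPR (macro): %.4f" % (epoch + 1, np.mean(fprs)))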
def genre_model(num_genres, input_shape):
    input = Input(shape=input_shape)
    model = Conv1D(filters=128,
                   kernel_size=9,
                   activation='relu',
                   dilation_rate=1)(input)
    model = MaxPooling1D(pool_size=3)(model)
    model = Dropout(0.25)(model)
    model = Conv1D(filters=128,
                   kernel_size=9,
                   activation='relu',
                   dilation_rate=2)(model)
    model = MaxPooling1D(pool_size=3)(model)
    model = Dropout(0.25)(model)
    model = Conv1D(filters=64,
                   kernel_size=9,
                   activation='relu',
                   dilation_rate=4)(model)
    model = MaxPooling1D(pool_size=3)(model)
    model = Dropout(0.25)(model)
    model = Conv1D(filters=64,
                   kernel_size=9,
                   activation='relu',
                   dilation_rate=8)(model)
    model = MaxPooling1D(pool_size=3)(model)
    model = Dropout(0.25)(model)
    model = Conv1D(filters=32,
                   kernel_size=9,
                   activation='relu',
                   dilation_rate=16)(model)
    model = MaxPooling1D(pool_size=3)(model)
    model = Dropout(0.25)(model)
    model = Conv1D(filters=32,
                   kernel_size=7,
                   activation='relu',
                   dilation_rate=32)(model)
    model = MaxPooling1D(pool_size=3)(model)
    model = Flatten()(model)
    model = Dropout(0.25)(model)
    output = Dense(num_genres, activation='softmax')(model)
    model = Model(input, output)
    model.summary()
    return (model)
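# --- Hedged usage sketch (not part of the original example) ---
# genre_model stacks dilated Conv1D blocks (dilation rates 1, 2, 4, 8, 16, 32), each
# followed by MaxPooling1D and Dropout, before a softmax classifier. The input shape
# and number of genres below are illustrative assumptions only.
model = genre_model(num_genres=10, input_shape=(66000, 1))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])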
Example #4
def get_model():

    inp = Input(shape=(MAX_TEXT_LENGTH, 1))
    #model = Embedding(MAX_FEATURES, EMBED_SIZE)(inp)
    model = Dropout(0.5)(inp)
    model = Conv1D(filters=32,
                   kernel_size=2,
                   padding='same',
                   activation='relu')(model)
    model = MaxPooling1D(pool_size=2)(model)
    model = Flatten()(model)
    model = Dense(1024, activation='relu')(model)
    model = Dropout(0.5)(model)
    model = Dense(NB_CLASS, activation="softmax")(model)
    model = Model(inputs=inp, outputs=model)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
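# --- Hedged usage sketch (not part of the original example) ---
# get_model relies on the constants MAX_TEXT_LENGTH and NB_CLASS, which are not shown
# in this snippet; the values below are assumptions for illustration only.
MAX_TEXT_LENGTH = 400   # assumed maximum sequence length
NB_CLASS = 5            # assumed number of output classes

model = get_model()
# model.fit(X_train, y_train, batch_size=64, epochs=3, validation_split=0.1)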
Example #5
tangential_strain_layer = Activation('relu')(tangential_strain_layer)
tangential_strain_layer = (Conv1D(filters=filters, kernel_size=kernel_size))(tangential_strain_layer)
tangential_strain_layer = Activation('relu')(tangential_strain_layer)
tangential_strain_layer = (Dense(32, activation='relu'))(tangential_strain_layer)
tangential_strain_layer = Dropout(0.5)(tangential_strain_layer)
tangential_strain_layer = Flatten()(tangential_strain_layer)

merge = concatenate([energy_layer, maximum_amplitude_layer, radial_strain_layer, tangential_strain_layer])
model = (Dense(32, activation='relu'))(merge)
model = Dropout(0.5)(model)
model = (Dense(1, activation='sigmoid'))(model)

model = Model(inputs=[energy_input, maximum_amplitude_input, radial_strain_input, tangential_strain_input],
              outputs=model)
model.compile(loss='binary_crossentropy', optimizer="rmsprop", metrics=['accuracy'])
print(model.summary())

print "Training....................................................."
model.fit([energies, maximum_amplitudes, radial_strains, tangential_strains], train_labels, epochs=epochs, verbose=1,
          batch_size=batch_size, validation_split=0.2)

print("--- train %s seconds ---" % (time.time() - start_time))

original_classified = model.predict([energies, maximum_amplitudes, radial_strains, tangential_strains],
                                    batch_size=batch_size, verbose=0)
best_threshold = find_best_threshold(train_labels, original_classified)

if run_test:
    print('test')
    for test in test_sets:
        print(test)
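# --- Hedged sketch (not part of the original example) ---
# find_best_threshold is used above to pick a decision threshold on the sigmoid
# outputs, but its implementation is not shown. One plausible version, assuming F1 as
# the selection criterion, sweeps candidate thresholds:
import numpy as np
from sklearn.metrics import f1_score

def find_best_threshold(labels, probabilities, n_steps=99):
    labels = np.ravel(labels)
    probabilities = np.ravel(probabilities)
    best_t, best_f1 = 0.5, -1.0
    for t in np.linspace(0.01, 0.99, n_steps):
        f1 = f1_score(labels, (probabilities >= t).astype(int))
        if f1 > best_f1:
            best_t, best_f1 = t, f1
    return best_t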
Example #6
cnn = Conv2D(256, cnn_kernel, activation=cnn_act, padding='same',
                kernel_initializer=kernel_init, name='conv4')(cnn)
cnn = MaxPooling2D(pool_size=(1,2), strides=2)(cnn)
cnn = Dropout(0.25)(cnn)
cnn = Conv2D(512, cnn_kernel, activation=cnn_act, padding='same',
                kernel_initializer=kernel_init, name='conv5')(cnn)
cnn = BatchNormalization(axis=1)(cnn)
cnn = Conv2D(512, cnn_kernel, activation=cnn_act, padding='same',
                kernel_initializer=kernel_init, name='conv6')(cnn)
cnn = BatchNormalization(axis=1)(cnn)
cnn = MaxPooling2D(pool_size=(1,2), strides=2)(cnn)
cnn = Conv2D(512, cnn_kernel, activation=cnn_act, padding='same',
                kernel_initializer=kernel_init, name='conv7')(cnn)  # renamed: 'conv6' is already used above
#cnn.add(Conv2D(512,(1,2), activation=cnn_act))
cnn = Dropout(0.25)(cnn)
# cnn.summary()  # 'cnn' is a tensor here, not a Model; the summary is printed on the full Model below

conv_to_rnn_dims = (img_width//(1**2), (img_height // (2**2))*512)

rnn = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(cnn)
rnn = Dense(32, activation=cnn_act, name='dense1')(rnn)
rnn = Bidirectional(LSTM(rnn_size, return_sequences=True,
          kernel_initializer=kernel_init, name='lstm1'))(rnn)
rnn = Dropout(0.25)(rnn)
rnn = Bidirectional(LSTM(rnn_size, return_sequences=True,
          kernel_initializer=kernel_init, name='lstm2'))(rnn)
rnn = Dense(list_chars_len+1, kernel_initializer=kernel_init,
          name='dense2')(rnn)
y_pred = Activation('softmax', name='softmax')(rnn)
Model(inputs=Input_data, outputs=y_pred).summary()
Example #7
        model = Flatten()(model)
        dense_layer = Dense(nclass, activation=activations.softmax)(model)
        model = models.Model(inputs=input_layer, outputs=dense_layer)

        # Compile model
        epochs = 10
        opt = optimizers.Adam()  # Defaults: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False
        model.compile(optimizer=opt,
                      loss=losses.binary_crossentropy,
                      metrics=['accuracy'])

        print(get_available_gpus())
        # Model Summary
        model.summary()
        # Start timer
        start_time = time.time()
        # Fit model
        model.fit(x_train,
                  y_train,
                  batch_size=16,
                  validation_data=(x_valid, y_valid),
                  epochs=epochs,
                  shuffle=True,
                  verbose=2,
                  callbacks=[metrics])
        # Get time
        total_train_time = time.time() - start_time
        print('Total training time in seconds: ')
        print(total_train_time)
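# --- Hedged sketch (not part of the original example) ---
# get_available_gpus, printed above, is not defined in this snippet; a common
# implementation (assuming a TensorFlow backend) lists the local GPU devices:
from tensorflow.python.client import device_lib

def get_available_gpus():
    local_devices = device_lib.list_local_devices()
    return [d.name for d in local_devices if d.device_type == 'GPU']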
def BiLSTM_model(filename,
                 train,
                 output,
                 X_train,
                 X_test,
                 word2ind,
                 maxWords,
                 y_train,
                 y_test,
                 ind2label,
                 validation=False,
                 X_valid=None,
                 y_valid=None,
                 word_embeddings=True,
                 pretrained_embedding="",
                 word_embedding_size=100,
                 maxChar=0,
                 char_embedding_type="",
                 char2ind="",
                 char_embedding_size=50,
                 lstm_hidden=32,
                 nbr_epochs=5,
                 batch_size=32,
                 dropout=0,
                 optimizer='rmsprop',
                 early_stopping_patience=-1,
                 folder_path="model_results",
                 gen_confusion_matrix=False):
    """
        Build, train and test a BiLSTM Keras model. Works for multi-tasking learning.
        The model architecture looks like:
            
            - Word representations:
                - Word embeddings
                - Character-level representation [Optional]
            - Dropout
            - Bidirectional LSTM
            - Dropout
            - Softmax/CRF for predictions


        :param filename: File to redirect the printing
        :param train: Boolean if the model must be trained or not. If False, the model's weights are expected to be stored in "folder_path/filename/filename.h5"
        :param output: "crf" or "softmax". Type of prediction layer to use
        
        :param X_train: Data to train the model
        :param X_test: Data to test the model
        :param word2ind: Dictionary containing all words in the training data and a unique integer per word
        :param maxWords: Maximum number of words in a sequence 

        :param y_train: Labels to train the model for the prediction task
        :param y_test: Labels to test the model for the prediction task
        :param ind2label: Dictionary where all labels for task 1 are mapped into a unique integer

        :param validation: Boolean. If true, the validation score will be computed from 'X_valid' and 'y_valid'
        :param X_valid: Optional. Validation dataset
        :param y_valid: Optional. Validation dataset labels

        :param word_embeddings: Boolean value. Add word embeddings into the model.
        :param pretrained_embedding: Use the pretrained word embeddings. 
                                     Three values: 
                                            - "":    Do not use pre-trained word embeddings (Default)
                                            - False: Use the pre-trained embedding vectors as the weights in the Embedding layer
                                            - True:  Use the pre-trained embedding vectors as weight initializers. The Embedding layer will still be trained.
        :param word_embedding_size: Size of the pre-trained word embedding to use (100 or 300)

        :param maxChar: The maximum numbers of characters in a word. If set to 0, the model will not use character-level representations of the words
        :param char_embedding_type: Type of model to use in order to compute the character-level representation of words: Two values: "CNN" or "BILSTM"
        :param char2ind: A dictionary where each character is mapped into a unique integer
        :param char_embedding_size: size of the character-level word representations

        :param lstm_hidden: Dimensionality of the LSTM output space
        :param nbr_epochs: Number of epochs to train the model
        :param batch_size: Size of batches while training the model
        :param dropout: Rate to apply for each Dropout layer in the model
        :param optimizer: Optimizer to use while compiling the model
        :param early_stopping_patience: Number of consecutive epochs without improvement to tolerate during training.

        :param folder_path: Path to the directory storing all to-be-generated files
        :param gen_confusion_matrix: Boolean value. Generate confusion matrices or not.


        :return: The classification scores for both tasks.
    """
    print("====== {0} start ======".format(filename))
    end_string = "====== {0} end ======".format(filename)

    # Create directory to store results
    os.makedirs(folder_path + "/" + filename)
    filepath = folder_path + "/" + filename + "/" + filename

    # Set print outputs file
    file, stdout_original = setPrintToFile("{0}.txt".format(filepath))

    # Model params
    nbr_words = len(word2ind) + 1
    out_size = len(ind2label) + 1
    best_results = ""

    embeddings_list = []
    inputs = []

    # Input - Word Embeddings
    if word_embeddings:
        word_input = Input((maxWords, ))
        inputs.append(word_input)
        if pretrained_embedding == "":
            word_embedding = Embedding(nbr_words,
                                       word_embedding_size)(word_input)
        else:
            # Retrieve embeddings
            embedding_matrix = word2VecEmbeddings(word2ind,
                                                  word_embedding_size)
            word_embedding = Embedding(nbr_words,
                                       word_embedding_size,
                                       weights=[embedding_matrix],
                                       trainable=pretrained_embedding,
                                       mask_zero=False)(word_input)
        embeddings_list.append(word_embedding)

    # Input - Characters Embeddings
    if maxChar != 0:
        character_input = Input((
            maxWords,
            maxChar,
        ))
        char_embedding = character_embedding_layer(char_embedding_type,
                                                   character_input, maxChar,
                                                   len(char2ind) + 1,
                                                   char_embedding_size)
        embeddings_list.append(char_embedding)
        inputs.append(character_input)

    # Model - Inner Layers - BiLSTM with Dropout
    embeddings = concatenate(embeddings_list) if len(
        embeddings_list) == 2 else embeddings_list[0]
    model = Dropout(dropout)(embeddings)
    model = Bidirectional(
        LSTM(lstm_hidden, return_sequences=True, dropout=dropout))(model)
    model = Dropout(dropout)(model)

    if output == "crf":
        # Output - CRF
        crfs = [[CRF(out_size), out_size]
                for out_size in [len(x) + 1 for x in ind2label]]
        outputs = [x[0](Dense(x[1])(model)) for x in crfs]
        model_loss = [x[0].loss_function for x in crfs]
        model_metrics = [x[0].viterbi_acc for x in crfs]

    if output == "softmax":
        outputs = [
            Dense(out_size, activation='softmax')(model)
            for out_size in [len(x) + 1 for x in ind2label]
        ]
        model_loss = ['categorical_crossentropy' for x in outputs]
        model_metrics = None

    # Model
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss=model_loss,
                  metrics=model_metrics,
                  optimizer=get_optimizer(optimizer))
    print(model.summary(line_length=150), "\n\n\n\n")

    # Training Callbacks:
    callbacks = []
    value_to_monitor = 'val_f1'
    best_model_weights_path = "{0}.h5".format(filepath)

    #    1) Classification scores
    classification_scores = Classification_Scores([X_train, y_train],
                                                  ind2label,
                                                  best_model_weights_path)
    callbacks.append(classification_scores)

    #    2) EarlyStopping
    if early_stopping_patience != -1:
        early_stopping = EarlyStopping(monitor=value_to_monitor,
                                       patience=early_stopping_patience,
                                       mode='max')
        callbacks.append(early_stopping)

    # Train
    if train:
        # Train the model. Keras's 'validation_data' argument is referred to as 'testing data' in this code.
        hist = model.fit(X_train,
                         y_train,
                         validation_data=[X_test, y_test],
                         epochs=nbr_epochs,
                         batch_size=batch_size,
                         callbacks=callbacks,
                         verbose=2)

        print()
        print('-------------------------------------------')
        print(
            "Best F1 score:", max(hist.history[value_to_monitor]),
            "  (epoch number {0})".format(
                1 + np.argmax(hist.history[value_to_monitor])))

        # Save Training scores
        save_model_training_scores("{0}".format(filepath), hist,
                                   classification_scores)

        # Print best testing classification report
        best_epoch = np.argmax(hist.history[value_to_monitor])
        print(classification_scores.test_report[best_epoch])

        # Best epoch results
        best_results = model_best_scores(classification_scores, best_epoch)

    # Load weights from best training epoch into model
    save_load_utils.load_all_weights(model, best_model_weights_path)

    # Create confusion matrices
    if gen_confusion_matrix:
        for i, y_target in enumerate(y_test):
            # Compute predictions, flatten
            predictions, target = compute_predictions(model, X_test, y_target,
                                                      ind2label[i])
            # Generate confusion matrices
            save_confusion_matrix(
                target, predictions, list(ind2label[i].values()),
                "{0}_task_{1}_confusion_matrix_test".format(
                    filepath, str(i + 1)))

    # Validation dataset
    if validation:
        print()
        print("Validation dataset")
        print("======================")
        # Compute classification report
        for i, y_target in enumerate(y_valid):
            # Compute predictions, flatten
            predictions, target = compute_predictions(model,
                                                      X_valid,
                                                      y_target,
                                                      ind2label[i],
                                                      nbrTask=i)

            # Only for multi-task
            if len(y_train) > 1:
                print("For task " + str(i + 1) + "\n")
                print(
                    "===================================================================================="
                )

            print("")
            print("With padding into account")
            print(
                metrics.flat_classification_report([target], [predictions],
                                                   digits=4))
            print("")
            print('----------------------------------------------')
            print("")
            print("Without the padding:")
            print(
                metrics.flat_classification_report([target], [predictions],
                                                   digits=4,
                                                   labels=list(
                                                       ind2label[i].values())))

            # Generate confusion matrices
            save_confusion_matrix(
                target, predictions, list(ind2label[i].values()),
                "{0}_task_{1}_confusion_matrix_validation".format(
                    filepath, str(i + 1)))

    # Close file
    closePrintToFile(file, stdout_original)
    print(end_string)

    return best_results
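# --- Hedged usage sketch (not part of the original): one plausible call to
# BiLSTM_model for a single-task softmax setup. Every variable below (the datasets
# and the word2ind/ind2label dictionaries) is an assumption for illustration only.
# scores = BiLSTM_model(
#     filename="bilstm_softmax_run1",
#     train=True,
#     output="softmax",
#     X_train=X_train, X_test=X_test,
#     word2ind=word2ind, maxWords=100,
#     y_train=[y_train], y_test=[y_test],   # lists with one entry per task
#     ind2label=[ind2label],
#     word_embedding_size=100,
#     lstm_hidden=64, nbr_epochs=10, batch_size=32,
#     dropout=0.5, optimizer='rmsprop',
#     early_stopping_patience=5)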
    def __init__(self, model_name="Xception", train_class_name=None, training_batch_size=100, existing_weight=None, test_percentage=0.02, learning_rate=0.000, validation_every_X_batch=5):

        if train_class_name is None:
            print("You must specify train_class_name")
            return

        self.validation_every_X_batch = validation_every_X_batch
        self.Y = []

        self.model_file = model_name + "-{date:%Y-%m-%d-%H-%M-%S}".format( date=datetime.datetime.now())
        print("model_folder: ", self.model_file)

        self.train_class_name = train_class_name
        if not os.path.exists(os.path.join("models", train_class_name)):
            os.makedirs(os.path.join("models", train_class_name))

        self.training_batch_size = training_batch_size

        # Input images are resized to img_size pixels in each dimension.
        img_size = 512

        self.img_size_flat = img_size * img_size * 3

        self.img_shape_full = (img_size, img_size, 3)

        self.test = {}

        with open('base/Annotations/label.csv', 'r') as csvfile:
            reader = csv.reader(csvfile)
            all_class_samples = []
            for row in reader:
                if row[1] == self.train_class_name:
                    self.num_classes = len(row[2])
                    break

        # Start construction of the Keras Sequential model.
        input_tensor = Input(shape=self.img_shape_full)
        if model_name == "Xception":
            base_model = Xception(input_tensor=input_tensor, weights='imagenet', include_top=False, classes=self.num_classes)
        elif model_name == "MobileNet":
            base_model = MobileNet(input_tensor=input_tensor, weights='imagenet', include_top=False, classes=self.num_classes)
        elif model_name == "DenseNet121":
            base_model = DenseNet121(input_tensor=input_tensor, weights='imagenet', include_top=False, classes=self.num_classes)


        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dropout(0.2)(x)
        predictions = Dense(self.num_classes, activation='softmax')(x)

        # this is the model we will train
        model = Model(inputs=base_model.input, outputs=predictions)

        self.model = model
        print(model.summary())

        self.optimizer = optimizers.Adam(lr=learning_rate)

        model.compile(optimizer=self.optimizer, loss='categorical_crossentropy', metrics=['accuracy'])


        with open('base/Annotations/label.csv', 'r') as csvfile:
            reader = csv.reader(csvfile)
            all_class_samples = []
            for row in reader:
                if row[1] != self.train_class_name:
                    continue
                all_class_samples.append(row)

            self.X = np.zeros((len(all_class_samples), img_size, img_size, 3))
            # self.X = np.zeros((10, img_size, img_size, 3))
            test_count = int(test_percentage * len(all_class_samples))
            index = 0
            print("Training " + train_class_name + " with: " + str(int((1 - test_percentage) * len(all_class_samples))) + ", Testing with: " + str(test_count), str(self.num_classes), "Classes")
            print("Loading images...")
            for row in all_class_samples:
                image = Image.open("base/" + row[0])
                img_array = np.asarray(image)
                if img_array.shape != self.img_shape_full:
                    image = image.resize((img_size, img_size), Image.ANTIALIAS)
                    img_array = np.asarray(image)
                self.X[index] = img_array
                self.Y.append(row[2].index("y"))
                if index % 500 == 0:
                    print(index)
                index += 1

        self.Y = to_categorical(self.Y, num_classes=self.num_classes)

        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.Y, test_size = test_percentage, random_state=42)
        del self.X
        class_weight = {}
        class_count = np.sum(self.y_train, axis=0)
        print("Training Sample for each Class", class_count)
        for class_index in range(self.num_classes):
            class_weight[class_index] = 1 /(class_count[class_index] / np.sum(class_count)) / self.num_classes
        self.class_weight = class_weight
        print("Class weights: ", self.class_weight)
        os.makedirs(os.path.join("models", train_class_name, self.model_file))
        model.save(os.path.join("models", train_class_name, self.model_file, train_class_name + "_" + "model.h5"))
Example #10
    vgg = cnn_algo()
    # vgg.summary()

    for layers in vgg.layers:  # keep the pretrained ImageNet weights frozen
        layers.trainable = False  # do not retrain the base layers

    x = Flatten()(
        vgg.output
    )  # flatten the base model output before adding the new classification head
    folders = glob.glob(
        cwd + '/train/*')  # check number of folders inside training folder
    print(folders)

    prediction = Dense(len(folders), activation='softmax')(
        x)  # softmax output over the detected classes
    model = Model(inputs=vgg.input, outputs=prediction)  # attach the new head to the frozen base
    model.summary()  # view the structure of model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    training_set = aggumentation_training()
    test_set = aggumentation_training()
    r = model.fit_generator(training_set,
                            epochs=3,
                            steps_per_epoch=len(training_set),
                            validation_steps=len(test_set))
    model.save('My_face_features_model.h5')
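# --- Hedged sketch (not part of the original example) ---
# aggumentation_training() is called above but not defined in this snippet; one
# plausible implementation built on Keras' ImageDataGenerator is shown below. The
# directory layout, image size, and augmentation parameters are assumptions.
import os
from keras.preprocessing.image import ImageDataGenerator

def aggumentation_training():
    cwd = os.getcwd()  # same working-directory convention as the example above
    datagen = ImageDataGenerator(rescale=1. / 255,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True)
    return datagen.flow_from_directory(os.path.join(cwd, 'train'),
                                       target_size=(224, 224),
                                       batch_size=32,
                                       class_mode='categorical')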
def main(data_path, output_path):

    X_trainS1, Y_train, X_valS1, Y_val = load_data(data_path)

    epochs = 20
    batch_size = 256
    kernel_size = 3
    pool_size = 2
    dropout_rate = 0.15
    n_classes = 6

    f_act = 'relu'

    # Input data for the three sub-models
    main_input1 = Input(shape=(128, 3), name='main_input1')

    def cnn_cell(main_input):
        """
        基于CNN-Model算法, 创建子模型
        :param main_input: 输入数据
        :return: 子模型
        """
        sub_model = Conv1D(512,
                           kernel_size,
                           input_shape=(128, 3),
                           activation=f_act,
                           padding='same')(main_input)
        sub_model = BatchNormalization()(sub_model)
        sub_model = MaxPooling1D(pool_size=pool_size)(sub_model)
        sub_model = Dropout(dropout_rate)(sub_model)
        sub_model = Conv1D(64, kernel_size, activation=f_act,
                           padding='same')(sub_model)
        sub_model = BatchNormalization()(sub_model)
        sub_model = MaxPooling1D(pool_size=pool_size)(sub_model)
        sub_model = Dropout(dropout_rate)(sub_model)
        print('sub_model322:', sub_model)

        sub_model = Conv1D(32, kernel_size, activation=f_act,
                           padding='same')(sub_model)
        print('sub_model322:', sub_model)

        sub_model = BatchNormalization()(sub_model)

        sub_model = Flatten()(sub_model)
        print('sub_modelFlatten:', sub_model)

        return sub_model

    model = cnn_cell(main_input1)

    model = Dropout(0.4)(model)
    model = Dense(n_classes)(model)
    model = BatchNormalization()(model)
    output = Activation('softmax', name="softmax")(model)

    model = Model([main_input1], output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    #    graph_path = os.path.join(output_path, "merged_model.png")
    #    plot_model(model, to_file=graph_path, show_shapes=True)  # plot the model graph
    print(model.summary())

    metrics = Metrics()  # FPR metric callback
    history = model.fit(X_trainS1,
                        Y_train,
                        batch_size=batch_size,
                        validation_data=(X_valS1, Y_val),
                        epochs=epochs,
                        callbacks=[metrics])  # add FPR to the training output

    model_path = os.path.join(output_path, "merged_dcl.h5")
    model.save(model_path)  # save the model
    print(history.history)
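# --- Hedged usage note (not part of the original example) ---
# main() assumes load_data returns arrays compatible with the (128, 3) input and six
# one-hot classes; the shapes and paths below are illustrative assumptions only.
# X_trainS1: (n_samples, 128, 3), Y_train: (n_samples, 6), and likewise for the
# validation arrays.
# main(data_path="data/", output_path="output/")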