    def CreateCustomizedResNetModel(self, input_shape, classes):
        X_input = Input(name='the_input', shape=input_shape)
        level_h1 = block(32)(X_input)
        level_m1 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(level_h1)  # 池化层
        level_h2 = block(64)(level_m1)
        level_m2 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(level_h2)  # 池化层
        level_h3 = block(128)(level_m2)
        level_m3 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(level_h3)  # 池化层

        flayer = GlobalAveragePooling2D()(level_m3)
        fc = Dense(classes, use_bias=True,
                   kernel_initializer='he_normal')(flayer)  # fully connected layer
        y_pred = Activation('softmax', name='Activation0')(fc)
        # self.interlayer = Model(inputs=X_input, outputs=flayer)
        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        # model.compile(optimizer=optimizer, loss='binary_crossentropy')  # alternative to focal loss
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Customized ResNet model established.')
        self.modelname = 'residual'
        return model
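    # Hedged sketch of the `block(filters)` residual helper used above, which
    # is defined elsewhere in this repo. This is an assumption about its shape
    # (a standard two-conv residual unit with a 1x1-projected shortcut), not
    # the repo's actual code; `Add` would need importing from keras.layers.
    @staticmethod
    def _residual_block_sketch(filters):
        def f(x):
            shortcut = Conv2D(filters, (1, 1), padding='same',
                              kernel_initializer='he_normal')(x)  # match channels
            y = Conv2D(filters, (3, 3), padding='same', activation='relu',
                       kernel_initializer='he_normal')(x)
            y = Conv2D(filters, (3, 3), padding='same',
                       kernel_initializer='he_normal')(y)
            y = Add()([y, shortcut])  # residual addition
            return Activation('relu')(y)
        return f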
    def CreateCustomizedInceptionModel(self, input_shape, classes):
        X_input = Input(name='the_input', shape=input_shape)
        layer_h1 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(X_input)  # convolution layer
        layer_h2 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(layer_h1)  # convolution layer
        layer_h3 = MaxPooling2D(pool_size=3, strides=None,
                                padding="valid")(layer_h2)  # 池化层

        # inception_3a = inception_model(input=layer_h3, filters_1x1=32, filters_3x3_reduce=48, filters_3x3=64,filters_5x5_reduce=8, filters_5x5=16, filters_pool_proj=16)
        # inception_3b = inception_model(input=inception_3a, filters_1x1=85, filters_3x3_reduce=85, filters_3x3=128,filters_5x5_reduce=21, filters_5x5=64, filters_pool_proj=42)
        inception_3a = inception_model(input=layer_h3,
                                       filters_1x1=64,
                                       filters_3x3_reduce=96,
                                       filters_3x3=128,
                                       filters_5x5_reduce=16,
                                       filters_5x5=32,
                                       filters_pool_proj=32)
        inception_3b = inception_model(input=inception_3a,
                                       filters_1x1=128,
                                       filters_3x3_reduce=128,
                                       filters_3x3=192,
                                       filters_5x5_reduce=32,
                                       filters_5x5=96,
                                       filters_pool_proj=64)
        layer_h4 = MaxPooling2D(pool_size=3, strides=None,
                                padding="valid")(inception_3b)  # pooling layer

        transmute = core.Flatten()(layer_h4)
        y_pred = Dense(units=classes,
                       activation='softmax',
                       kernel_regularizer=l2(0.01))(transmute)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # model.compile(optimizer=optimizer,loss='binary_crossentropy')
        print('Customized Inception network established.')
        self.modelname = 'inception'
        return model
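    # Hedged sketch of the `inception_model` helper used above (defined
    # elsewhere in the repo): a GoogLeNet-style block with four parallel
    # branches concatenated along the channel axis. An assumption, not the
    # repo's exact code; `concatenate` would come from keras.layers.
    @staticmethod
    def _inception_block_sketch(x, filters_1x1, filters_3x3_reduce, filters_3x3,
                                filters_5x5_reduce, filters_5x5,
                                filters_pool_proj):
        branch1 = Conv2D(filters_1x1, (1, 1), padding='same',
                         activation='relu')(x)
        branch2 = Conv2D(filters_3x3_reduce, (1, 1), padding='same',
                         activation='relu')(x)  # bottleneck before the 3x3
        branch2 = Conv2D(filters_3x3, (3, 3), padding='same',
                         activation='relu')(branch2)
        branch3 = Conv2D(filters_5x5_reduce, (1, 1), padding='same',
                         activation='relu')(x)  # bottleneck before the 5x5
        branch3 = Conv2D(filters_5x5, (5, 5), padding='same',
                         activation='relu')(branch3)
        branch4 = MaxPooling2D(pool_size=3, strides=1, padding='same')(x)
        branch4 = Conv2D(filters_pool_proj, (1, 1), padding='same',
                         activation='relu')(branch4)
        return concatenate([branch1, branch2, branch3, branch4], axis=-1)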
    def TrainModel(self,
                   datapath,
                   epoch=2,
                   batch_size=32,
                   load_weights=False,
                   filename='model_set/speech_model25'):
        assert batch_size % CLASS_NUM == 0  # minibatches are split evenly across classes
        data = DataSpeech(datapath, 'train')
        num_data = sum(data.DataNum)  # total number of training samples

        os.system('pkill tensorboard')
        summary_dir = os.path.join(os.getcwd(), 'checkpoints', 'files_summary')
        os.system('rm -rf %s/*' % summary_dir)
        train_writter = tf.summary.FileWriter(summary_dir)
        # point tensorboard at the same directory the FileWriter logs to,
        # rather than a hard-coded absolute path
        os.system('tensorboard --logdir=%s &' % summary_dir)
        print('\n')
        print(90 * '*')
        print(90 * '*')

        iterations_per_epoch = min(
            data.DataNum) // (batch_size // CLASS_NUM) + 1
        # iterations_per_epoch = 0
        print('trainer info:')
        print('training data size: %d' % num_data)
        print('epochs to train: %d' % epoch)
        print('minibatch size: %d' % batch_size)
        print('iterations per epoch: %d' % iterations_per_epoch)

        with k.get_session() as sess:
            train_writter.add_graph(sess.graph)
            # run the initializer before loading so restored weights are not
            # clobbered by a later re-initialization
            sess.run(tf.global_variables_initializer())
            if load_weights:
                try:
                    modelpath = os.path.join(os.getcwd(), 'network&&weights',
                                             'spectrogram', 'inception',
                                             'spec_inception.h5')
                    self.model = load_model(modelpath,
                                            custom_objects={
                                                'focal_loss': focal_loss,
                                                'focal_loss_fixed':
                                                focal_loss()
                                            })
                    print('Successfully loaded the model.')
                except Exception:
                    print('Loading weights failed. Training from scratch.')
            best_score = 0
            self.metrics = {}  # best-so-far evaluation metrics
            duration = 0
            for i in range(0, epoch):
                iteration = 0
                yielddatas = data.data_genetator(batch_size, epoch)
                pbar = tqdm(yielddatas)
                for inputs, labels in pbar:
                    stime = time.time()
                    loss = self.model.train_on_batch(inputs[0], labels)
                    dtime = time.time() - stime
                    duration = duration + dtime
                    # temp = self.model.predict_on_batch(inputs[0])  # debug only
                    # compare = self.interlayer.predict_on_batch(inputs[0])
                    train_summary = tf.Summary()
                    train_summary.value.add(tag='loss', simple_value=loss)
                    train_writter.add_summary(
                        train_summary, iteration + i * iterations_per_epoch)
                    pr = 'epoch: %d/%d, iteration: %d/%d, loss: %s' % (
                        i, epoch, iteration, iterations_per_epoch, loss)
                    pbar.set_description(pr)
                    if iteration == iterations_per_epoch:
                        break
                    else:
                        iteration += 1
                pbar.close()
                if i % 1 == 0:  # evaluate every epoch; raise the modulus to test less often
                    tmetrics = self.TestModel(sess=sess,
                                              datapath=datapath,
                                              str_dataset='train',
                                              data_count=1000,
                                              out_report=False,
                                              writer=train_writter,
                                              step=i)
                    metrics = self.TestModel(sess=sess,
                                             datapath=datapath,
                                             str_dataset='eval',
                                             data_count=-1,
                                             out_report=False,
                                             writer=train_writter,
                                             step=i)
                    if PRIOR_ART:
                        condition = (tmetrics['score'] > metrics['score']
                                     and i > 0
                                     and tmetrics['sensitivity'] >= 90
                                     and tmetrics['specificity'] >= 90)
                    else:
                        condition = (tmetrics['score'] > metrics['score']
                                     and i > 0)
                    if condition:
                        if metrics['score'] >= best_score:
                            self.metrics = metrics
                            self.metrics['epoch'] = i
                            best_score = metrics['score']
                            self.savpath = []
                            self.savpath.append((self.baseSavPath[0] +
                                                 '_epoch' + str(i) + '.h5'))
                            self.savpath.append((self.baseSavPath[1] +
                                                 '_epoch' + str(i) + '.h5'))
                            self.model.save(self.savpath[0])
                            self.model.save_weights(self.savpath[1])
            if 'epoch' in self.metrics:
                print(
                    'The best metrics under the restriction occurred at epoch:',
                    self.metrics['epoch'])
                print(
                    'Sensitivity: {}; Specificity: {}; Score: {}; Accuracy: {}'
                    .format(self.metrics['sensitivity'],
                            self.metrics['specificity'], self.metrics['score'],
                            self.metrics['accuracy']))
                self.TestGenerability(weightspath=self.savpath[1])
            else:
                print('No epoch satisfied the restriction. Done!')
                # path_test = '/home/zhaok14/example/PycharmProjects/setsail/individual_spp/network&&weights/spectrogram/mlp/spec_mlp_weights_epoch12.h5'
                # self.TestGenerability(weightspath=path_test)
            print('Training duration: {}s'.format(round(duration, 2)))
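    # Hedged sketch of the `focal_loss` factory referenced throughout this
    # file: the custom_objects mapping above implies it returns a closure
    # named `focal_loss_fixed`. The form below follows Lin et al. (2017) and
    # is an assumption, not necessarily the repo's exact implementation.
    @staticmethod
    def _focal_loss_sketch(alpha=0.25, gamma=2.0):
        def focal_loss_fixed(y_true, y_pred):
            eps = k.epsilon()
            y_pred = k.clip(y_pred, eps, 1.0 - eps)  # avoid log(0)
            cross_entropy = -y_true * k.log(y_pred)
            weight = alpha * k.pow(1.0 - y_pred, gamma)  # down-weight easy examples
            return k.sum(weight * cross_entropy, axis=-1)
        return focal_loss_fixed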
    def CreateClassicModel(self, input_shape, classes):

        X_input = Input(name='the_input', shape=input_shape)

        layer_h1 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(X_input)  # convolution layer
        # layer_h1 = Dropout(0.1)(layer_h1)
        layer_h2 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h1)  # convolution layer
        layer_h3 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h2)  # 池化层

        # layer_h3 = Dropout(0.1)(layer_h3)
        layer_h4 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h3)  # convolution layer
        # layer_h4 = Dropout(0.2)(layer_h4)
        layer_h5 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h4)  # convolution layer
        layer_h6 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h5)  # 池化层

        # layer_h6 = Dropout(0.2)(layer_h6)
        layer_h7 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h6)  # convolution layer
        # layer_h7 = Dropout(0.2)(layer_h7)
        ## layer_h8 = Conv2D(128, (3, 3), use_bias=True, activation='relu', padding='same',kernel_initializer='he_normal')(layer_h7)  # convolution layer
        layer_h9 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h7)  # pooling layer

        # layer_h9 = Dropout(0.3)(layer_h9)
        ## layer_h10 = Conv2D(128, (3, 3), use_bias=True, activation='relu', padding='same',kernel_initializer='he_normal')(layer_h9)  # convolution layer
        # layer_h10 = Dropout(0.4)(layer_h10)
        ## layer_h11 = Conv2D(128, (3, 3), use_bias=True, activation='relu', padding='same',kernel_initializer='he_normal')(layer_h10)  # convolution layer

        ## flayer = Flatten()(layer_h11)
        flayer = Flatten()(layer_h9)
        # flayer = Dropout(0.4)(flayer)
        fc1 = Dense(units=32,
                    activation="relu",
                    use_bias=True,
                    kernel_initializer='he_normal')(flayer)
        # fc1 = Dropout(0.5)(fc1)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(fc1)  # fully connected layer
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Classic CNN established.')
        self.modelname = 'cnn+dnn'
        return model
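    # Hedged usage sketch: build the classic CNN and print its layer summary.
    # The input shape below is an assumption for illustration; the real
    # spectrogram dimensions come from DataSpeech elsewhere in the repo.
    def DemoClassicModelSummary(self):
        model = self.CreateClassicModel(input_shape=(200, 200, 1),
                                        classes=CLASS_NUM)
        model.summary()  # per-layer output shapes and parameter counts
        return model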
    def CreateSimplifiedClassicModel(self, input_shape, classes):

        X_input = Input(name='the_input', shape=input_shape)

        layer_h1 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(X_input)  # convolution layer
        layer_h2 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(layer_h1)  # convolution layer
        layer_h3 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h2)  # 池化层

        layer_h4 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(layer_h3)  # convolution layer
        layer_h5 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(layer_h4)  # convolution layer
        layer_h6 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h5)  # 池化层

        layer_h7 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(layer_h6)  # convolution layer
        layer_h8 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h7)  # 池化层

        layer_h9 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.01))(layer_h8)  # convolution layer
        layer_h10 = MaxPooling2D(pool_size=2, strides=None,
                                 padding="valid")(layer_h9)  # 池化层

        flayer = Flatten()(layer_h10)
        fc1 = Dense(units=32,
                    activation="relu",
                    use_bias=True,
                    kernel_initializer='he_normal')(flayer)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(fc1)  # fully connected layer
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        # model.compile(optimizer=optimizer, loss='binary_crossentropy')  # alternative to focal loss
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Simplified CNN model established.')
        return model
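    # Shape bookkeeping for the simplified model above, as a small helper:
    # its four pool_size=2 MaxPooling2D layers each floor-halve the spatial
    # dimensions, so Flatten sees (H // 16) * (W // 16) * 128 features. The
    # 200 x 200 example below is an assumption for illustration.
    @staticmethod
    def _flattened_size_sketch(height, width, pools=4, channels=128):
        for _ in range(pools):
            height, width = height // 2, width // 2  # MaxPooling2D(pool_size=2)
        return height * width * channels  # e.g. 200x200 -> 12*12*128 = 18432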
    def CreateSimplifiedIntensifiedClassicModel(self, input_shape, classes):

        X_input = Input(name='the_input', shape=input_shape)

        # temp = BatchNormalization()(X_input)
        layer_h1 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.0005))(X_input)  # convolution layer
        layer_h2 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.0005))(layer_h1)  # convolution layer
        layer_p2 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h2)  # 池化层

        layer_h3 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.0005))(layer_p2)  # convolution layer
        layer_h4 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.0005))(layer_h3)  # convolution layer
        layer_p4 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h4)  # 池化层

        layer_h5 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.0005))(layer_p4)  # convolution layer
        layer_h6 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(0.0005))(layer_h5)  # convolution layer
        layer_p6 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h6)  # 池化层

        # layer_h7 = Conv2D(256, (3, 3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal',kernel_regularizer=None)(layer_p6)  # convolution layer
        # layer_h8 = Conv2D(256, (3, 3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal',kernel_regularizer=None)(layer_h7)  # convolution layer
        # layer_p8 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h8)  # pooling layer

        flayer = Flatten()(layer_p6)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(flayer)  # fully connected layer
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        # sgd = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        # model.compile(optimizer=optimizer, loss='categorical_crossentropy')  # alternative to focal loss
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Simplified and intensified CNN model established.')
        self.modelname = 'cnn+dnn'

        self.intelayer = Model(inputs=X_input, outputs=fc2)

        return model
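    # Hedged usage sketch for `self.intelayer` exposed above: it shares
    # weights with the trained model and returns the pre-softmax logits,
    # which is useful for inspecting raw class scores. `x_batch` is a
    # placeholder name for a batch of spectrogram inputs.
    def DemoLogits(self, x_batch):
        logits = self.intelayer.predict(x_batch)  # shape: (batch, classes)
        return logits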
    def CreatePureXceptionModel(self, input_shape, classes):

        X_input = Input(name='the_input', shape=input_shape)

        # temp = BatchNormalization()(X_input)
        layer_h1 = SeparableConv2D(32, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))(
                                       X_input)  # convolution layer
        layer_h2 = SeparableConv2D(32, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))(
                                       layer_h1)  # convolution layer
        layer_p2 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h2)  # 池化层

        layer_h3 = SeparableConv2D(64, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))(
                                       layer_p2)  # convolution layer
        layer_h4 = SeparableConv2D(64, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))(
                                       layer_h3)  # convolution layer
        layer_p4 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h4)  # 池化层

        layer_h5 = SeparableConv2D(128, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))(
                                       layer_p4)  # convolution layer
        layer_h6 = SeparableConv2D(128, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))(
                                       layer_h5)  # convolution layer
        layer_p6 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h6)  # 池化层

        # layer_h7 = Conv2D(256, (3, 3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal',kernel_regularizer=None)(layer_p6)  # convolution layer
        # layer_h8 = Conv2D(256, (3, 3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal',kernel_regularizer=None)(layer_h7)  # convolution layer
        # layer_p8 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h8)  # pooling layer

        flayer = Flatten()(layer_p6)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(flayer)  # fully connected layer
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Pure Xception CNN model established.')
        self.modelname = 'inception'

        self.intelayer = Model(inputs=X_input, outputs=fc2)

        return model
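    # Why SeparableConv2D above: a depthwise-separable 3x3 convolution factors
    # a full 3x3 x C_in x C_out kernel into a 3x3 x C_in depthwise pass plus a
    # 1x1 x C_in x C_out pointwise projection. A small weight-count comparison
    # (biases omitted) for the 128 -> 128 layers used in this model:
    @staticmethod
    def _separable_savings_sketch(k_size=3, c_in=128, c_out=128):
        regular = k_size * k_size * c_in * c_out           # 147456 weights
        separable = k_size * k_size * c_in + c_in * c_out  # 17536 weights
        return regular, separable  # roughly an 8x reduction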