def CreateCustomizedResNetModel(self, input_shape, classes):
        """Build a small residual-style CNN classifier.

        Three `block(filters)` stages (32/64/128 filters) each followed by a
        2x2 max pooling, then Flatten -> Dense(classes) -> softmax.  Compiled
        with Adadelta and focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.  Side effect: sets
            self.modelname = 'residual'.
        """
        X_input = Input(name='the_input', shape=input_shape)

        # Three block + pool stages with doubling filter counts.
        x = X_input
        for filters in (32, 64, 128):
            x = block(filters)(x)
            x = MaxPooling2D(pool_size=2, strides=None,
                             padding="valid")(x)  # pooling layer

        flayer = Flatten()(x)
        fc = Dense(classes, use_bias=True,
                   kernel_initializer='he_normal')(flayer)  # fully connected
        y_pred = Activation('softmax', name='Activation0')(fc)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # typo fix: "estabished" -> "established"
        print('Customized resnet model established.')
        self.modelname = 'residual'
        return model
# Exemplo n.º 2 / 0  (scraping artifact: stray example header, commented out so the file parses)
    def regularCNNForest(self, input_shape, classes, layer_counts):
        """Build a plain CNN whose effective depth is picked by layer_counts.

        Five two-conv stages (32/64/128/256/512 filters); the first three use
        l2(0.0005) weight decay, the last two none.  Every stage except the
        last is followed by a 2x2 max pooling.  The output is tapped after
        stage `layer_counts // 2` and fed to GAP -> Dense(classes) -> softmax.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :param layer_counts: total conv layers to use; one of 2, 4, 6, 8, 10.
        :raises ValueError: if layer_counts is not a supported value.
        :return: the compiled keras Model.  Side effect: sets
            self.modelname = 'layerInv'.
        """
        # Validate up front instead of the old print + assert(0): asserts are
        # stripped under `python -O`, so raise an explicit error.
        if layer_counts not in (2, 4, 6, 8, 10):
            raise ValueError('[ERROR]unresolved parameters: layer_counts.')

        X_input = Input(name='the_input', shape=input_shape)

        # (filters, l2 weight or None) per two-conv stage.
        stage_cfg = ((32, 0.0005), (64, 0.0005), (128, 0.0005),
                     (256, None), (512, None))
        taps = []  # candidate outputs, one per stage
        x = X_input
        for idx, (filters, weight_decay) in enumerate(stage_cfg):
            for _ in range(2):
                x = Conv2D(filters, (3, 3), use_bias=True, activation='relu',
                           padding='same', kernel_initializer='he_normal',
                           kernel_regularizer=(l2(weight_decay)
                                               if weight_decay else None))(x)
            # The last (512) stage is not pooled, matching the original graph.
            if idx < len(stage_cfg) - 1:
                x = MaxPooling2D(pool_size=2, strides=None,
                                 padding="valid")(x)
            taps.append(x)

        output = taps[layer_counts // 2 - 1]
        flayer = GlobalAveragePooling2D()(output)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(flayer)  # fully connected
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # typo fix: "estabished" -> "established"
        print('Regular cnn model with {} layers established.'.format(layer_counts))
        self.modelname = 'layerInv'
        return model
    def CreateResNetModel(self, input_shape, classes):
        """Build the original small ResNet: a 3x3 conv stem, two residual
        `block(16)` units, BN+ReLU, global average pooling, dropout, and a
        softmax Dense head.  Compiled with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.
        """
        net_in = Input(name='the_input', shape=input_shape)

        # Stem: plain 3x3 conv with weight decay, then BN + ReLU.
        features = Conv2D(kernel_size=3,
                          filters=16,
                          strides=1,
                          padding='same',
                          kernel_regularizer=regularizers.l2(0.01))(net_in)
        features = BatchNormalization()(features)
        features = Activation(relu)(features)

        # Two residual units at 16 filters.
        for _ in range(2):
            features = block(16)(features)

        features = BatchNormalization()(features)
        features = Activation(relu)(features)

        # Collapse spatial dims (e.g. 28x28x48 -> 1x48), then dropout for
        # more robust learning.
        pooled = GlobalAveragePooling2D()(features)
        pooled = Dropout(0.2)(pooled)
        last = Dense(units=classes,
                     activation='softmax',
                     kernel_regularizer=regularizers.l2(0.01))(pooled)

        model = Model(inputs=net_in, outputs=last)
        model.compile(optimizer=optimizers.Adadelta(),
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Original resnet neural network established. ')
        return model
    def CreateCustomizedInceptionModel(self, input_shape, classes):
        """Build a compact inception-style classifier.

        Stem of two 3x3 convs (32 filters, l2 0.01) and a 3x3 max pool, two
        stacked inception modules, another 3x3 max pool, then Flatten and a
        softmax Dense head.  Compiled with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.  Side effect: sets
            self.modelname = 'inception'.
        """
        net_in = Input(name='the_input', shape=input_shape)

        # Convolutional stem.
        stem = Conv2D(32, (3, 3), use_bias=True, activation='relu',
                      padding='same', kernel_initializer='he_normal',
                      kernel_regularizer=l2(0.01))(net_in)
        stem = Conv2D(32, (3, 3), use_bias=True, activation='relu',
                      padding='same', kernel_initializer='he_normal',
                      kernel_regularizer=l2(0.01))(stem)
        stem = MaxPooling2D(pool_size=3, strides=None, padding="valid")(stem)

        # Two stacked inception modules (GoogLeNet 3a/3b filter counts).
        mixed = inception_model(input=stem,
                                filters_1x1=64,
                                filters_3x3_reduce=96,
                                filters_3x3=128,
                                filters_5x5_reduce=16,
                                filters_5x5=32,
                                filters_pool_proj=32)
        mixed = inception_model(input=mixed,
                                filters_1x1=128,
                                filters_3x3_reduce=128,
                                filters_3x3=192,
                                filters_5x5_reduce=32,
                                filters_5x5=96,
                                filters_pool_proj=64)
        pooled = MaxPooling2D(pool_size=3, strides=None,
                              padding="valid")(mixed)

        # Classifier head.
        flat = core.Flatten()(pooled)
        last = Dense(units=classes,
                     activation='softmax',
                     kernel_regularizer=l2(0.01))(flat)

        model = Model(inputs=net_in, outputs=last)
        model.compile(optimizer=optimizers.Adadelta(),
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Customized inception neural network established. ')
        self.modelname = 'inception'
        return model
# Exemplo n.º 5 / 0  (scraping artifact: stray example header, commented out so the file parses)
    def bnRegularCNN(self, input_shape, classes):
        """Build a toy CNN from three ReguBlock stages.

        Each stage is a `ReguBlock(filters)` (32/64/128 filters; presumably a
        BN-regularized conv block — defined elsewhere, confirm) followed by a
        2x2 max pooling; the head is GAP -> Dense(classes) -> softmax.
        Compiled with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.  Side effect: sets
            self.modelname = 'layerInv'.
        """
        X_input = Input(name='the_input', shape=input_shape)

        x = X_input
        for filters in (32, 64, 128):
            x = ReguBlock(filters)(x)
            x = MaxPooling2D(pool_size=2, strides=None,
                             padding="valid")(x)  # pooling layer

        flayer = GlobalAveragePooling2D()(x)
        fc = Dense(classes, use_bias=True,
                   kernel_initializer='he_normal')(flayer)  # fully connected
        y_pred = Activation('softmax', name='Activation0')(fc)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # typo fix: "estabished" -> "established"
        print('Regular toy cnn model within BN layers established.')
        self.modelname = 'layerInv'
        return model
    def TrainModel(self,
                   datapath,
                   epoch=2,
                   batch_size=32,
                   load_weights=False,
                   filename='model_set/speech_model25'):
        """Train self.model on the 'train' split and checkpoint the best.

        Per epoch: trains batch-by-batch while logging the loss to
        TensorBoard, then evaluates on 1000 training samples and on the full
        eval split via self.TestModel; saves the model to self.savpath when
        the eval score improves (after epoch 0).

        NOTE(review): this method is re-defined later in the class with the
        same name; the later definition shadows this one.

        :param datapath: root directory handed to DataSpeech.
        :param epoch: number of epochs to run.
        :param batch_size: minibatch size; must be a multiple of CLASS_NUM.
        :param load_weights: if True, attempt to restore a saved model first.
        :param filename: NOTE(review): accepted but never used in the body.
        """
        # NOTE(review): assert is stripped under `python -O`; raising would
        # be a safer input guard.
        assert (batch_size % CLASS_NUM == 0)
        data = DataSpeech(datapath, 'train')
        num_data = sum(data.DataNum)  # total number of training samples

        # Restart TensorBoard over a clean summary directory.  The logdir
        # below is a hard-coded machine-specific path.
        os.system('pkill tensorboard')
        os.system('rm -rf ./checkpoints/files_summary/* ')
        train_writter = tf.summary.FileWriter(
            os.path.join(os.getcwd(), 'checkpoints', 'files_summary'))
        os.system(
            'tensorboard --logdir=/home/zhaok14/example/PycharmProjects/setsail/individual_spp/checkpoints/files_summary/ &'
        )
        print('\n')
        print(90 * '*')
        print(90 * '*')

        # Batches per epoch sized from the smallest per-class count
        # (presumably data.DataNum is a per-class sample count — confirm).
        iterations_per_epoch = min(
            data.DataNum) // (batch_size // CLASS_NUM) + 1
        # iterations_per_epoch = 2
        print('trainer info:')
        print('training data size: %d' % num_data)
        print('increased epoches: ', epoch)
        print('minibatch size: %d' % batch_size)
        print('iterations per epoch: %d' % iterations_per_epoch)

        with k.get_session() as sess:
            train_writter.add_graph(sess.graph)
            if load_weights == True:
                try:
                    # modelpath = os.path.join(os.getcwd(), 'network&&weights', 'spectrogram', 'inception','spec_inception.h5')
                    # NOTE(review): `modelpath` is undefined because its
                    # assignment above is commented out, so load_model always
                    # raises NameError here and the bare except below hides it.
                    self.model = load_model(modelpath,
                                            custom_objects={
                                                'focal_loss': focal_loss,
                                                'focal_loss_fixed':
                                                focal_loss()
                                            })
                    print('Successfully loading the model.')
                except:
                    print('Loading weights failed. Train from scratch.')
            sess.run(tf.global_variables_initializer())

            best_score = 0
            for i in range(0, epoch):
                iteration = 0
                yielddatas = data.data_genetator(batch_size, epoch)
                pbar = tqdm(yielddatas)
                for input, labels in pbar:
                    loss = self.model.train_on_batch(input[0], labels)
                    train_summary = tf.Summary()
                    train_summary.value.add(tag='loss', simple_value=loss)
                    train_writter.add_summary(
                        train_summary, iteration + i * iterations_per_epoch)
                    # NOTE(review): the %d pairs look swapped — this prints
                    # total/current instead of current/total.
                    pr = 'epoch:%d/%d,iteration: %d/%d ,loss: %s' % (
                        epoch, i, iterations_per_epoch, iteration, loss)
                    pbar.set_description(pr)
                    if iteration == iterations_per_epoch:
                        break
                    else:
                        iteration += 1
                pbar.close()
                if i % 1 == 0:
                    # Per-epoch evaluation: a 1000-sample training slice and
                    # the full eval split (data_count=-1), both logged to
                    # TensorBoard.
                    self.TestModel(sess=sess,
                                   datapath=self.datapath,
                                   str_dataset='train',
                                   data_count=1000,
                                   out_report=False,
                                   writer=train_writter,
                                   step=i)
                    metrics = self.TestModel(sess=sess,
                                             datapath=self.datapath,
                                             str_dataset='eval',
                                             data_count=-1,
                                             out_report=False,
                                             writer=train_writter,
                                             step=i)
                    # Keep the best eval score seen after the first epoch.
                    if (metrics['score'] >= best_score and i > 0):
                        self.metrics = metrics
                        self.metrics['epoch'] = i
                        best_score = metrics['score']
                        self.model.save(self.savpath)

        # NOTE(review): assumes self.metrics was set at least once above
        # (i.e. some epoch improved the score) — otherwise this raises;
        # confirm it is initialized elsewhere.
        print('The best metrics took place in the epoch: ',
              self.metrics['epoch'])
        print(
            'Sensitivity: {}; Specificity: {}; Score: {}; Accuracy: {}'.format(
                self.metrics['sensitivity'], self.metrics['specificity'],
                self.metrics['score'], self.metrics['accuracy']))
    def CreateInceptionModel(self, input_shape, classes):
        """Build a GoogLeNet-style classifier front end.

        A conv stem (7x7/2 conv, pool, 1x1 reduce, 3x3 conv, pool), two
        stacked inception modules, then Flatten and a softmax Dense head.
        Compiled with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.
        """
        net_in = Input(name='the_input', shape=input_shape)

        # Stage 1 - layers before the inception modules.
        stem = Conv2D(filters=64,
                      kernel_size=(7, 7),
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.01))(net_in)
        stem = MaxPooling2D(pool_size=(3, 3),
                            strides=(2, 2),
                            padding='same')(stem)
        stem = Conv2D(filters=64,
                      kernel_size=(1, 1),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.01))(stem)
        stem = Conv2D(filters=192,
                      kernel_size=(3, 3),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.01))(stem)
        stem = MaxPooling2D(pool_size=(3, 3),
                            strides=(2, 2),
                            padding='same')(stem)

        # Stage 2 - two stacked inception modules.
        mixed = inception_model(input=stem,
                                filters_1x1=64,
                                filters_3x3_reduce=96,
                                filters_3x3=128,
                                filters_5x5_reduce=16,
                                filters_5x5=32,
                                filters_pool_proj=32)
        mixed = inception_model(input=mixed,
                                filters_1x1=128,
                                filters_3x3_reduce=128,
                                filters_3x3=192,
                                filters_5x5_reduce=32,
                                filters_5x5=96,
                                filters_pool_proj=64)

        # Stage 3 - Flatten + softmax Dense ending.
        flat = core.Flatten()(mixed)
        last = Dense(units=classes,
                     activation='softmax',
                     kernel_regularizer=l2(0.01))(flat)

        model = Model(inputs=net_in, outputs=last)
        model.compile(optimizer=optimizers.Adadelta(),
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        print('Inception neural network established. ')

        return model
    def CreateClassicModel(self, input_shape, classes):
        """Build a classic VGG-like CNN classifier.

        Conv stages of (32, 32), (64, 64), (128,) filters, each followed by a
        2x2 max pooling, then Flatten -> Dense(32, relu) -> Dense(classes)
        -> softmax.  Compiled with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.
        """
        net_in = Input(name='the_input', shape=input_shape)

        # Convolutional trunk: (filters, number of convs) per pooled stage.
        x = net_in
        for filters, n_convs in ((32, 2), (64, 2), (128, 1)):
            for _ in range(n_convs):
                x = Conv2D(filters, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(x)
            x = MaxPooling2D(pool_size=2, strides=None,
                             padding="valid")(x)  # pooling layer

        # Classifier head.
        flat = Flatten()(x)
        hidden = Dense(units=32,
                       activation="relu",
                       use_bias=True,
                       kernel_initializer='he_normal')(flat)
        logits = Dense(classes, use_bias=True,
                       kernel_initializer='he_normal')(hidden)
        y_pred = Activation('softmax', name='Activation0')(logits)

        model = Model(inputs=net_in, outputs=y_pred)
        model.compile(optimizer=optimizers.Adadelta(),
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        return model
    def CreateSimplifiedClassicModel(self, input_shape, classes):
        """Build a simplified VGG-like CNN classifier.

        Conv stages of (32, 32), (64, 64), (128,), (128,) filters — all with
        l2(0.01) weight decay — each followed by a 2x2 max pooling, then
        Flatten -> Dense(32, relu) -> Dense(classes) -> softmax.  Compiled
        with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.
        """
        X_input = Input(name='the_input', shape=input_shape)

        # (filters, number of convs) per pooled stage.
        x = X_input
        for filters, n_convs in ((32, 2), (64, 2), (128, 1), (128, 1)):
            for _ in range(n_convs):
                x = Conv2D(filters, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal',
                           kernel_regularizer=l2(0.01))(x)
            x = MaxPooling2D(pool_size=2, strides=None,
                             padding="valid")(x)  # pooling layer

        flayer = Flatten()(x)
        fc1 = Dense(units=32,
                    activation="relu",
                    use_bias=True,
                    kernel_initializer='he_normal')(flayer)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(fc1)  # fully connected
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # typo fix: "estabished" -> "established"
        print('Simplified cnn model established.')
        return model
    def CreateSimplifiedIntensifiedClassicModel(self, input_shape, classes):
        """Build a deeper unregularized VGG-like CNN classifier.

        Four two-conv stages (32/64/128/256 filters, no weight decay), each
        followed by a 2x2 max pooling, then Flatten -> Dense(classes) ->
        softmax.  Compiled with Adadelta + focal loss.

        :param input_shape: shape of one input sample (without batch axis).
        :param classes: number of output classes.
        :return: the compiled keras Model.  Side effects: sets
            self.modelname = 'cnn+dnn' and self.intelayer to a Model exposing
            the pre-softmax logits.
        """
        X_input = Input(name='the_input', shape=input_shape)

        # Two 3x3 convs per stage, doubling filters, then pool.
        x = X_input
        for filters in (32, 64, 128, 256):
            for _ in range(2):
                x = Conv2D(filters, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal',
                           kernel_regularizer=None)(x)
            x = MaxPooling2D(pool_size=2, strides=None,
                             padding="valid")(x)  # pooling layer

        flayer = Flatten()(x)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(flayer)  # fully connected
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # typo fix: "estabished" -> "established"
        print('Simplified and intensified cnn model established.')
        self.modelname = 'cnn+dnn'

        # Auxiliary model exposing the raw logits before the softmax.
        # NOTE(review): attribute name 'intelayer' looks like a misspelling of
        # 'interlayer' (referenced, commented out, in TrainModel) — kept as-is
        # for backward compatibility.
        self.intelayer = Model(inputs=X_input, outputs=fc2)

        return model
    def TrainModel(self,
                   datapath,
                   epoch=2,
                   batch_size=32,
                   load_weights=False,
                   filename='model_set/speech_model25'):
        """Train self.model, evaluate each epoch, and checkpoint the best.

        NOTE(review): this is the second definition of TrainModel in this
        class; it silently shadows the earlier one of the same name.

        Per epoch: trains batch-by-batch (logging loss to TensorBoard), then
        evaluates on 1000 training samples and on the full eval split via
        self.TestModel.  When the gating `condition` holds and the eval score
        improves, saves the model and its weights to paths derived from
        self.baseSavPath.

        :param datapath: root directory handed to DataSpeech.
        :param epoch: number of epochs to run.
        :param batch_size: minibatch size; must be a multiple of CLASS_NUM.
        :param load_weights: if True, attempt to restore a saved model first.
        :param filename: NOTE(review): accepted but never used in the body.
        """
        # NOTE(review): assert is stripped under `python -O`; raising would
        # be a safer input guard.
        assert (batch_size % CLASS_NUM == 0)
        data = DataSpeech(datapath, 'train')
        num_data = sum(data.DataNum)  # total number of training samples

        # Restart TensorBoard over a clean summary directory.  The logdir
        # below is a hard-coded machine-specific path.
        os.system('pkill tensorboard')
        os.system('rm -rf ./checkpoints/files_summary/* ')
        train_writter = tf.summary.FileWriter(
            os.path.join(os.getcwd(), 'checkpoints', 'files_summary'))
        os.system(
            'tensorboard --logdir=/home/zhaok14/example/PycharmProjects/setsail/individual_spp/checkpoints/files_summary/ &'
        )
        print('\n')
        print(90 * '*')
        print(90 * '*')

        # Batches per epoch sized from the smallest per-class count
        # (presumably data.DataNum is a per-class sample count — confirm).
        iterations_per_epoch = min(
            data.DataNum) // (batch_size // CLASS_NUM) + 1
        # iterations_per_epoch = 0
        print('trainer info:')
        print('training data size: %d' % num_data)
        print('increased epoches: ', epoch)
        print('minibatch size: %d' % batch_size)
        print('iterations per epoch: %d' % iterations_per_epoch)

        with k.get_session() as sess:
            train_writter.add_graph(sess.graph)
            if load_weights == True:
                try:
                    # modelpath = os.path.join(os.getcwd(), 'network&&weights', 'spectrogram', 'inception','spec_inception.h5')
                    # NOTE(review): `modelpath` is undefined because its
                    # assignment above is commented out, so load_model always
                    # raises NameError here and the bare except below hides it.
                    self.model = load_model(modelpath,
                                            custom_objects={
                                                'focal_loss': focal_loss,
                                                'focal_loss_fixed':
                                                focal_loss()
                                            })
                    print('Successfully loading the model.')
                except:
                    print('Loading weights failed. Train from scratch.')
            sess.run(tf.global_variables_initializer())
            best_score = 0
            # epoch = 0
            duration = 0  # cumulative train+predict wall time in seconds
            for i in range(0, epoch):
                iteration = 0
                yielddatas = data.data_genetator(batch_size, epoch)
                pbar = tqdm(yielddatas)
                for input, labels in pbar:
                    stime = time.time()
                    loss = self.model.train_on_batch(input[0], labels)
                    # NOTE(review): `temp` is never used — this extra forward
                    # pass per batch only inflates `duration`.
                    temp = self.model.predict_on_batch(input[0])
                    dtime = time.time() - stime
                    duration = duration + dtime
                    # okay = self.model.predict_on_batch(input[0])
                    # compare = self.interlayer.predict_on_batch(input[0])
                    train_summary = tf.Summary()
                    train_summary.value.add(tag='loss', simple_value=loss)
                    train_writter.add_summary(
                        train_summary, iteration + i * iterations_per_epoch)
                    # NOTE(review): the %d pairs look swapped — this prints
                    # total/current instead of current/total.
                    pr = 'epoch:%d/%d,iteration: %d/%d ,loss: %s' % (
                        epoch, i, iterations_per_epoch, iteration, loss)
                    pbar.set_description(pr)
                    if iteration == iterations_per_epoch:
                        break
                    else:
                        iteration += 1
                pbar.close()
                if i % 1 == 0:
                    # Per-epoch evaluation: a 1000-sample training slice and
                    # the full eval split (data_count=-1), both logged to
                    # TensorBoard.
                    tmetrics = self.TestModel(sess=sess,
                                              datapath=datapath,
                                              str_dataset='train',
                                              data_count=1000,
                                              out_report=False,
                                              writer=train_writter,
                                              step=i)
                    metrics = self.TestModel(sess=sess,
                                             datapath=datapath,
                                             str_dataset='eval',
                                             data_count=-1,
                                             out_report=False,
                                             writer=train_writter,
                                             step=i)
                    # Checkpoint gate: without PRIOR_ART, require the train
                    # score to beat eval and train sens/spec both >= 91.
                    if PRIOR_ART == False:
                        condition = tmetrics['score'] > metrics[
                            'score'] and i > 0 and tmetrics[
                                'sensitivity'] >= 91 and tmetrics[
                                    'specificity'] >= 91
                    else:
                        condition = i > 0
                    if condition:
                        if metrics['score'] >= best_score:
                            self.metrics = metrics
                            self.metrics['epoch'] = i
                            best_score = metrics['score']
                            self.savpath = []
                            self.savpath.append((self.baseSavPath[0] +
                                                 '_epoch' + str(i) + '.h5'))
                            self.savpath.append((self.baseSavPath[1] +
                                                 '_epoch' + str(i) + '.h5'))
                            self.model.save(self.savpath[0])
                            self.model.save_weights(self.savpath[1])
            # NOTE(review): assumes self.metrics exists even when no epoch
            # passed the gate — presumably initialized elsewhere; confirm.
            if 'epoch' in self.metrics.keys():
                print(
                    'The best metric after restriction took place in the epoch: ',
                    self.metrics['epoch'])
                print(
                    'Sensitivity: {}; Specificity: {}; Score: {}; Accuracy: {}'
                    .format(self.metrics['sensitivity'],
                            self.metrics['specificity'], self.metrics['score'],
                            self.metrics['accuracy']))
                self.TestGenerability(weightspath=self.savpath[1])
            else:
                print('The restricted best metric is not found. Done!')
                # path_test = '/home/zhaok14/example/PycharmProjects/setsail/individual_spp/network&&weights/spectrogram/mlp/spec_mlp_weights_epoch12.h5'
                # self.TestGenerability(weightspath=path_test)
            print('Training duration: {}s'.format(round(duration, 2)))
    def CreatePureXceptionModel(self, input_shape, classes):
        """Build and compile a small VGG-style CNN of depthwise-separable convs.

        Three stages of two ``SeparableConv2D`` layers (32 -> 64 -> 128
        filters), each stage followed by a 2x2 max pooling, then flatten and
        a single Dense softmax classifier. Compiled with Adadelta and focal
        loss (alpha=0.25, gamma=2).

        Args:
            input_shape: shape tuple passed to ``Input`` (presumably
                channels-last ``(H, W, C)`` — confirm against the caller).
            classes: number of output classes (units of the final Dense layer).

        Returns:
            The compiled Keras ``Model``. As side effects, stores an auxiliary
            ``Model`` exposing the pre-softmax logits in ``self.intelayer``
            and sets ``self.modelname``.
        """

        def sep_conv(filters):
            # All six separable convolutions share the same hyper-parameters;
            # factored out to avoid repeating them verbatim.
            return SeparableConv2D(filters, (3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=l2(0.0005))

        X_input = Input(name='the_input', shape=input_shape)

        # Stage 1: 32 filters.
        x = sep_conv(32)(X_input)           # separable conv layer
        x = sep_conv(32)(x)                 # separable conv layer
        x = MaxPooling2D(pool_size=2, strides=None, padding="valid")(x)  # pooling layer

        # Stage 2: 64 filters.
        x = sep_conv(64)(x)                 # separable conv layer
        x = sep_conv(64)(x)                 # separable conv layer
        x = MaxPooling2D(pool_size=2, strides=None, padding="valid")(x)  # pooling layer

        # Stage 3: 128 filters.
        x = sep_conv(128)(x)                # separable conv layer
        x = sep_conv(128)(x)                # separable conv layer
        x = MaxPooling2D(pool_size=2, strides=None, padding="valid")(x)  # pooling layer

        flayer = Flatten()(x)
        fc2 = Dense(classes, use_bias=True,
                    kernel_initializer='he_normal')(flayer)  # fully-connected logits
        y_pred = Activation('softmax', name='Activation0')(fc2)

        model = Model(inputs=X_input, outputs=y_pred)
        optimizer = optimizers.Adadelta()
        model.compile(optimizer=optimizer,
                      loss=[focal_loss(alpha=0.25, gamma=2)])
        # Fixed typo in the status message ("estabished" -> "established").
        print('Pure Xception cnn model established.')
        # NOTE(review): tag says 'inception' although the layers are
        # Xception-style separable convs; kept as-is because other code may
        # key off this exact string — confirm before renaming.
        self.modelname = 'inception'

        # Auxiliary model exposing the pre-softmax logits (fc2) for inspection.
        self.intelayer = Model(inputs=X_input, outputs=fc2)

        return model