Example #1
def test_model1():
    features, labels = loadFromPickle()
    labels_count = int(len(labels) / MAX_NUM)
    # features, labels = augmentData(features, labels)
    features, labels = shuffle(features, labels)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.1)
    # train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
    # test_x = test_x.reshape(test_x.shape[0], 28, 28, 1)
    model, callbacks_list = keras_model1((28 * 28, ), labels_count)
    print_summary(model)
    model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=500,
              batch_size=256)
    # model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=500, batch_size=256,
    #          callbacks=[TensorBoard(log_dir="TensorBoard")])

    # Evaluate the model; verbose=0 suppresses log output
    score = model.evaluate(test_x, test_y, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])  # accuracy

    model.save('model1.h5')
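
Note: the helpers loadFromPickle and prepress_labels are not part of this listing. A minimal sketch of what they plausibly do, assuming the features and labels sit in pickle files and that prepress_labels one-hot encodes integer labels with Keras (file names and encoding are assumptions, not the original code):

import pickle
import numpy as np
from keras.utils import np_utils

def loadFromPickle():
    # Hypothetical helper: load feature and label arrays from pickle files.
    with open("features", "rb") as f:
        features = np.array(pickle.load(f))
    with open("labels", "rb") as f:
        labels = np.array(pickle.load(f))
    return features, labels

def prepress_labels(labels):
    # Hypothetical helper: one-hot encode integer labels for categorical_crossentropy.
    return np_utils.to_categorical(labels)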
Example #2
def test_model():
    features, labels = loadFromPickle()
    labels_count = len(load_label_name())
    # features, labels = augmentData(features, labels)
    features, labels = shuffle(features, labels)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.2)
    train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
    test_x = test_x.reshape(test_x.shape[0], 28, 28, 1)
    model, callbacks_list = keras_model((
        28,
        28,
        1,
    ), labels_count)
    print_summary(model)
    model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=40,
              batch_size=128)
    # Evaluate the model; verbose=0 suppresses log output
    score = model.evaluate(test_x, test_y, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])  # accuracy
    model.save('model.h5')
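
Note: keras_model itself is not shown. A minimal sketch of the kind of (model, callbacks_list) pair it could return for a (28, 28, 1) input; the layer sizes and the checkpoint callback are illustrative assumptions, not the original architecture:

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.callbacks import ModelCheckpoint

def keras_model(image_shape, labels_count):
    # Hypothetical reconstruction: a small CNN classifier for 28x28x1 inputs.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=image_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(labels_count, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    # Keep the best weights seen during training (illustrative callback).
    checkpoint = ModelCheckpoint('best_model.h5', monitor='val_acc',
                                 save_best_only=True, verbose=1)
    return model, [checkpoint]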
Example #3
def validate():
    data_dir = ".\\data\\"
    label_names = get_labels(data_dir)
    model = load_model('asr_model.h5')  # load the trained model

    features, labels = loadFromPickle()
    features, labels = shuffle(features, labels)
    features = features.reshape(features.shape[0], 20, 32, 1)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,
                                                        test_size=0.3)
    print_summary(model)

    # Evaluate the model; verbose=0 suppresses log output
    score = model.evaluate(test_x, test_y, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])  # accuracy
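
Note: once asr_model.h5 is loaded, a single sample from the test split could be decoded like this (a sketch continuing the snippet above; it assumes the ordering of label_names matches the label indices used at training time):

    import numpy as np

    # Predict one sample and map the argmax back to a label name (assumed ordering).
    pred = model.predict(test_x[:1])
    print('Predicted:', label_names[int(np.argmax(pred[0]))])
    print('Actual:   ', label_names[int(np.argmax(test_y[0]))])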
Example #4
    def evaluate(self, data_dir):
        x_data, y_label = load_img_from_dir(data_dir,
                                            target_size=(self.IMAGE_SIZE,
                                                         self.IMAGE_SIZE),
                                            max_num=60)
        for i in range(x_data.shape[0]):
            x_data[i] = preprocess_input(x_data[i])
        x_data = x_data.reshape(x_data.shape[0], self.IMAGE_SIZE,
                                self.IMAGE_SIZE, 3)
        y_label_one_hot = prepress_labels(y_label)
        # Validation should use images the model has never seen
        train_x, test_x, train_y, test_y = train_test_split(x_data,
                                                            y_label_one_hot,
                                                            random_state=0,
                                                            test_size=0.5)
        # Evaluate the model; verbose=0 would suppress log output
        # Bare `model` was undefined here; assume the trained model is kept on self.model
        score = self.model.evaluate(test_x, test_y, verbose=1, steps=1)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])  # accuracy
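
Note: load_img_from_dir is another helper that is not shown. A minimal sketch of a plausible implementation, assuming one subdirectory per class under data_dir and the standard Keras image utilities (the return layout and label ordering are guesses):

import os
import numpy as np
from keras.preprocessing.image import load_img, img_to_array

def load_img_from_dir(data_dir, target_size=(224, 224), max_num=60):
    # Hypothetical helper: each subdirectory of data_dir is one class;
    # load up to max_num images per class and return (images, integer labels).
    x_data, y_label = [], []
    for class_idx, class_name in enumerate(sorted(os.listdir(data_dir))):
        class_dir = os.path.join(data_dir, class_name)
        if not os.path.isdir(class_dir):
            continue
        for file_name in sorted(os.listdir(class_dir))[:max_num]:
            img = load_img(os.path.join(class_dir, file_name),
                           target_size=target_size)
            x_data.append(img_to_array(img))
            y_label.append(class_idx)
    return np.array(x_data), np.array(y_label)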
Example #5
    def train(self, data_dir, epochs=3, callback=None, model_name='vgg16'):
        """
        train 函数, 训练模型
        Keyword arguments::
        epochs: int 训练次数
        """
        if self.busy:
            return 1
        self.busy = True
        label_names = get_labels(data_dir)
        self.NUM_CLASSES = len(label_names)
        base_model = self.get_base_model(model_name)
        x_data, y_label = load_img_from_dir(data_dir,
                                            target_size=(self.IMAGE_SIZE,
                                                         self.IMAGE_SIZE),
                                            max_num=30)
        for i in range(x_data.shape[0]):
            x_data[i] = self.preprocess_input(x_data[i])
        print(x_data.shape)
        print(x_data[0].shape)
        x_data = x_data.reshape(x_data.shape[0], self.IMAGE_SIZE,
                                self.IMAGE_SIZE, 3)
        y_label_one_hot = prepress_labels(y_label)
        # Validation should use images the model has never seen
        train_x, test_x, train_y, test_y = train_test_split(x_data,
                                                            y_label_one_hot,
                                                            random_state=0,
                                                            test_size=0.3)
        # Build the custom FC head on top of the base model's last convolutional output
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(self.FC_NUMS, activation='relu')(x)
        prediction = Dense(self.NUM_CLASSES, activation='softmax')(x)

        # Assemble the new model with the custom FC layers attached
        model = Model(inputs=base_model.input, outputs=prediction)

        # Get the number of layers in the model
        print("layer nums:", len(model.layers))

        # Besides the FC head, the convolutional layers closest to it can also be trained.
        # The model architecture defines how many layers each convolutional block contains;
        # here TRAIN_LAYERS is 3, so the last convolutional block and the FC head are trainable
        for layer in model.layers:
            layer.trainable = False
        for layer in model.layers[-self.TRAIN_LAYERS:]:
            layer.trainable = True
        for layer in model.layers:
            print("layer.trainable:", layer.trainable)

        # Compile the model
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        model.fit(
            train_x,
            train_y,
            validation_data=(test_x, test_y),
            # model.fit(x_data, y_label_one_hot,
            #         validation_split=0.4,
            callbacks=[AccuracyLogger(callback)],
            epochs=epochs,
            batch_size=4,
            # steps_per_epoch=1,validation_steps =1 ,
            verbose=1,
            shuffle=True)
        self.model = model
        model.save(os.path.join(data_dir, 'model.h5'))
        self.label_names = label_names
        self.dump_label_name(label_names)
        # self.convert_tflite()
        self.session = K.get_session()
        self.graph = tf.get_default_graph()
        self.busy = False
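
Note: AccuracyLogger(callback) wraps a user-supplied progress callback but is not defined in the listing. A minimal sketch of what such a Keras callback could look like; the constructor argument and the reported fields are assumptions:

from keras.callbacks import Callback

class AccuracyLogger(Callback):
    # Hypothetical callback: forward per-epoch metrics to an external callable.
    def __init__(self, callback=None):
        super(AccuracyLogger, self).__init__()
        self.callback = callback

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if self.callback is not None:
            # Old Keras reports accuracy under 'acc' / 'val_acc'.
            self.callback(epoch, logs.get('acc'), logs.get('val_acc'))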
Example #6
    def train(self, data_dir, epochs=3, callback=None):
        if self.busy:
            return 1
        self.busy = True
        label_names = get_labels(data_dir)
        self.NUM_CLASSES = len(label_names)
        from keras.applications.vgg16 import VGG16, preprocess_input
        # Use VGG16 as the base model; include_top=False drops the built-in FC layers so a custom head can be added. The pretrained weights are downloaded to ~/.keras/models
        base_model = VGG16(input_shape=(self.IMAGE_SIZE, self.IMAGE_SIZE, 3),
                           include_top=False,
                           weights='imagenet')
        x_data, y_label = load_img_from_dir(data_dir,
                                            target_size=(self.IMAGE_SIZE,
                                                         self.IMAGE_SIZE),
                                            max_num=30)
        for i in range(x_data.shape[0]):
            x_data[i] = preprocess_input(x_data[i])
        print(x_data.shape)
        print(x_data[0].shape)
        x_data = x_data.reshape(x_data.shape[0], self.IMAGE_SIZE,
                                self.IMAGE_SIZE, 3)
        y_label_one_hot = prepress_labels(y_label)
        # Validation should use images the model has never seen
        train_x, test_x, train_y, test_y = train_test_split(x_data,
                                                            y_label_one_hot,
                                                            random_state=0,
                                                            test_size=0.3)
        # Build the custom FC head on top of the base model's last convolutional output
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(self.FC_NUMS, activation='relu')(x)
        prediction = Dense(self.NUM_CLASSES, activation='softmax')(x)

        # Assemble the new model with the custom FC layers attached
        model = Model(inputs=base_model.input, outputs=prediction)

        # Get the number of layers in the model
        print("layer nums:", len(model.layers))

        # Besides the FC head, the convolutional layers closest to it can also be trained.
        # The model architecture defines how many layers each convolutional block contains;
        # here FREEZE_LAYERS is 17, so the last convolutional block and the FC head are trainable
        for layer in model.layers[:self.FREEZE_LAYERS]:
            layer.trainable = False
        for layer in model.layers[self.FREEZE_LAYERS:]:
            layer.trainable = True
        for layer in model.layers:
            print("layer.trainable:", layer.trainable)

        # Compile the model
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        model.fit(
            train_x,
            train_y,
            validation_data=(test_x, test_y),
            # model.fit(x_data, y_label_one_hot,
            #         validation_split=0.4,
            callbacks=[AccuracyLogger(callback)],
            epochs=epochs,
            batch_size=4,
            # steps_per_epoch=1,validation_steps =1 ,
            verbose=1,
            shuffle=True)
        self.model = model
        model.save(os.path.join(data_dir, 'model.h5'))
        self.dump_label_name(label_names)
        self.session = K.get_session()
        self.graph = tf.get_default_graph()
        self.busy = False
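
Note: get_labels and dump_label_name are also external (dump_label_name is a method on the class in the example above). A plausible standalone sketch, assuming the labels are simply the class subdirectory names and that their order is persisted next to the model; the file name and pickle format are guesses:

import os
import pickle

def get_labels(data_dir):
    # Hypothetical helper: class names are the subdirectory names of data_dir.
    return sorted(d for d in os.listdir(data_dir)
                  if os.path.isdir(os.path.join(data_dir, d)))

def dump_label_name(label_names, path='label_names.pickle'):
    # Hypothetical helper: persist the label order so predictions can be decoded later.
    with open(path, 'wb') as f:
        pickle.dump(label_names, f)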